/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrGpu.h"

#include "GrBufferAllocPool.h"
#include "GrContext.h"
#include "GrIndexBuffer.h"
#include "GrStencilBuffer.h"
#include "GrVertexBuffer.h"

// probably makes no sense for this to be less than a page
static const size_t VERTEX_POOL_VB_SIZE = 1 << 18;
static const int VERTEX_POOL_VB_COUNT = 4;
static const size_t INDEX_POOL_IB_SIZE = 1 << 16;
static const int INDEX_POOL_IB_COUNT = 4;

////////////////////////////////////////////////////////////////////////////////

#define DEBUG_INVAL_BUFFER    0xdeadcafe
#define DEBUG_INVAL_START_IDX -1

GrGpu::GrGpu()
    : fContext(NULL)
    , fResetTimestamp(kExpiredTimestamp+1)
    , fVertexPool(NULL)
    , fIndexPool(NULL)
    , fVertexPoolUseCnt(0)
    , fIndexPoolUseCnt(0)
    , fUnitSquareVertexBuffer(NULL)
    , fQuadIndexBuffer(NULL)
    , fContextIsDirty(true) {

    fClipMaskManager.setGpu(this);

    fGeomPoolStateStack.push_back();
#if GR_DEBUG
    GeometryPoolState& poolState = fGeomPoolStateStack.back();
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
    poolState.fPoolStartVertex = DEBUG_INVAL_START_IDX;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
    poolState.fPoolStartIndex = DEBUG_INVAL_START_IDX;
#endif

    for (int i = 0; i < kGrPixelConfigCount; ++i) {
        fConfigRenderSupport[i] = false;
    }
}

GrGpu::~GrGpu() {
    this->releaseResources();
}

void GrGpu::abandonResources() {

    fClipMaskManager.releaseResources();

    while (NULL != fResourceList.head()) {
        fResourceList.head()->abandon();
    }

    GrAssert(NULL == fQuadIndexBuffer || !fQuadIndexBuffer->isValid());
    GrAssert(NULL == fUnitSquareVertexBuffer ||
             !fUnitSquareVertexBuffer->isValid());
    GrSafeSetNull(fQuadIndexBuffer);
    GrSafeSetNull(fUnitSquareVertexBuffer);
    delete fVertexPool;
    fVertexPool = NULL;
    delete fIndexPool;
    fIndexPool = NULL;
}

void GrGpu::releaseResources() {

    fClipMaskManager.releaseResources();

    while (NULL != fResourceList.head()) {
        fResourceList.head()->release();
    }

    GrAssert(NULL == fQuadIndexBuffer || !fQuadIndexBuffer->isValid());
    GrAssert(NULL == fUnitSquareVertexBuffer ||
             !fUnitSquareVertexBuffer->isValid());
    GrSafeSetNull(fQuadIndexBuffer);
    GrSafeSetNull(fUnitSquareVertexBuffer);
    delete fVertexPool;
    fVertexPool = NULL;
    delete fIndexPool;
    fIndexPool = NULL;
}

void GrGpu::insertResource(GrResource* resource) {
    GrAssert(NULL != resource);
    GrAssert(this == resource->getGpu());

    fResourceList.addToHead(resource);
}

void GrGpu::removeResource(GrResource* resource) {
    GrAssert(NULL != resource);
    GrAssert(this == resource->getGpu());

    fResourceList.remove(resource);
}


void GrGpu::unimpl(const char msg[]) {
#if GR_DEBUG
    GrPrintf("--- GrGpu unimplemented(\"%s\")\n", msg);
#endif
}

////////////////////////////////////////////////////////////////////////////////

GrTexture* GrGpu::createTexture(const GrTextureDesc& desc,
                                const void* srcData, size_t rowBytes) {
    if (kUnknown_GrPixelConfig == desc.fConfig) {
        return NULL;
    }

    this->handleDirtyContext();
    GrTexture* tex = this->onCreateTexture(desc, srcData, rowBytes);
    if (NULL != tex &&
        (kRenderTarget_GrTextureFlagBit & desc.fFlags) &&
        !(kNoStencil_GrTextureFlagBit & desc.fFlags)) {
        GrAssert(NULL != tex->asRenderTarget());
        // TODO: defer this and attach dynamically
        if (!this->attachStencilBufferToRenderTarget(tex->asRenderTarget())) {
            tex->unref();
            return NULL;
        }
    }
    return tex;
}
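
// Stencil buffer attachment: the context's stencil buffer cache is consulted
// first for a buffer matching the render target's dimensions and sample count;
// only if no match is found is a new stencil buffer created. The comments in
// the creation path below explain why the newly attached buffer is cleared
// here rather than in the GL subclass.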
bool GrGpu::attachStencilBufferToRenderTarget(GrRenderTarget* rt) {
    GrAssert(NULL == rt->getStencilBuffer());
    GrStencilBuffer* sb =
        this->getContext()->findStencilBuffer(rt->width(),
                                              rt->height(),
                                              rt->numSamples());
    if (NULL != sb) {
        rt->setStencilBuffer(sb);
        bool attached = this->attachStencilBufferToRenderTarget(sb, rt);
        if (!attached) {
            rt->setStencilBuffer(NULL);
        }
        return attached;
    }
    if (this->createStencilBufferForRenderTarget(rt,
                                                 rt->width(), rt->height())) {
        // Right now we're clearing the stencil buffer here after it is
        // attached to an RT for the first time. When we start matching
        // stencil buffers with smaller color targets this will no longer
        // be correct because it won't be guaranteed to clear the entire
        // sb.
        // We used to clear down in the GL subclass using a special purpose
        // FBO. But iOS doesn't allow a stencil-only FBO. It reports unsupported
        // FBO status.
        GrDrawState::AutoRenderTargetRestore artr(this->drawState(), rt);
        this->clearStencil();
        return true;
    } else {
        return false;
    }
}

GrTexture* GrGpu::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    this->handleDirtyContext();
    GrTexture* tex = this->onWrapBackendTexture(desc);
    if (NULL == tex) {
        return NULL;
    }
    // TODO: defer this and attach dynamically
    GrRenderTarget* tgt = tex->asRenderTarget();
    if (NULL != tgt &&
        !this->attachStencilBufferToRenderTarget(tgt)) {
        tex->unref();
        return NULL;
    } else {
        return tex;
    }
}

GrRenderTarget* GrGpu::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    this->handleDirtyContext();
    return this->onWrapBackendRenderTarget(desc);
}

GrVertexBuffer* GrGpu::createVertexBuffer(uint32_t size, bool dynamic) {
    this->handleDirtyContext();
    return this->onCreateVertexBuffer(size, dynamic);
}

GrIndexBuffer* GrGpu::createIndexBuffer(uint32_t size, bool dynamic) {
    this->handleDirtyContext();
    return this->onCreateIndexBuffer(size, dynamic);
}

GrPath* GrGpu::createPath(const SkPath& path) {
    GrAssert(fCaps.pathStencilingSupport());
    this->handleDirtyContext();
    return this->onCreatePath(path);
}

void GrGpu::clear(const GrIRect* rect,
                  GrColor color,
                  GrRenderTarget* renderTarget) {
    GrDrawState::AutoRenderTargetRestore art;
    if (NULL != renderTarget) {
        art.set(this->drawState(), renderTarget);
    }
    if (NULL == this->getDrawState().getRenderTarget()) {
        return;
    }
    this->handleDirtyContext();
    this->onClear(rect, color);
}

void GrGpu::forceRenderTargetFlush() {
    this->handleDirtyContext();
    this->onForceRenderTargetFlush();
}

bool GrGpu::readPixels(GrRenderTarget* target,
                       int left, int top, int width, int height,
                       GrPixelConfig config, void* buffer,
                       size_t rowBytes, bool invertY) {
    this->handleDirtyContext();
    return this->onReadPixels(target, left, top, width, height,
                              config, buffer, rowBytes, invertY);
}

void GrGpu::writeTexturePixels(GrTexture* texture,
                               int left, int top, int width, int height,
                               GrPixelConfig config, const void* buffer,
                               size_t rowBytes) {
    this->handleDirtyContext();
    this->onWriteTexturePixels(texture, left, top, width, height,
                               config, buffer, rowBytes);
}

void GrGpu::resolveRenderTarget(GrRenderTarget* target) {
    GrAssert(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target);
}


////////////////////////////////////////////////////////////////////////////////
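
// The shared quad index buffer below stores six indices per quad (two
// triangles, 0-1-2 and 0-2-3, relative to each quad's first vertex) for up to
// MAX_QUADS quads; the static assert keeps 4 * MAX_QUADS within the range of a
// 16-bit index. As a rough sketch (hypothetical caller, not a call site in
// this file), a draw target holding 4 * n quad vertices could render n quads
// with something like:
//
//   target->setIndexSourceToBuffer(gpu->getQuadIndexBuffer());
//   target->drawIndexed(kTriangles_GrPrimitiveType, 0, 0, 4 * n, 6 * n);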
static const int MAX_QUADS = 1 << 12; // max possible: (1 << 14) - 1;

GR_STATIC_ASSERT(4 * MAX_QUADS <= 65535);

static inline void fill_indices(uint16_t* indices, int quadCount) {
    for (int i = 0; i < quadCount; ++i) {
        indices[6 * i + 0] = 4 * i + 0;
        indices[6 * i + 1] = 4 * i + 1;
        indices[6 * i + 2] = 4 * i + 2;
        indices[6 * i + 3] = 4 * i + 0;
        indices[6 * i + 4] = 4 * i + 2;
        indices[6 * i + 5] = 4 * i + 3;
    }
}

const GrIndexBuffer* GrGpu::getQuadIndexBuffer() const {
    if (NULL == fQuadIndexBuffer) {
        static const int SIZE = sizeof(uint16_t) * 6 * MAX_QUADS;
        GrGpu* me = const_cast<GrGpu*>(this);
        fQuadIndexBuffer = me->createIndexBuffer(SIZE, false);
        if (NULL != fQuadIndexBuffer) {
            uint16_t* indices = (uint16_t*)fQuadIndexBuffer->lock();
            if (NULL != indices) {
                fill_indices(indices, MAX_QUADS);
                fQuadIndexBuffer->unlock();
            } else {
                indices = (uint16_t*)GrMalloc(SIZE);
                fill_indices(indices, MAX_QUADS);
                if (!fQuadIndexBuffer->updateData(indices, SIZE)) {
                    fQuadIndexBuffer->unref();
                    fQuadIndexBuffer = NULL;
                    GrCrash("Can't get indices into buffer!");
                }
                GrFree(indices);
            }
        }
    }

    return fQuadIndexBuffer;
}

const GrVertexBuffer* GrGpu::getUnitSquareVertexBuffer() const {
    if (NULL == fUnitSquareVertexBuffer) {

        static const GrPoint DATA[] = {
            { 0,          0 },
            { SK_Scalar1, 0 },
            { SK_Scalar1, SK_Scalar1 },
            { 0,          SK_Scalar1 }
#if 0
            GrPoint(0,         0),
            GrPoint(SK_Scalar1,0),
            GrPoint(SK_Scalar1,SK_Scalar1),
            GrPoint(0,         SK_Scalar1)
#endif
        };
        static const size_t SIZE = sizeof(DATA);

        GrGpu* me = const_cast<GrGpu*>(this);
        fUnitSquareVertexBuffer = me->createVertexBuffer(SIZE, false);
        if (NULL != fUnitSquareVertexBuffer) {
            if (!fUnitSquareVertexBuffer->updateData(DATA, SIZE)) {
                fUnitSquareVertexBuffer->unref();
                fUnitSquareVertexBuffer = NULL;
                GrCrash("Can't get vertices into buffer!");
            }
        }
    }

    return fUnitSquareVertexBuffer;
}

////////////////////////////////////////////////////////////////////////////////

bool GrGpu::setupClipAndFlushState(DrawType type) {

    if (!fClipMaskManager.setupClipping(this->getClip())) {
        return false;
    }

    if (!this->flushGraphicsState(type)) {
        return false;
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
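
// Each geometry source push gets its own GeometryPoolState entry so that
// vertex/index data reserved from the pools survives nested source changes.
// In debug builds the new entry is poisoned with the DEBUG_INVAL_* values so
// that use of a pool buffer before a reserve/append call assigns it is caught.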
void GrGpu::geometrySourceWillPush() {
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    if (kArray_GeometrySrcType == geoSrc.fVertexSrc ||
        kReserved_GeometrySrcType == geoSrc.fVertexSrc) {
        this->finalizeReservedVertices();
    }
    if (kArray_GeometrySrcType == geoSrc.fIndexSrc ||
        kReserved_GeometrySrcType == geoSrc.fIndexSrc) {
        this->finalizeReservedIndices();
    }
    GeometryPoolState& newState = fGeomPoolStateStack.push_back();
#if GR_DEBUG
    newState.fPoolVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
    newState.fPoolStartVertex = DEBUG_INVAL_START_IDX;
    newState.fPoolIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
    newState.fPoolStartIndex = DEBUG_INVAL_START_IDX;
#else
    (void) newState; // silence compiler warning
#endif
}

void GrGpu::geometrySourceWillPop(const GeometrySrcState& restoredState) {
    // if popping last entry then pops are unbalanced with pushes
    GrAssert(fGeomPoolStateStack.count() > 1);
    fGeomPoolStateStack.pop_back();
}

void GrGpu::onDraw(const DrawInfo& info) {
    this->handleDirtyContext();
    if (!this->setupClipAndFlushState(PrimTypeToDrawType(info.primitiveType()))) {
        return;
    }
    this->onGpuDraw(info);
}

void GrGpu::onStencilPath(const GrPath* path, const SkStrokeRec&, SkPath::FillType fill) {
    this->handleDirtyContext();

    // TODO: make this more efficient (don't copy and copy back)
    GrAutoTRestore<GrStencilSettings> asr(this->drawState()->stencil());

    this->setStencilPathSettings(*path, fill, this->drawState()->stencil());
    if (!this->setupClipAndFlushState(kStencilPath_DrawType)) {
        return;
    }

    this->onGpuStencilPath(path, fill);
}
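
// Vertex/index pool bookkeeping: prepare*Pool() lazily creates a pool (or
// resets it when no client data is outstanding), onReserve*Space() and
// onSet*SourceToArray() allocate space and bump the corresponding use count,
// and the finalize/release methods unlock the pools or put unused bytes back.
// fVertexPoolUseCnt/fIndexPoolUseCnt track how many geometry sources currently
// have live data in the pools.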
void GrGpu::finalizeReservedVertices() {
    GrAssert(NULL != fVertexPool);
    fVertexPool->unlock();
}

void GrGpu::finalizeReservedIndices() {
    GrAssert(NULL != fIndexPool);
    fIndexPool->unlock();
}

void GrGpu::prepareVertexPool() {
    if (NULL == fVertexPool) {
        GrAssert(0 == fVertexPoolUseCnt);
        fVertexPool = SkNEW_ARGS(GrVertexBufferAllocPool, (this, true,
                                                           VERTEX_POOL_VB_SIZE,
                                                           VERTEX_POOL_VB_COUNT));
        fVertexPool->releaseGpuRef();
    } else if (!fVertexPoolUseCnt) {
        // the client doesn't have valid data in the pool
        fVertexPool->reset();
    }
}

void GrGpu::prepareIndexPool() {
    if (NULL == fIndexPool) {
        GrAssert(0 == fIndexPoolUseCnt);
        fIndexPool = SkNEW_ARGS(GrIndexBufferAllocPool, (this, true,
                                                         INDEX_POOL_IB_SIZE,
                                                         INDEX_POOL_IB_COUNT));
        fIndexPool->releaseGpuRef();
    } else if (!fIndexPoolUseCnt) {
        // the client doesn't have valid data in the pool
        fIndexPool->reset();
    }
}

bool GrGpu::onReserveVertexSpace(size_t vertexSize,
                                 int vertexCount,
                                 void** vertices) {
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();

    GrAssert(vertexCount > 0);
    GrAssert(NULL != vertices);

    this->prepareVertexPool();

    *vertices = fVertexPool->makeSpace(vertexSize,
                                       vertexCount,
                                       &geomPoolState.fPoolVertexBuffer,
                                       &geomPoolState.fPoolStartVertex);
    if (NULL == *vertices) {
        return false;
    }
    ++fVertexPoolUseCnt;
    return true;
}

bool GrGpu::onReserveIndexSpace(int indexCount, void** indices) {
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();

    GrAssert(indexCount > 0);
    GrAssert(NULL != indices);

    this->prepareIndexPool();

    *indices = fIndexPool->makeSpace(indexCount,
                                     &geomPoolState.fPoolIndexBuffer,
                                     &geomPoolState.fPoolStartIndex);
    if (NULL == *indices) {
        return false;
    }
    ++fIndexPoolUseCnt;
    return true;
}

void GrGpu::releaseReservedVertexSpace() {
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    GrAssert(kReserved_GeometrySrcType == geoSrc.fVertexSrc);
    size_t bytes = geoSrc.fVertexCount * GrDrawState::VertexSize(geoSrc.fVertexLayout);
    fVertexPool->putBack(bytes);
    --fVertexPoolUseCnt;
}

void GrGpu::releaseReservedIndexSpace() {
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    GrAssert(kReserved_GeometrySrcType == geoSrc.fIndexSrc);
    size_t bytes = geoSrc.fIndexCount * sizeof(uint16_t);
    fIndexPool->putBack(bytes);
    --fIndexPoolUseCnt;
}

void GrGpu::onSetVertexSourceToArray(const void* vertexArray, int vertexCount) {
    this->prepareVertexPool();
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
#if GR_DEBUG
    bool success =
#endif
    fVertexPool->appendVertices(GrDrawState::VertexSize(this->getVertexLayout()),
                                vertexCount,
                                vertexArray,
                                &geomPoolState.fPoolVertexBuffer,
                                &geomPoolState.fPoolStartVertex);
    ++fVertexPoolUseCnt;
    GR_DEBUGASSERT(success);
}

void GrGpu::onSetIndexSourceToArray(const void* indexArray, int indexCount) {
    this->prepareIndexPool();
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
#if GR_DEBUG
    bool success =
#endif
    fIndexPool->appendIndices(indexCount,
                              indexArray,
                              &geomPoolState.fPoolIndexBuffer,
                              &geomPoolState.fPoolStartIndex);
    ++fIndexPoolUseCnt;
    GR_DEBUGASSERT(success);
}

void GrGpu::releaseVertexArray() {
    // if vertex source was array, we stowed data in the pool
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    GrAssert(kArray_GeometrySrcType == geoSrc.fVertexSrc);
    size_t bytes = geoSrc.fVertexCount * GrDrawState::VertexSize(geoSrc.fVertexLayout);
    fVertexPool->putBack(bytes);
    --fVertexPoolUseCnt;
}

void GrGpu::releaseIndexArray() {
    // if index source was array, we stowed data in the pool
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    GrAssert(kArray_GeometrySrcType == geoSrc.fIndexSrc);
    size_t bytes = geoSrc.fIndexCount * sizeof(uint16_t);
    fIndexPool->putBack(bytes);
    --fIndexPoolUseCnt;
}