/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrInOrderDrawBuffer.h"

#include "GrBufferAllocPool.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrPath.h"
#include "GrPoint.h"
#include "GrRenderTarget.h"
#include "GrTemplates.h"
#include "GrTexture.h"
#include "GrVertexBuffer.h"

// A GrDrawTarget that records draws/clears/clips/state changes into a command
// list (fCmds plus the parallel per-command arrays) and replays them against
// fDstGpu in flush(). The vertex/index pools are held by reference and must
// outlive this buffer — TODO confirm against the owning GrContext.
GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
                                         GrVertexBufferAllocPool* vertexPool,
                                         GrIndexBufferAllocPool* indexPool)
    : GrDrawTarget(gpu->getContext())
    , fDstGpu(gpu)
    , fClipSet(true)
    , fClipProxyState(kUnknown_ClipProxyState)
    , fVertexPool(*vertexPool)
    , fIndexPool(*indexPool)
    , fFlushing(false) {

    fDstGpu->ref();
    // The buffer reports the destination GPU's capabilities as its own.
    fCaps.reset(SkRef(fDstGpu->caps()));

    GrAssert(NULL != vertexPool);
    GrAssert(NULL != indexPool);

    // Seed the geometry-pool state stack with one entry; pushes/pops mirror
    // geometrySourceWillPush/geometrySourceWillPop.
    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#if GR_DEBUG
    // Poison the buffer/start fields in debug builds so use before a reserve
    // is detectable.
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
    this->reset();
}

GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
    // Drop all recorded commands and the refs they hold.
    this->reset();
    // This must be called before the GrDrawTarget destructor.
    this->releaseGeometry();
    fDstGpu->unref();
}

////////////////////////////////////////////////////////////////////////////////

namespace {
// Computes the bounding rect of vertexCount positions. Assumes each vertex
// begins with a GrPoint position and that vertices are vertexSize bytes apart.
void get_vertex_bounds(const void* vertices,
                       size_t vertexSize,
                       int vertexCount,
                       SkRect* bounds) {
    GrAssert(vertexSize >= sizeof(GrPoint));
    GrAssert(vertexCount > 0);
    const GrPoint* point = static_cast<const GrPoint*>(vertices);
    bounds->fLeft = bounds->fRight = point->fX;
    bounds->fTop = bounds->fBottom = point->fY;
    for (int i = 1; i < vertexCount; ++i) {
        // Step by the full vertex stride, not sizeof(GrPoint).
        point = reinterpret_cast<GrPoint*>(reinterpret_cast<intptr_t>(point) + vertexSize);
        bounds->growToInclude(point->fX, point->fY);
    }
}
}


namespace {

// Vertex layout: position, color, local UV (offsets are cumulative).
extern const GrVertexAttrib kRectPosColorUVAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,                               kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(GrPoint),                 kColor_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType,  sizeof(GrPoint)+sizeof(GrColor),
                                                                  kLocalCoord_GrVertexAttribBinding},
};

// Vertex layout: position, local UV (no color).
extern const GrVertexAttrib kRectPosUVAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding},
};

// Selects one of the static attrib arrays above based on which optional
// channels are present, and reports the byte offsets of the color and local
// coord channels (-1 when absent).
static void set_vertex_attributes(GrDrawState* drawState,
                                  bool hasColor, bool hasUVs,
                                  int* colorOffset, int* localOffset) {
    *colorOffset = -1;
    *localOffset = -1;

    // Using per-vertex colors allows batching across colors. (A lot of rects in a row differing
    // only in color is a common occurrence in tables). However, having per-vertex colors disables
    // blending optimizations because we don't know if the color will be solid or not. These
    // optimizations help determine whether coverage and color can be blended correctly when
    // dual-source blending isn't available. This comes into play when there is coverage. If colors
    // were a stage it could take a hint that every vertex's color will be opaque.
    if (hasColor && hasUVs) {
        *colorOffset = sizeof(GrPoint);
        *localOffset = sizeof(GrPoint) + sizeof(GrColor);
        drawState->setVertexAttribs<kRectPosColorUVAttribs>(3);
    } else if (hasColor) {
        *colorOffset = sizeof(GrPoint);
        // First two entries of the pos+color+UV array are pos+color.
        drawState->setVertexAttribs<kRectPosColorUVAttribs>(2);
    } else if (hasUVs) {
        *localOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<kRectPosUVAttribs>(2);
    } else {
        // Position only.
        drawState->setVertexAttribs<kRectPosUVAttribs>(1);
    }
}

};

// Records a rect draw as a 4-vertex fan (quad index buffer supplies the two
// triangles). Vertices are pre-transformed to device space so consecutive
// rect draws can batch across view-matrix changes.
void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect,
                                     const SkMatrix* matrix,
                                     const SkRect* localRect,
                                     const SkMatrix* localMatrix) {
    GrDrawState::AutoColorRestore acr;

    GrDrawState* drawState = this->drawState();

    GrColor color = drawState->getColor();

    int colorOffset, localOffset;
    // Per-vertex color is used only when blending can still be computed
    // correctly (dual-source blending available, or coverage is solid).
    set_vertex_attributes(drawState,
                   this->caps()->dualSourceBlendingSupport() || drawState->hasSolidCoverage(),
                   NULL != localRect,
                   &colorOffset, &localOffset);
    if (colorOffset >= 0) {
        // We set the draw state's color to white here. This is done so that any batching performed
        // in our subclass's onDraw() won't get a false from GrDrawState::op== due to a color
        // mismatch. TODO: Once vertex layout is owned by GrDrawState it should skip comparing the
        // constant color in its op== when the kColor layout bit is set and then we can remove
        // this.
        acr.set(drawState, 0xFFFFFFFF);
    }

    AutoReleaseGeometry geo(this, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    // Go to device coords to allow batching across matrix changes
    SkMatrix combinedMatrix;
    if (NULL != matrix) {
        combinedMatrix = *matrix;
    } else {
        combinedMatrix.reset();
    }
    combinedMatrix.postConcat(drawState->getViewMatrix());
    // When the caller has provided an explicit source rect for a stage then we don't want to
    // modify that stage's matrix. Otherwise if the effect is generating its source rect from
    // the vertex positions then we have to account for the view matrix change.
    GrDrawState::AutoViewMatrixRestore avmr;
    if (!avmr.setIdentity(drawState)) {
        return;
    }

    size_t vsize = drawState->getVertexSize();

    geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vsize);
    combinedMatrix.mapPointsWithStride(geo.positions(), vsize, 4);

    SkRect devBounds;
    // since we already computed the dev verts, set the bounds hint. This will help us avoid
    // unnecessary clipping in our onDraw().
    get_vertex_bounds(geo.vertices(), vsize, 4, &devBounds);

    if (localOffset >= 0) {
        // Write the local-coord channel, optionally transformed by localMatrix.
        GrPoint* coords = GrTCast<GrPoint*>(GrTCast<intptr_t>(geo.vertices()) + localOffset);
        coords->setRectFan(localRect->fLeft, localRect->fTop,
                           localRect->fRight, localRect->fBottom,
                           vsize);
        if (NULL != localMatrix) {
            localMatrix->mapPointsWithStride(coords, vsize, 4);
        }
    }

    if (colorOffset >= 0) {
        // Replicate the (restored-to-white above) draw color per vertex.
        GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + colorOffset);
        for (int i = 0; i < 4; ++i) {
            *vertColor = color;
            vertColor = (GrColor*) ((intptr_t) vertColor + vsize);
        }
    }

    this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer());
    this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);

    // to ensure that stashing the drawState ptr is valid
    GrAssert(this->drawState() == drawState);
}

// Returns true if devBounds is known to lie entirely inside the current clip,
// in which case the clip test can be skipped for this draw. Caches a rect
// proxy of the clip (fClipProxy) when the clip is a single integer rect.
bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
    if (!this->getDrawState().isClipState()) {
        // Clipping disabled: everything is trivially inside.
        return true;
    }
    if (kUnknown_ClipProxyState == fClipProxyState) {
        SkIRect rect;
        bool iior;
        this->getClip()->getConservativeBounds(this->getDrawState().getRenderTarget(), &rect, &iior);
        if (iior) {
            // The clip is a rect. We will remember that in fProxyClip. It is common for an edge (or
            // all edges) of the clip to be at the edge of the RT. However, we get that clipping for
            // free via the viewport. We don't want to think that clipping must be enabled in this
            // case. So we extend the clip outward from the edge to avoid these false negatives.
            fClipProxyState = kValid_ClipProxyState;
            fClipProxy = SkRect::MakeFromIRect(rect);

            if (fClipProxy.fLeft <= 0) {
                fClipProxy.fLeft = SK_ScalarMin;
            }
            if (fClipProxy.fTop <= 0) {
                fClipProxy.fTop = SK_ScalarMin;
            }
            if (fClipProxy.fRight >= this->getDrawState().getRenderTarget()->width()) {
                fClipProxy.fRight = SK_ScalarMax;
            }
            if (fClipProxy.fBottom >= this->getDrawState().getRenderTarget()->height()) {
                fClipProxy.fBottom = SK_ScalarMax;
            }
        } else {
            fClipProxyState = kInvalid_ClipProxyState;
        }
    }
    if (kValid_ClipProxyState == fClipProxyState) {
        return fClipProxy.contains(devBounds);
    }
    // No rect proxy: ask the clip stack directly (bounds must be translated
    // into clip space first).
    SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
                            SkIntToScalar(this->getClip()->fOrigin.fY)};
    SkRect clipSpaceBounds = devBounds;
    clipSpaceBounds.offset(originOffset);
    return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
}

// Attempts to merge an instanced draw into the previously recorded draw.
// Returns the number of instances successfully concatenated (0 if none).
int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
    GrAssert(info.isInstanced());

    const GeometrySrcState& geomSrc = this->getGeomSrc();
    const GrDrawState& drawState = this->getDrawState();

    // we only attempt to concat the case when reserved verts are used with a client-specified index
    // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated
    // between draws.
    if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
        kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
        return 0;
    }
    // Check if there is a draw info that is compatible that uses the same VB from the pool and
    // the same IB
    if (kDraw_Cmd != fCmds.back()) {
        return 0;
    }

    DrawRecord* draw = &fDraws.back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;

    if (!draw->isInstanced() ||
        draw->verticesPerInstance() != info.verticesPerInstance() ||
        draw->indicesPerInstance() != info.indicesPerInstance() ||
        draw->fVertexBuffer != vertexBuffer ||
        draw->fIndexBuffer != geomSrc.fIndexBuffer) {
        return 0;
    }
    // info does not yet account for the offset from the start of the pool's VB while the previous
    // draw record does.
    int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
    if (draw->startVertex() + draw->vertexCount() != adjustedStartVertex) {
        return 0;
    }

    GrAssert(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->instanceCount();
    instancesToConcat = GrMin(instancesToConcat, info.instanceCount());

    // update the amount of reserved vertex data actually referenced in draws
    size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
                         drawState.getVertexSize();
    poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);

    draw->adjustInstanceCount(instancesToConcat);
    return instancesToConcat;
}

// RAII helper: if set() is called with a draw state that has clipping on, it
// disables the clip and re-enables it when this object goes out of scope.
class AutoClipReenable {
public:
    AutoClipReenable() : fDrawState(NULL) {}
    ~AutoClipReenable() {
        if (NULL != fDrawState) {
            fDrawState->enableState(GrDrawState::kClip_StateBit);
        }
    }
    void set(GrDrawState* drawState) {
        if (drawState->isClipState()) {
            fDrawState = drawState;
            drawState->disableState(GrDrawState::kClip_StateBit);
        }
    }
private:
    GrDrawState* fDrawState;    // non-owning; NULL when no clip was disabled
};

// Records a draw command: captures clip/state if changed, tries to merge
// instanced draws with the previous record, and resolves pool-relative
// vertex/index offsets into buffer-absolute ones.
void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrDrawState& drawState = this->getDrawState();
    AutoClipReenable acr;

    if (drawState.isClipState() &&
        NULL != info.getDevBounds() &&
        this->quickInsideClip(*info.getDevBounds())) {
        // Draw is provably inside the clip: temporarily drop the clip bit so
        // the recorded state (and playback) skips clipping work.
        acr.set(this->drawState());
    }

    if (this->needsNewClip()) {
        this->recordClip();
    }
    if (this->needsNewState()) {
        this->recordState();
    }

    DrawRecord* draw;
    if (info.isInstanced()) {
        int instancesConcated = this->concatInstancedDraw(info);
        if (info.instanceCount() > instancesConcated) {
            // Record the remainder that couldn't be merged into the prior draw.
            draw = this->recordDraw(info);
            draw->adjustInstanceCount(-instancesConcated);
        } else {
            // Fully merged into the previous draw record.
            return;
        }
    } else {
        draw = this->recordDraw(info);
    }

    switch (this->getGeomSrc().fVertexSrc) {
        case kBuffer_GeometrySrcType:
            draw->fVertexBuffer = this->getGeomSrc().fVertexBuffer;
            break;
        case kReserved_GeometrySrcType: // fallthrough
        case kArray_GeometrySrcType: {
            // Track how far into the reserved pool space this draw reads so
            // unused tail space can be returned on release.
            size_t vertexBytes = (info.vertexCount() + info.startVertex()) *
                                 drawState.getVertexSize();
            poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);
            draw->fVertexBuffer = poolState.fPoolVertexBuffer;
            draw->adjustStartVertex(poolState.fPoolStartVertex);
            break;
        }
        default:
            GrCrash("unknown geom src type");
    }
    draw->fVertexBuffer->ref();

    if (info.isIndexed()) {
        switch (this->getGeomSrc().fIndexSrc) {
            case kBuffer_GeometrySrcType:
                draw->fIndexBuffer = this->getGeomSrc().fIndexBuffer;
                break;
            case kReserved_GeometrySrcType: // fallthrough
            case kArray_GeometrySrcType: {
                size_t indexBytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
                poolState.fUsedPoolIndexBytes = GrMax(poolState.fUsedPoolIndexBytes, indexBytes);
                draw->fIndexBuffer = poolState.fPoolIndexBuffer;
                draw->adjustStartIndex(poolState.fPoolStartIndex);
                break;
            }
            default:
                GrCrash("unknown geom src type");
        }
        draw->fIndexBuffer->ref();
    } else {
        draw->fIndexBuffer = NULL;
    }
}

GrInOrderDrawBuffer::StencilPath::StencilPath() : fStroke(SkStrokeRec::kFill_InitStyle) {}

// Records a stencil-path command (path ref'd for the lifetime of the record).
void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, const SkStrokeRec& stroke,
                                        SkPath::FillType fill) {
    if (this->needsNewClip()) {
        this->recordClip();
    }
    // Only compare the subset of GrDrawState relevant to path stenciling?
    if (this->needsNewState()) {
        this->recordState();
    }
    StencilPath* sp = this->recordStencilPath();
    sp->fPath.reset(path);
    path->ref();
    sp->fFill = fill;
    sp->fStroke = stroke;
}

// Records a clear command. NULL renderTarget means the current draw state's
// target; NULL rect means the whole target.
void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color, GrRenderTarget* renderTarget) {
    SkIRect r;
    if (NULL == renderTarget) {
        renderTarget = this->drawState()->getRenderTarget();
        GrAssert(NULL != renderTarget);
    }
    if (NULL == rect) {
        // We could do something smart and remove previous draws and clears to
        // the current render target. If we get that smart we have to make sure
        // those draws aren't read before this clear (render-to-texture).
        r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
        rect = &r;
    }
    Clear* clr = this->recordClear();
    clr->fColor = color;
    clr->fRect = *rect;
    clr->fRenderTarget = renderTarget;
    renderTarget->ref();
}

// Discards all recorded commands, releasing the refs held by draw records,
// and resets the geometry pools. Must not be called mid geometry push.
void GrInOrderDrawBuffer::reset() {
    GrAssert(1 == fGeoPoolStateStack.count());
    this->resetVertexSource();
    this->resetIndexSource();
    int numDraws = fDraws.count();
    for (int d = 0; d < numDraws; ++d) {
        // we always have a VB, but not always an IB
        GrAssert(NULL != fDraws[d].fVertexBuffer);
        fDraws[d].fVertexBuffer->unref();
        GrSafeUnref(fDraws[d].fIndexBuffer);
    }
    fCmds.reset();
    fDraws.reset();
    fStencilPaths.reset();
    fStates.reset();
    fClears.reset();
    fVertexPool.reset();
    fIndexPool.reset();
    fClips.reset();
    fClipOrigins.reset();
    fCopySurfaces.reset();
    fClipSet = true;
}

// Replays every recorded command against fDstGpu, then resets the buffer.
// Reentrant calls (e.g. triggered from playback) are ignored via fFlushing.
void GrInOrderDrawBuffer::flush() {
    if (fFlushing) {
        return;
    }

    // Reserved geometry must have been released before a flush.
    GrAssert(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
    GrAssert(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);

    int numCmds = fCmds.count();
    if (0 == numCmds) {
        return;
    }

    GrAutoTRestore<bool> flushRestore(&fFlushing);
    fFlushing = true;

    // Make pooled vertex/index data visible to the GPU.
    fVertexPool.unlock();
    fIndexPool.unlock();

    GrDrawTarget::AutoClipRestore acr(fDstGpu);
    AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);

    // Play back through a scratch draw state; the GPU's own state is restored
    // at the end.
    GrDrawState playbackState;
    GrDrawState* prevDrawState = fDstGpu->drawState();
    prevDrawState->ref();
    fDstGpu->setDrawState(&playbackState);

    GrClipData clipData;

    // Per-command-type cursors into the parallel arrays.
    int currState = 0;
    int currClip = 0;
    int currClear = 0;
    int currDraw = 0;
    int currStencilPath = 0;
    int currCopySurface = 0;

    for (int c = 0; c < numCmds; ++c) {
        switch (fCmds[c]) {
            case kDraw_Cmd: {
                const DrawRecord& draw = fDraws[currDraw];
                fDstGpu->setVertexSourceToBuffer(draw.fVertexBuffer);
                if (draw.isIndexed()) {
                    fDstGpu->setIndexSourceToBuffer(draw.fIndexBuffer);
                }
                fDstGpu->executeDraw(draw);

                ++currDraw;
                break;
            }
            case kStencilPath_Cmd: {
                const StencilPath& sp = fStencilPaths[currStencilPath];
                fDstGpu->stencilPath(sp.fPath.get(), sp.fStroke, sp.fFill);
                ++currStencilPath;
                break;
            }
            case kSetState_Cmd:
                fStates[currState].restoreTo(&playbackState);
                ++currState;
                break;
            case kSetClip_Cmd:
                clipData.fClipStack = &fClips[currClip];
                clipData.fOrigin = fClipOrigins[currClip];
                fDstGpu->setClip(&clipData);
                ++currClip;
                break;
            case kClear_Cmd:
                fDstGpu->clear(&fClears[currClear].fRect,
                               fClears[currClear].fColor,
                               fClears[currClear].fRenderTarget);
                ++currClear;
                break;
            case kCopySurface_Cmd:
                fDstGpu->copySurface(fCopySurfaces[currCopySurface].fDst.get(),
                                     fCopySurfaces[currCopySurface].fSrc.get(),
                                     fCopySurfaces[currCopySurface].fSrcRect,
                                     fCopySurfaces[currCopySurface].fDstPoint);
                ++currCopySurface;
                break;
        }
    }
    // we should have consumed all the states, clips, etc.
    GrAssert(fStates.count() == currState);
    GrAssert(fClips.count() == currClip);
    GrAssert(fClipOrigins.count() == currClip);
    GrAssert(fClears.count() == currClear);
    GrAssert(fDraws.count() == currDraw);
    GrAssert(fCopySurfaces.count() == currCopySurface);

    fDstGpu->setDrawState(prevDrawState);
    prevDrawState->unref();
    this->reset();
}

// Records a surface copy if the destination GPU can perform it; the surfaces
// are ref'd until playback.
bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst,
                                        GrSurface* src,
                                        const SkIRect& srcRect,
                                        const SkIPoint& dstPoint) {
    if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
        CopySurface* cs = this->recordCopySurface();
        cs->fDst.reset(SkRef(dst));
        cs->fSrc.reset(SkRef(src));
        cs->fSrcRect = srcRect;
        cs->fDstPoint = dstPoint;
        return true;
    } else {
        return false;
    }
}

// Defers the copy-capability query to the destination GPU.
bool GrInOrderDrawBuffer::onCanCopySurface(GrSurface* dst,
                                           GrSurface* src,
                                           const SkIRect& srcRect,
                                           const SkIPoint& dstPoint) {
    return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint);
}

void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
    fDstGpu->initCopySurfaceDstDesc(src, desc);
}

void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(
                                int vertexCount,
                                int indexCount) {
    // We use geometryHints() to know whether to flush the draw buffer. We
    // can't flush if we are inside an unbalanced pushGeometrySource.
    // Moreover, flushing blows away vertex and index data that was
    // previously reserved. So if the vertex or index data is pulled from
    // reserved space and won't be released by this request then we can't
    // flush.
    bool insideGeoPush = fGeoPoolStateStack.count() > 1;

    // A zero count means this request won't replace (and thus release) the
    // existing reserved source of that kind.
    bool unreleasedVertexSpace =
        !vertexCount &&
        kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc;

    bool unreleasedIndexSpace =
        !indexCount &&
        kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc;

    // we don't want to finalize any reserved geom on the target since
    // we don't know that the client has finished writing to it.
    bool targetHasReservedGeom = fDstGpu->hasReservedVerticesOrIndices();

    int vcount = vertexCount;
    int icount = indexCount;

    if (!insideGeoPush &&
        !unreleasedVertexSpace &&
        !unreleasedIndexSpace &&
        !targetHasReservedGeom &&
        this->geometryHints(&vcount, &icount)) {

        this->flush();
    }
}

// Reports how many vertices/indices fit in the pools' current buffers (via
// the in/out params) and returns true when a flush is recommended.
bool GrInOrderDrawBuffer::geometryHints(int* vertexCount,
                                        int* indexCount) const {
    // we will recommend a flush if the data could fit in a single
    // preallocated buffer but none are left and it can't fit
    // in the current buffer (which may not be prealloced).
    bool flush = false;
    if (NULL != indexCount) {
        int32_t currIndices = fIndexPool.currentBufferIndices();
        if (*indexCount > currIndices &&
            (!fIndexPool.preallocatedBuffersRemaining() &&
             *indexCount <= fIndexPool.preallocatedBufferIndices())) {

            flush = true;
        }
        *indexCount = currIndices;
    }
    if (NULL != vertexCount) {
        size_t vertexSize = this->getDrawState().getVertexSize();
        int32_t currVertices = fVertexPool.currentBufferVertices(vertexSize);
        if (*vertexCount > currVertices &&
            (!fVertexPool.preallocatedBuffersRemaining() &&
             *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexSize))) {

            flush = true;
        }
        *vertexCount = currVertices;
    }
    return flush;
}

// Reserves vertex space from the pool; records which pool buffer/offset the
// reservation landed in so draws can be rebased later.
bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
                                               int vertexCount,
                                               void** vertices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    GrAssert(vertexCount > 0);
    GrAssert(NULL != vertices);
    GrAssert(0 == poolState.fUsedPoolVertexBytes);

    *vertices = fVertexPool.makeSpace(vertexSize,
                                      vertexCount,
                                      &poolState.fPoolVertexBuffer,
                                      &poolState.fPoolStartVertex);
    return NULL != *vertices;
}

// Index-space analogue of onReserveVertexSpace.
bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    GrAssert(indexCount > 0);
    GrAssert(NULL != indices);
    GrAssert(0 == poolState.fUsedPoolIndexBytes);

    *indices = fIndexPool.makeSpace(indexCount,
                                    &poolState.fPoolIndexBuffer,
                                    &poolState.fPoolStartIndex);
    return NULL != *indices;
}

void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release vertex space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    GrAssert(kReserved_GeometrySrcType == geoSrc.fVertexSrc ||
             kArray_GeometrySrcType == geoSrc.fVertexSrc);

    // When the caller reserved vertex buffer space we gave it back a pointer
    // provided by the vertex buffer pool. At each draw we tracked the largest
    // offset into the pool's pointer that was referenced. Now we return to the
    // pool any portion at the tail of the allocation that no draw referenced.
    size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount;
    fVertexPool.putBack(reservedVertexBytes -
                        poolState.fUsedPoolVertexBytes);
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fPoolVertexBuffer = NULL;
    poolState.fPoolStartVertex = 0;
}

void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release index space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    GrAssert(kReserved_GeometrySrcType == geoSrc.fIndexSrc ||
             kArray_GeometrySrcType == geoSrc.fIndexSrc);

    // Similar to releaseReservedVertexSpace we return any unused portion at
    // the tail
    size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
    fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
    poolState.fUsedPoolIndexBytes = 0;
    poolState.fPoolIndexBuffer = NULL;
    poolState.fPoolStartIndex = 0;
}

// Client-supplied vertex arrays are copied into the pool so their data
// survives until flush.
void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
                                                   int vertexCount) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    GrAssert(0 == poolState.fUsedPoolVertexBytes);
#if GR_DEBUG
    bool success =
#endif
    fVertexPool.appendVertices(this->getVertexSize(),
                               vertexCount,
                               vertexArray,
                               &poolState.fPoolVertexBuffer,
                               &poolState.fPoolStartVertex);
    GR_DEBUGASSERT(success);
}

// Index-array analogue of onSetVertexSourceToArray.
void GrInOrderDrawBuffer::onSetIndexSourceToArray(const void* indexArray,
                                                  int indexCount) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    GrAssert(0 == poolState.fUsedPoolIndexBytes);
#if GR_DEBUG
    bool success =
#endif
    fIndexPool.appendIndices(indexCount,
                             indexArray,
                             &poolState.fPoolIndexBuffer,
                             &poolState.fPoolStartIndex);
    GR_DEBUGASSERT(success);
}

void GrInOrderDrawBuffer::releaseVertexArray() {
    // When the client provides an array as the vertex source we handled it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedVertexSpace();
}

void GrInOrderDrawBuffer::releaseIndexArray() {
    // When the client provides an array as the index source we handled it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedIndexSpace();
}

// Pushes a fresh (debug-poisoned) pool state entry; mirrors the constructor's
// initial entry.
void GrInOrderDrawBuffer::geometrySourceWillPush() {
    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#if GR_DEBUG
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
}

void GrInOrderDrawBuffer::geometrySourceWillPop(
                                        const GeometrySrcState& restoredState) {
    GrAssert(fGeoPoolStateStack.count() > 1);
    fGeoPoolStateStack.pop_back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    // we have to assume that any slack we had in our vertex/index data
    // is now unreleasable because data may have been appended later in the
    // pool.
    if (kReserved_GeometrySrcType == restoredState.fVertexSrc ||
        kArray_GeometrySrcType == restoredState.fVertexSrc) {
        poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount;
    }
    if (kReserved_GeometrySrcType == restoredState.fIndexSrc ||
        kArray_GeometrySrcType == restoredState.fIndexSrc) {
        poolState.fUsedPoolIndexBytes = sizeof(uint16_t) *
                                        restoredState.fIndexCount;
    }
}

// True when the current draw state differs from the last recorded one.
bool GrInOrderDrawBuffer::needsNewState() const {
    return fStates.empty() || !fStates.back().isEqual(this->getDrawState());
}

// True when clipping is enabled, the clip has changed since the last record
// (fClipSet), and it differs from the last recorded clip stack/origin.
bool GrInOrderDrawBuffer::needsNewClip() const {
    GrAssert(fClips.count() == fClipOrigins.count());
    if (this->getDrawState().isClipState()) {
        if (fClipSet &&
            (fClips.empty() ||
             fClips.back() != *this->getClip()->fClipStack ||
             fClipOrigins.back() != this->getClip()->fOrigin)) {
            return true;
        }
    }
    return false;
}

void GrInOrderDrawBuffer::recordClip() {
    fClips.push_back() = *this->getClip()->fClipStack;
    fClipOrigins.push_back() = this->getClip()->fOrigin;
    fClipSet = false;
    fCmds.push_back(kSetClip_Cmd);
}

void GrInOrderDrawBuffer::recordState() {
    fStates.push_back().saveFrom(this->getDrawState());
    fCmds.push_back(kSetState_Cmd);
}

GrInOrderDrawBuffer::DrawRecord* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info) {
    fCmds.push_back(kDraw_Cmd);
    return &fDraws.push_back(info);
}

GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath() {
    fCmds.push_back(kStencilPath_Cmd);
    return &fStencilPaths.push_back();
}

GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear() {
    fCmds.push_back(kClear_Cmd);
    return &fClears.push_back();
}

GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface() {
    fCmds.push_back(kCopySurface_Cmd);
    return &fCopySurfaces.push_back();
}


// Marks the clip dirty and invalidates the cached rect proxy used by
// quickInsideClip().
void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
    INHERITED::clipWillBeSet(newClipData);
    fClipSet = true;
    fClipProxyState = kUnknown_ClipProxyState;
}