/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrBatch_DEFINED
#define GrBatch_DEFINED

#include <new>
#include "GrNonAtomicRef.h"

#include "SkRect.h"
#include "SkString.h"

class GrCaps;
class GrBatchFlushState;
class GrRenderTarget;

/**
 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
 * subclasses complete freedom to decide how / what they can batch.
 *
 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
 * merged using combineIfPossible. When two batches merge, one takes on the union of the data
 * and the other is left empty. The merged batch becomes responsible for drawing the data from
 * both of the original batches.
 *
 * If there are any possible optimizations which might require knowing more about the full state
 * of the draw, e.g., whether or not the GrBatch is allowed to tweak alpha for coverage, then this
 * information will be communicated to the GrBatch prior to geometry generation.
 *
 * The bounds of the batch must contain all the vertices in device space *irrespective* of the
 * clip. The bounds are used in determining which clip elements must be applied and thus the
 * bounds cannot in turn depend upon the clip.
 */
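/*
 * A minimal sketch of what a subclass might look like (illustrative only; the
 * class, fields, and merge logic below are hypothetical, not part of Ganesh).
 * It shows how DEFINE_BATCH_CLASS_ID, setBounds/joinBounds, cast<>, and the
 * private onXXX hooks are expected to fit together; the remaining pure virtuals
 * (renderTargetUniqueID, dumpInfo, renderTarget) are omitted for brevity:
 *
 *   class HypotheticalRectBatch : public GrBatch {
 *   public:
 *       DEFINE_BATCH_CLASS_ID
 *
 *       HypotheticalRectBatch(const SkRect& deviceRect)
 *           : INHERITED(ClassID()) {
 *           fRects.push_back(deviceRect);
 *           this->setBounds(deviceRect);  // device space, independent of clip
 *       }
 *
 *       const char* name() const override { return "HypotheticalRectBatch"; }
 *
 *   private:
 *       bool onCombineIfPossible(GrBatch* that, const GrCaps&) override {
 *           // combineIfPossible() has already matched class IDs, so cast<> is safe.
 *           HypotheticalRectBatch* other = that->cast<HypotheticalRectBatch>();
 *           fRects.insert(fRects.end(), other->fRects.begin(), other->fRects.end());
 *           this->joinBounds(other->bounds());
 *           return true;  // 'other' is now empty; this batch draws both data sets
 *       }
 *
 *       void onPrepare(GrBatchFlushState*) override { ... generate geometry ... }
 *       void onDraw(GrBatchFlushState*) override { ... issue draw commands ... }
 *
 *       std::vector<SkRect> fRects;  // the captured draw arguments
 *       typedef GrBatch INHERITED;
 *   };
 */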
#define GR_BATCH_SPEW 0
#if GR_BATCH_SPEW
    #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
    #define GrBATCH_SPEW(code) code
#else
    #define GrBATCH_SPEW(code)
    #define GrBATCH_INFO(...)
#endif

// A helper macro to generate a class-static ID
#define DEFINE_BATCH_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenBatchClassID(); \
        return kClassID; \
    }

class GrBatch : public GrNonAtomicRef<GrBatch> {
public:
    GrBatch(uint32_t classID);
    virtual ~GrBatch();

    virtual const char* name() const = 0;

    bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
        if (this->classID() != that->classID()) {
            return false;
        }

        return this->onCombineIfPossible(that, caps);
    }

    const SkRect& bounds() const { return fBounds; }

    void* operator new(size_t size);
    void operator delete(void* target);

    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }

    /**
     * Helper for safely down-casting to a GrBatch subclass
     */
    template <typename T> const T& cast() const {
        SkASSERT(T::ClassID() == this->classID());
        return *static_cast<const T*>(this);
    }

    template <typename T> T* cast() {
        SkASSERT(T::ClassID() == this->classID());
        return static_cast<T*>(this);
    }

    uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }

#if GR_BATCH_SPEW
    uint32_t uniqueID() const { return fUniqueID; }
#endif
    SkDEBUGCODE(bool isUsed() const { return fUsed; })

    /** Called prior to drawing. The batch should perform any resource creation necessary to
        quickly issue its draw when draw is called. */
    void prepare(GrBatchFlushState* state) { this->onPrepare(state); }

    /** Issues the batch's commands to GrGpu. */
    void draw(GrBatchFlushState* state) { this->onDraw(state); }

    /** Used to block batching across render target changes. Remove this once we store
        GrBatches for different RTs in different targets. */
    virtual uint32_t renderTargetUniqueID() const = 0;

    /** Used for spewing information about batches when debugging. */
    virtual SkString dumpInfo() const = 0;

    /** Can remove this when multi-draw-buffer lands */
    virtual GrRenderTarget* renderTarget() const = 0;

protected:
    // NOTE: compute some bounds, even if extremely conservative. Do *NOT* setLargest on the
    // bounds rect because we outset it for dst copy textures.
    void setBounds(const SkRect& newBounds) { fBounds = newBounds; }

    void joinBounds(const SkRect& otherBounds) {
        fBounds.joinPossiblyEmptyRect(otherBounds);
    }

    static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }

    SkRect fBounds;

private:
    virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;

    virtual void onPrepare(GrBatchFlushState*) = 0;
    virtual void onDraw(GrBatchFlushState*) = 0;

    static uint32_t GenID(int32_t* idCounter) {
        // The atomic inc returns the old value, not the incremented value. So we add
        // 1 to the returned value.
        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
        if (!id) {
            SkFAIL("This should never wrap as it should only be called once for each GrBatch "
                   "subclass.");
        }
        return id;
    }

    enum {
        kIllegalBatchID = 0,
    };

    SkDEBUGCODE(bool fUsed;)
    const uint32_t fClassID;
#if GR_BATCH_SPEW
    static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); }
    const uint32_t fUniqueID;
    static int32_t gCurrBatchUniqueID;
#endif
    static int32_t gCurrBatchClassID;
};

#endif
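
/*
 * A rough sketch (assumed, simplified) of the two-phase flow that the
 * prepare()/draw() split implies; the real sequencing lives in Ganesh's flush
 * logic, and 'fBatches' / 'flushState' here are hypothetical:
 *
 *   // Phase 1: every batch creates/uploads the resources its draw will need.
 *   for (int i = 0; i < fBatches.count(); ++i) {
 *       fBatches[i]->prepare(flushState);
 *   }
 *   // Phase 2: with resources ready, issue all GPU commands back to back.
 *   for (int i = 0; i < fBatches.count(); ++i) {
 *       fBatches[i]->draw(flushState);
 *   }
 */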