/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrBatch_DEFINED
#define GrBatch_DEFINED

#include <new>
#include "GrBatchTarget.h"
#include "GrGeometryProcessor.h"
#include "GrVertices.h"
#include "SkRefCnt.h"
#include "SkThread.h"
#include "SkTypes.h"

class GrGpu;
class GrPipeline;

struct GrInitInvariantOutput;

/*
 * GrBatch is the base class for all Ganesh deferred geometry generators.  To facilitate
 * reorderable batching, Ganesh does not generate geometry inline with draw calls.  Instead, it
 * captures the arguments to the draw and then generates the geometry on demand.  This gives GrBatch
 * subclasses complete freedom to decide how / what they can batch.
 *
 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
 * merged using combineIfPossible. When two batches merge, one takes on the union of the data
 * and the other is left empty. The merged batch becomes responsible for drawing the data from
 * both of the original batches.
 *
 * If there are any possible optimizations which might require knowing more about the full state of
 * the draw, e.g. whether or not the GrBatch is allowed to tweak alpha for coverage, then this
 * information will be communicated to the GrBatch prior to geometry generation.
 */
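
/*
 * A minimal sketch of what a subclass can look like, for orientation only. RectBatch and its
 * Geometry struct are hypothetical names, not part of Ganesh, and a real subclass must also
 * override the remaining pure virtuals (getInvariantOutputColor, getInvariantOutputCoverage,
 * initBatchTracker).
 *
 *   class RectBatch : public GrBatch {
 *   public:
 *       struct Geometry { GrColor fColor; SkRect fRect; };
 *
 *       RectBatch(const Geometry& geometry) {
 *           this->initClassID<RectBatch>();   // unique class ID, checked by combineIfPossible
 *           fGeoData.push_back(geometry);
 *           this->setBounds(geometry.fRect);  // always provide conservative bounds
 *       }
 *
 *       const char* name() const override { return "RectBatch"; }
 *
 *       bool onCombineIfPossible(GrBatch* t) override {
 *           // Take the union of the other batch's data and bounds; this batch then becomes
 *           // responsible for drawing both sets of data.
 *           RectBatch* that = t->cast<RectBatch>();
 *           fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
 *           this->joinBounds(that->bounds());
 *           return true;
 *       }
 *
 *       void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) override {
 *           // Emit vertices for every accumulated Geometry, e.g. with the QuadHelper below.
 *       }
 *
 *   private:
 *       SkSTArray<1, Geometry, true> fGeoData;
 *   };
 */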

class GrBatch : public SkRefCnt {
public:
    SK_DECLARE_INST_COUNT(GrBatch)
    GrBatch() : fClassID(kIllegalBatchClassID), fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) }
    virtual ~GrBatch() {}

    virtual const char* name() const = 0;
    virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
    virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;

    /*
     * initBatchTracker is a hook for some additional overrides / optimization possibilities
     * from the GrXferProcessor.
     */
    virtual void initBatchTracker(const GrPipelineInfo& init) = 0;

    bool combineIfPossible(GrBatch* that) {
        if (this->classID() != that->classID()) {
            return false;
        }

        return this->onCombineIfPossible(that);
    }

    virtual bool onCombineIfPossible(GrBatch*) = 0;

    virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0;

    const SkRect& bounds() const { return fBounds; }

    // TODO this goes away when batches are everywhere
    void setNumberOfDraws(int numberOfDraws) { fNumberOfDraws = numberOfDraws; }
    int numberOfDraws() const { return fNumberOfDraws; }

    // Batches are allocated from a dedicated memory pool (see GrBatch.cpp); the placement
    // forms simply forward to the global operators.
    void* operator new(size_t size);
    void operator delete(void* target);

    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }

    /**
     * Helper for down-casting to a GrBatch subclass
     */
    template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
    template <typename T> T* cast() { return static_cast<T*>(this); }

    uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; }

    // TODO no GrPrimitiveProcessors yet read fragment position
    bool willReadFragmentPosition() const { return false; }

    SkDEBUGCODE(bool isUsed() const { return fUsed; })

protected:
    // Subclasses call this (typically in their constructor) to assign the unique class ID that
    // combineIfPossible() uses to reject merges across different subclasses.
    template <typename PROC_SUBCLASS> void initClassID() {
        static uint32_t kClassID = GenClassID();
        fClassID = kClassID;
    }

    uint32_t fClassID;

    // NOTE: compute some bounds, even if extremely conservative. Do *NOT* call setLargest() on
    // the bounds rect because we outset it for dst copy textures.
    void setBounds(const SkRect& newBounds) { fBounds = newBounds; }

    void joinBounds(const SkRect& otherBounds) {
        fBounds.joinPossiblyEmptyRect(otherBounds);
    }

    /** Helper for rendering instances using an instanced index buffer. This class creates the
        space for the vertices and flushes the draws to the batch target. */
    class InstancedHelper {
    public:
        InstancedHelper() {}
        /** Returns the allocated storage for the vertices. The caller should populate the
            vertices before calling issueDraw(). */
        void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride,
                   const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance,
                   int instancesToDraw);

        /** Call after init() to issue draws to the batch target. */
        void issueDraw(GrBatchTarget* batchTarget) {
            SkASSERT(fVertices.instanceCount());
            batchTarget->draw(fVertices);
        }
    private:
        GrVertices  fVertices;
    };
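
    /* A sketch of typical InstancedHelper use inside a subclass's generateGeometry(); the
       stride, counts, and index buffer below are illustrative placeholders:

           InstancedHelper helper;
           void* verts = helper.init(batchTarget, kTriangles_GrPrimitiveType, vertexStride,
                                     indexBuffer, verticesPerInstance, indicesPerInstance,
                                     instanceCount);
           if (!verts) {
               return;  // vertex space could not be allocated
           }
           // ... write verticesPerInstance * instanceCount vertices into verts ...
           helper.issueDraw(batchTarget);
    */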

    static const int kVerticesPerQuad = 4;
    static const int kIndicesPerQuad = 6;

    /** A specialization of InstancedHelper for quad rendering. */
    class QuadHelper : private InstancedHelper {
    public:
        QuadHelper() : INHERITED() {}
        /** Finds the cached quad index buffer and reserves vertex space. Returns NULL on failure
            and on success a pointer to the vertex data that the caller should populate before
            calling issueDraw(). */
        void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw);

        using InstancedHelper::issueDraw;

    private:
        typedef InstancedHelper INHERITED;
    };
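
    /* QuadHelper use follows the same pattern, with the quad index buffer and per-instance
       counts supplied internally; vertexStride and quadCount here are placeholders:

           QuadHelper helper;
           void* verts = helper.init(batchTarget, vertexStride, quadCount);
           if (!verts) {
               return;  // failed to find the quad index buffer or reserve vertex space
           }
           // ... write kVerticesPerQuad * quadCount vertices into verts ...
           helper.issueDraw(batchTarget);
    */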

    SkRect fBounds;

private:
    static uint32_t GenClassID() {
        // gCurrBatchClassID has been initialized to kIllegalBatchClassID. The atomic inc
        // returns the old value, not the incremented value, so we add 1 to the returned value.
        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1;
        if (!id) {
            SkFAIL("This should never wrap as it should only be called once for each GrBatch "
                   "subclass.");
        }
        return id;
    }

    enum {
        kIllegalBatchClassID = 0,
    };
    static int32_t gCurrBatchClassID;

    SkDEBUGCODE(bool fUsed;)

    int fNumberOfDraws;

    typedef SkRefCnt INHERITED;
};

#endif
