/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GLInstancedRendering.h"

#include "GrResourceProvider.h"
#include "gl/GrGLGpu.h"
#include "instanced/InstanceProcessor.h"

#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)

namespace gr_instanced {

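// Op subclass that carries the GL-specific bookkeeping needed to replay the op's draws at
// flush time.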
class GLInstancedRendering::GLOp final : public InstancedRendering::Op {
public:
    DEFINE_OP_CLASS_ID

    GLOp(GLInstancedRendering* instRendering, GrPaint&& paint)
            : INHERITED(ClassID(), std::move(paint), instRendering) {}
    int numGLCommands() const { return 1 + fNumChangesInGeometry; }

private:
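    // Set by onBeginFlush: the op's starting index into the flush-wide instance array (zero when
    // GL base-instance support is available) and the index of its first draw command.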
    int fEmulatedBaseInstance;
    int fGLDrawCmdsIdx;

    friend class GLInstancedRendering;

    typedef Op INHERITED;
};

GrCaps::InstancedSupport GLInstancedRendering::CheckSupport(const GrGLCaps& glCaps) {
    // This method is only intended to be used for initializing fInstancedSupport in the caps.
    SkASSERT(GrCaps::InstancedSupport::kNone == glCaps.instancedSupport());
    if (!glCaps.vertexArrayObjectSupport() ||
        (!glCaps.drawIndirectSupport() && !glCaps.drawInstancedSupport())) {
        return GrCaps::InstancedSupport::kNone;
    }
    return InstanceProcessor::CheckSupport(*glCaps.shaderCaps(), glCaps);
}

GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu)
    : INHERITED(gpu),
      fVertexArrayID(0),
      fGLDrawCmdsInfo(0),
      fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
    SkASSERT(GrCaps::InstancedSupport::kNone != this->gpu()->caps()->instancedSupport());
}

GLInstancedRendering::~GLInstancedRendering() {
    if (fVertexArrayID) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
}

inline GrGLGpu* GLInstancedRendering::glGpu() const {
    return static_cast<GrGLGpu*>(this->gpu());
}

std::unique_ptr<InstancedRendering::Op> GLInstancedRendering::makeOp(GrPaint&& paint) {
    return std::unique_ptr<Op>(new GLOp(this, std::move(paint)));
}

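// Builds the GPU-side data for the flush: counts the pending instances and draw commands, lazily
// creates the vertex array object, then allocates and fills the instance buffer and, when
// supported, the draw-indirect command buffer.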
void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
    // Count what there is to draw.
    OpList::Iter iter;
    iter.init(this->trackedOps(), OpList::Iter::kHead_IterStart);
    int numGLInstances = 0;
    int numGLDrawCmds = 0;
    while (Op* o = iter.get()) {
        GLOp* op = static_cast<GLOp*>(o);
        iter.next();

        numGLInstances += op->fNumDraws;
        numGLDrawCmds += op->numGLCommands();
    }
    if (!numGLDrawCmds) {
        return;
    }
    SkASSERT(numGLInstances);

    // Lazily create a vertex array object.
    if (!fVertexArrayID) {
        GL_CALL(GenVertexArrays(1, &fVertexArrayID));
        if (!fVertexArrayID) {
            return;
        }
        this->glGpu()->bindVertexArray(fVertexArrayID);

        // Attach our index buffer to the vertex array.
        SkASSERT(!this->indexBuffer()->isCPUBacked());
        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
                           static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));

        // Set up the non-instanced attribs.
        this->glGpu()->bindBuffer(kVertex_GrBufferType, this->vertexBuffer());
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeCoords));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeCoords, 2, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kVertexAttrs));
        GL_CALL(VertexAttribIPointer((int)Attrib::kVertexAttrs, 1, GR_GL_INT, sizeof(ShapeVertex),
                                     (void*) offsetof(ShapeVertex, fAttrs)));

        SkASSERT(fInstanceAttribsBufferUniqueId.isInvalid());
    }

    // Create and map instance and draw-indirect buffers.
    SkASSERT(!fInstanceBuffer);
    fInstanceBuffer.reset(
        rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
                         kDynamic_GrAccessPattern,
                         GrResourceProvider::kNoPendingIO_Flag |
                         GrResourceProvider::kRequireGpuMemory_Flag));
    if (!fInstanceBuffer) {
        return;
    }

    SkASSERT(!fDrawIndirectBuffer);
    if (this->glGpu()->glCaps().drawIndirectSupport()) {
        fDrawIndirectBuffer.reset(
            rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
                             kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
                             GrResourceProvider::kNoPendingIO_Flag |
                             GrResourceProvider::kRequireGpuMemory_Flag));
        if (!fDrawIndirectBuffer) {
            return;
        }
    }

    Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
    SkASSERT(glMappedInstances);
    int glInstancesIdx = 0;

    GrGLDrawElementsIndirectCommand* glMappedCmds = nullptr;
    int glDrawCmdsIdx = 0;
    if (fDrawIndirectBuffer) {
        glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
        SkASSERT(glMappedCmds);
    }

    bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();
    SkASSERT(!baseInstanceSupport || fDrawIndirectBuffer);

    SkASSERT(!fGLDrawCmdsInfo);
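    // Without base-instance support, onDraw emulates the base instance by re-pointing the
    // instance attribs before each command, so per-command info is also kept CPU-side.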
    if (GR_GL_LOG_INSTANCED_OPS || !baseInstanceSupport) {
        fGLDrawCmdsInfo.reset(numGLDrawCmds);
    }

    // Generate the instance and draw-indirect buffer contents based on the tracked ops.
    iter.init(this->trackedOps(), OpList::Iter::kHead_IterStart);
    while (Op* o = iter.get()) {
        GLOp* op = static_cast<GLOp*>(o);
        iter.next();

        op->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
        op->fGLDrawCmdsIdx = glDrawCmdsIdx;

        const Op::Draw* draw = op->fHeadDraw;
        SkASSERT(draw);
        do {
            int instanceCount = 0;
            IndexRange geometry = draw->fGeometry;
            SkASSERT(!geometry.isEmpty());

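            // Batch consecutive draws that share this index range into a single instanced
            // command.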
            do {
                glMappedInstances[glInstancesIdx + instanceCount++] = draw->fInstance;
                draw = draw->fNext;
            } while (draw && draw->fGeometry == geometry);

            if (fDrawIndirectBuffer) {
                GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
                glCmd.fCount = geometry.fCount;
                glCmd.fInstanceCount = instanceCount;
                glCmd.fFirstIndex = geometry.fStart;
                glCmd.fBaseVertex = 0;
                glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;
            }

            if (GR_GL_LOG_INSTANCED_OPS || !baseInstanceSupport) {
                GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glDrawCmdsIdx];
                cmdInfo.fGeometry = geometry;
                cmdInfo.fInstanceCount = instanceCount;
            }

            glInstancesIdx += instanceCount;
            ++glDrawCmdsIdx;
        } while (draw);
    }

    SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
    if (fDrawIndirectBuffer) {
        fDrawIndirectBuffer->unmap();
    }

    SkASSERT(glInstancesIdx == numGLInstances);
    fInstanceBuffer->unmap();
}

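// Issues the GL draws for a single op: one MultiDrawElementsIndirect when the caps allow it,
// otherwise one DrawElementsIndirect or DrawElementsInstanced per command, emulating the base
// instance by re-pointing the instance attribs as needed.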
void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
                                  const Op* baseOp) {
    if (!fDrawIndirectBuffer && !fGLDrawCmdsInfo) {
        return; // beginFlush was not successful.
    }
    if (!this->glGpu()->flushGLState(pipeline, instProc, false)) {
        return;
    }

    if (fDrawIndirectBuffer) {
        this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());
    }

    const GrGLCaps& glCaps = this->glGpu()->glCaps();
    const GLOp* op = static_cast<const GLOp*>(baseOp);
    int numCommands = op->numGLCommands();

#if GR_GL_LOG_INSTANCED_OPS
    SkASSERT(fGLDrawCmdsInfo);
    SkDebugf("Instanced op: [");
    for (int i = 0; i < numCommands; ++i) {
        int glCmdIdx = op->fGLDrawCmdsIdx + i;
        SkDebugf("%s%i * %s", (i ? ", " : ""), fGLDrawCmdsInfo[glCmdIdx].fInstanceCount,
                 InstanceProcessor::GetNameOfIndexRange(fGLDrawCmdsInfo[glCmdIdx].fGeometry));
    }
    SkDebugf("]\n");
#else
    SkASSERT(SkToBool(fGLDrawCmdsInfo) == !glCaps.baseInstanceSupport());
#endif

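    // The "pointer" passed to the indirect draw calls below is really a byte offset into the
    // bound GR_GL_DRAW_INDIRECT buffer; the arithmetic on a null pointer just computes that
    // offset.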
    if (numCommands > 1 && glCaps.multiDrawIndirectSupport() && glCaps.baseInstanceSupport()) {
        SkASSERT(fDrawIndirectBuffer);
        int glCmdsIdx = op->fGLDrawCmdsIdx;
        this->flushInstanceAttribs(op->fEmulatedBaseInstance);
        GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                          (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
                                          numCommands, 0));
        return;
    }

    int emulatedBaseInstance = op->fEmulatedBaseInstance;
    for (int i = 0; i < numCommands; ++i) {
        int glCmdIdx = op->fGLDrawCmdsIdx + i;
        this->flushInstanceAttribs(emulatedBaseInstance);
        if (fDrawIndirectBuffer) {
            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                         (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
        } else {
            const GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glCmdIdx];
            GL_CALL(DrawElementsInstanced(GR_GL_TRIANGLES, cmdInfo.fGeometry.fCount,
                                          GR_GL_UNSIGNED_BYTE,
                                          (GrGLubyte*) nullptr + cmdInfo.fGeometry.fStart,
                                          cmdInfo.fInstanceCount));
        }
        if (!glCaps.baseInstanceSupport()) {
            const GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glCmdIdx];
            emulatedBaseInstance += cmdInfo.fInstanceCount;
        }
    }
}

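// Points the instance attribs at fInstanceBuffer, offset by baseInstance. The rebind is skipped
// when both the buffer and the base instance are unchanged since the last call.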
void GLInstancedRendering::flushInstanceAttribs(int baseInstance) {
    SkASSERT(fVertexArrayID);
    this->glGpu()->bindVertexArray(fVertexArrayID);

    SkASSERT(fInstanceBuffer);
    if (fInstanceAttribsBufferUniqueId != fInstanceBuffer->uniqueID() ||
        fInstanceAttribsBaseInstance != baseInstance) {
        Instance* offsetInBuffer = (Instance*) nullptr + baseInstance;

        this->glGpu()->bindBuffer(kVertex_GrBufferType, fInstanceBuffer.get());

        // Info attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kInstanceInfo));
        GL_CALL(VertexAttribIPointer((int)Attrib::kInstanceInfo, 1, GR_GL_UNSIGNED_INT,
                                     sizeof(Instance), &offsetInBuffer->fInfo));
        GL_CALL(VertexAttribDivisor((int)Attrib::kInstanceInfo, 1));

        // Shape matrix attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixX));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixY));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixX, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[0]));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixY, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[3]));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixX, 1));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixY, 1));

        // Color attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kColor));
        GL_CALL(VertexAttribPointer((int)Attrib::kColor, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
                                    sizeof(Instance), &offsetInBuffer->fColor));
        GL_CALL(VertexAttribDivisor((int)Attrib::kColor, 1));

        // Local rect attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kLocalRect));
        GL_CALL(VertexAttribPointer((int)Attrib::kLocalRect, 4, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fLocalRect));
        GL_CALL(VertexAttribDivisor((int)Attrib::kLocalRect, 1));

        fInstanceAttribsBufferUniqueId = fInstanceBuffer->uniqueID();
        fInstanceAttribsBaseInstance = baseInstance;
    }
}

void GLInstancedRendering::onEndFlush() {
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fGLDrawCmdsInfo.reset(0);
}

void GLInstancedRendering::onResetGpuResources(ResetType resetType) {
    if (fVertexArrayID && ResetType::kDestroy == resetType) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
    fVertexArrayID = 0;
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fInstanceAttribsBufferUniqueId.makeInvalid();
}

}