/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "InstancedRendering.h"

#include "GrCaps.h"
#include "GrOpFlushState.h"
#include "GrPipeline.h"
#include "GrResourceProvider.h"
#include "instanced/InstanceProcessor.h"

namespace gr_instanced {

InstancedRendering::InstancedRendering(GrGpu* gpu)
        : fGpu(SkRef(gpu)),
          fState(State::kRecordingDraws),
          fDrawPool(1024, 1024) {
}

std::unique_ptr<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect,
                                                         const SkMatrix& viewMatrix,
                                                         GrPaint&& paint, GrAA aa,
                                                         const GrInstancedPipelineInfo& info) {
    return this->recordShape(ShapeType::kRect, rect, viewMatrix, std::move(paint), rect, aa, info);
}

std::unique_ptr<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect,
                                                         const SkMatrix& viewMatrix,
                                                         GrPaint&& paint, const SkRect& localRect,
                                                         GrAA aa,
                                                         const GrInstancedPipelineInfo& info) {
    return this->recordShape(ShapeType::kRect, rect, viewMatrix, std::move(paint), localRect, aa,
                             info);
}

std::unique_ptr<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect,
                                                         const SkMatrix& viewMatrix,
                                                         GrPaint&& paint,
                                                         const SkMatrix& localMatrix, GrAA aa,
                                                         const GrInstancedPipelineInfo& info) {
    if (localMatrix.hasPerspective()) {
        return nullptr; // Perspective is not yet supported in the local matrix.
    }
    if (std::unique_ptr<Op> op = this->recordShape(ShapeType::kRect, rect, viewMatrix,
                                                   std::move(paint), rect, aa, info)) {
        op->getSingleInstance().fInfo |= kLocalMatrix_InfoFlag;
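        // Pack the 2x3 local matrix into two params texels, one row per texel.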
        op->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(),
                              localMatrix.getTranslateX());
        op->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(),
                              localMatrix.getTranslateY());
        op->fInfo.fHasLocalMatrix = true;
        return std::move(op);
    }
    return nullptr;
}

std::unique_ptr<GrDrawOp> InstancedRendering::recordOval(const SkRect& oval,
                                                         const SkMatrix& viewMatrix,
                                                         GrPaint&& paint, GrAA aa,
                                                         const GrInstancedPipelineInfo& info) {
    return this->recordShape(ShapeType::kOval, oval, viewMatrix, std::move(paint), oval, aa, info);
}

std::unique_ptr<GrDrawOp> InstancedRendering::recordRRect(const SkRRect& rrect,
                                                          const SkMatrix& viewMatrix,
                                                          GrPaint&& paint, GrAA aa,
                                                          const GrInstancedPipelineInfo& info) {
    if (std::unique_ptr<Op> op =
                this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix,
                                  std::move(paint), rrect.rect(), aa, info)) {
        op->appendRRectParams(rrect);
        return std::move(op);
    }
    return nullptr;
}

std::unique_ptr<GrDrawOp> InstancedRendering::recordDRRect(const SkRRect& outer,
                                                           const SkRRect& inner,
                                                           const SkMatrix& viewMatrix,
                                                           GrPaint&& paint, GrAA aa,
                                                           const GrInstancedPipelineInfo& info) {
    if (inner.getType() > SkRRect::kSimple_Type) {
        return nullptr; // Complex inner round rects are not yet supported.
    }
    if (SkRRect::kEmpty_Type == inner.getType()) {
        return this->recordRRect(outer, viewMatrix, std::move(paint), aa, info);
    }
    if (std::unique_ptr<Op> op =
                this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix,
                                  std::move(paint), outer.rect(), aa, info)) {
        op->appendRRectParams(outer);
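        // Record the inner round rect: flag its shape type on both the op and the instance, then
        // append its rect and radii as extra params.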
        ShapeType innerShapeType = GetRRectShapeType(inner);
        op->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType);
        op->getSingleInstance().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit);
        op->appendParamsTexel(inner.rect().asScalars(), 4);
        op->appendRRectParams(inner);
        return std::move(op);
    }
    return nullptr;
}

std::unique_ptr<InstancedRendering::Op> InstancedRendering::recordShape(
        ShapeType type, const SkRect& bounds, const SkMatrix& viewMatrix, GrPaint&& paint,
        const SkRect& localRect, GrAA aa, const GrInstancedPipelineInfo& info) {
    SkASSERT(State::kRecordingDraws == fState);

    if (info.fIsRenderingToFloat && fGpu->caps()->avoidInstancedDrawsToFPTargets()) {
        return nullptr;
    }

    GrAAType aaType;
    if (!this->selectAntialiasMode(viewMatrix, aa, info, &aaType)) {
        return nullptr;
    }

    GrColor color = paint.getColor();
    std::unique_ptr<Op> op = this->makeOp(std::move(paint));
    op->fInfo.setAAType(aaType);
    op->fInfo.fShapeTypes = GetShapeFlag(type);
    op->fInfo.fCannotDiscard = true;
    op->fDrawColorsAreOpaque = GrColorIsOpaque(color);
    op->fDrawColorsAreSame = true;
    Instance& instance = op->getSingleInstance();
    instance.fInfo = (int)type << kShapeType_InfoBit;

    Op::HasAABloat aaBloat =
            (aaType == GrAAType::kCoverage) ? Op::HasAABloat::kYes : Op::HasAABloat::kNo;
    Op::IsZeroArea zeroArea = (bounds.isEmpty()) ? Op::IsZeroArea::kYes : Op::IsZeroArea::kNo;

    // The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that
    // will map this rectangle to the same device coordinates as "viewMatrix * bounds".
    float sx = 0.5f * bounds.width();
    float sy = 0.5f * bounds.height();
    float tx = sx + bounds.fLeft;
    float ty = sy + bounds.fTop;
    if (!viewMatrix.hasPerspective()) {
        float* m = instance.fShapeMatrix2x3;
        m[0] = viewMatrix.getScaleX() * sx;
        m[1] = viewMatrix.getSkewX() * sy;
        m[2] = viewMatrix.getTranslateX() +
               viewMatrix.getScaleX() * tx + viewMatrix.getSkewX() * ty;

        m[3] = viewMatrix.getSkewY() * sx;
        m[4] = viewMatrix.getScaleY() * sy;
        m[5] = viewMatrix.getTranslateY() +
               viewMatrix.getSkewY() * tx + viewMatrix.getScaleY() * ty;

        // Since 'm' is a 2x3 matrix that maps the rect [-1, +1] into the shape's device-space quad,
        // it's quite simple to find the bounding rectangle:
        float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]);
        float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]);
        SkRect opBounds;
        opBounds.fLeft = m[2] - devBoundsHalfWidth;
        opBounds.fRight = m[2] + devBoundsHalfWidth;
        opBounds.fTop = m[5] - devBoundsHalfHeight;
        opBounds.fBottom = m[5] + devBoundsHalfHeight;
        op->setBounds(opBounds, aaBloat, zeroArea);

        // TODO: Is this worth the CPU overhead?
        op->fInfo.fNonSquare =
                fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out.
                fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f ||                // Skew?
                fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) >
                        1e-2f; // Diff. lengths?
    } else {
        SkMatrix shapeMatrix(viewMatrix);
        shapeMatrix.preTranslate(tx, ty);
        shapeMatrix.preScale(sx, sy);
        instance.fInfo |= kPerspective_InfoFlag;

        float* m = instance.fShapeMatrix2x3;
        m[0] = SkScalarToFloat(shapeMatrix.getScaleX());
        m[1] = SkScalarToFloat(shapeMatrix.getSkewX());
        m[2] = SkScalarToFloat(shapeMatrix.getTranslateX());
        m[3] = SkScalarToFloat(shapeMatrix.getSkewY());
        m[4] = SkScalarToFloat(shapeMatrix.getScaleY());
        m[5] = SkScalarToFloat(shapeMatrix.getTranslateY());

        // Send the perspective column as a param.
        op->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1],
                              shapeMatrix[SkMatrix::kMPersp2]);
        op->fInfo.fHasPerspective = true;

        op->setBounds(bounds, aaBloat, zeroArea);
        op->fInfo.fNonSquare = true;
    }

    instance.fColor = color;

    const float* rectAsFloats = localRect.asScalars(); // Ensure SkScalar == float.
    memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float));

    op->fPixelLoad = op->bounds().height() * op->bounds().width();
    return op;
}

inline bool InstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa,
                                                    const GrInstancedPipelineInfo& info,
                                                    GrAAType* aaType) {
    SkASSERT(!info.fIsMixedSampled || info.fIsMultisampled);
    SkASSERT(GrCaps::InstancedSupport::kNone != fGpu->caps()->instancedSupport());

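    // Non-AA and analytic coverage AA are only possible when the target is not multisampled, or
    // when MSAA can be disabled for the draw.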
    if (!info.fIsMultisampled || fGpu->caps()->multisampleDisableSupport()) {
        if (GrAA::kNo == aa) {
            *aaType = GrAAType::kNone;
            return true;
        }

        if (info.canUseCoverageAA() && viewMatrix.preservesRightAngles()) {
            *aaType = GrAAType::kCoverage;
            return true;
        }
    }

    if (info.fIsMultisampled &&
        fGpu->caps()->instancedSupport() >= GrCaps::InstancedSupport::kMultisampled) {
        if (!info.fIsMixedSampled) {
            *aaType = GrAAType::kMSAA;
            return true;
        }
        if (fGpu->caps()->instancedSupport() >= GrCaps::InstancedSupport::kMixedSampled) {
            *aaType = GrAAType::kMixedSamples;
            return true;
        }
    }

    return false;
}

InstancedRendering::Op::Op(uint32_t classID, GrPaint&& paint, InstancedRendering* ir)
        : INHERITED(classID)
        , fInstancedRendering(ir)
        , fProcessors(std::move(paint))
        , fIsTracked(false)
        , fNumDraws(1)
        , fNumChangesInGeometry(0) {
    fHeadDraw = fTailDraw = fInstancedRendering->fDrawPool.allocate();
#ifdef SK_DEBUG
    fHeadDraw->fGeometry = {-1, 0};
#endif
    fHeadDraw->fNext = nullptr;
}

InstancedRendering::Op::~Op() {
    if (fIsTracked) {
        fInstancedRendering->fTrackedOps.remove(this);
    }

    Draw* draw = fHeadDraw;
    while (draw) {
        Draw* next = draw->fNext;
        fInstancedRendering->fDrawPool.release(draw);
        draw = next;
    }
}

void InstancedRendering::Op::appendRRectParams(const SkRRect& rrect) {
    SkASSERT(!fIsTracked);
    switch (rrect.getType()) {
        case SkRRect::kSimple_Type: {
            const SkVector& radii = rrect.getSimpleRadii();
            this->appendParamsTexel(radii.x(), radii.y(), rrect.width(), rrect.height());
            return;
        }
        case SkRRect::kNinePatch_Type: {
            float twoOverW = 2 / rrect.width();
            float twoOverH = 2 / rrect.height();
            const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
            const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
            this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBR.x() * twoOverW,
                                    radiiTL.y() * twoOverH, radiiBR.y() * twoOverH);
            return;
        }
        case SkRRect::kComplex_Type: {
            /**
             * The x and y radii of each arc are stored in separate vectors,
             * in the following order:
             *
             *        __x1 _ _ _ x3__
             *    y1 |               | y2
             *
             *       |               |
             *
             *    y3 |__   _ _ _   __| y4
             *          x2       x4
             *
             */
            float twoOverW = 2 / rrect.width();
            float twoOverH = 2 / rrect.height();
            const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
            const SkVector& radiiTR = rrect.radii(SkRRect::kUpperRight_Corner);
            const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
            const SkVector& radiiBL = rrect.radii(SkRRect::kLowerLeft_Corner);
            this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBL.x() * twoOverW,
                                    radiiTR.x() * twoOverW, radiiBR.x() * twoOverW);
            this->appendParamsTexel(radiiTL.y() * twoOverH, radiiTR.y() * twoOverH,
                                    radiiBL.y() * twoOverH, radiiBR.y() * twoOverH);
            return;
        }
        default: return;
    }
}

void InstancedRendering::Op::appendParamsTexel(const SkScalar* vals, int count) {
    SkASSERT(!fIsTracked);
    SkASSERT(count <= 4 && count >= 0);
    const float* valsAsFloats = vals; // Ensure SkScalar == float.
    memcpy(&fParams.push_back(), valsAsFloats, count * sizeof(float));
    fInfo.fHasParams = true;
}

void InstancedRendering::Op::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
    SkASSERT(!fIsTracked);
    ParamsTexel& texel = fParams.push_back();
    texel.fX = SkScalarToFloat(x);
    texel.fY = SkScalarToFloat(y);
    texel.fZ = SkScalarToFloat(z);
    texel.fW = SkScalarToFloat(w);
    fInfo.fHasParams = true;
}

void InstancedRendering::Op::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
    SkASSERT(!fIsTracked);
    ParamsTexel& texel = fParams.push_back();
    texel.fX = SkScalarToFloat(x);
    texel.fY = SkScalarToFloat(y);
    texel.fZ = SkScalarToFloat(z);
    fInfo.fHasParams = true;
}

bool InstancedRendering::Op::xpRequiresDstTexture(const GrCaps& caps, const GrAppliedClip* clip) {
    GrProcessorSet::FragmentProcessorAnalysis analysis;
    GrPipelineAnalysisCoverage coverageInput;
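    // A coverage channel is needed when using analytic coverage AA, or when a non-rect shape
    // cannot rely on discard and must output coverage instead.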
    if (GrAAType::kCoverage == fInfo.aaType() ||
        (GrAAType::kNone == fInfo.aaType() && !fInfo.isSimpleRects() && fInfo.fCannotDiscard)) {
        coverageInput = GrPipelineAnalysisCoverage::kSingleChannel;
    } else {
        coverageInput = GrPipelineAnalysisCoverage::kNone;
    }
    fProcessors.analyzeAndEliminateFragmentProcessors(&analysis, this->getSingleInstance().fColor,
                                                      coverageInput, clip, caps);
    Draw& draw = this->getSingleDraw(); // This will assert if we have > 1 command.
    SkASSERT(draw.fGeometry.isEmpty());
    SkASSERT(SkIsPow2(fInfo.fShapeTypes));
    SkASSERT(!fIsTracked);

    if (kRect_ShapeFlag == fInfo.fShapeTypes) {
        draw.fGeometry = InstanceProcessor::GetIndexRangeForRect(fInfo.aaType());
    } else if (kOval_ShapeFlag == fInfo.fShapeTypes) {
        draw.fGeometry = InstanceProcessor::GetIndexRangeForOval(fInfo.aaType(), this->bounds());
    } else {
        draw.fGeometry = InstanceProcessor::GetIndexRangeForRRect(fInfo.aaType());
    }

    if (!fParams.empty()) {
        SkASSERT(fInstancedRendering->fParams.count() < (int)kParamsIdx_InfoMask); // TODO: cleaner.
        this->getSingleInstance().fInfo |= fInstancedRendering->fParams.count();
        fInstancedRendering->fParams.push_back_n(fParams.count(), fParams.begin());
    }

    GrColor overrideColor;
    if (analysis.getInputColorOverrideAndColorProcessorEliminationCount(&overrideColor) >= 0) {
        SkASSERT(State::kRecordingDraws == fInstancedRendering->fState);
        this->getSingleDraw().fInstance.fColor = overrideColor;
    }
    fInfo.fCannotTweakAlphaForCoverage =
            !analysis.isCompatibleWithCoverageAsAlpha() ||
            !GrXPFactory::CompatibleWithCoverageAsAlpha(fProcessors.xpFactory(),
                                                        analysis.isOutputColorOpaque());

    fInfo.fUsesLocalCoords = analysis.usesLocalCoords();
    return GrXPFactory::WillNeedDstTexture(fProcessors.xpFactory(), caps, analysis);
}

void InstancedRendering::Op::wasRecorded() {
    SkASSERT(!fIsTracked);
    fInstancedRendering->fTrackedOps.addToTail(this);
    fProcessors.makePendingExecution();
    fIsTracked = true;
}

bool InstancedRendering::Op::onCombineIfPossible(GrOp* other, const GrCaps& caps) {
    Op* that = static_cast<Op*>(other);
    SkASSERT(fInstancedRendering == that->fInstancedRendering);
    SkASSERT(fTailDraw);
    SkASSERT(that->fTailDraw);

    if (!OpInfo::CanCombine(fInfo, that->fInfo) || fProcessors != that->fProcessors) {
        return false;
    }

    OpInfo combinedInfo = fInfo | that->fInfo;
    if (!combinedInfo.isSimpleRects()) {
        // This threshold was chosen with the "shapes_mixed" bench on a MacBook with Intel graphics.
        // There seems to be a wide range where it doesn't matter if we combine or not. What matters
        // is that the itty bitty rects combine with other shapes and the giant ones don't.
        constexpr SkScalar kMaxPixelsToGeneralizeRects = 256 * 256;
        if (fInfo.isSimpleRects() && fPixelLoad > kMaxPixelsToGeneralizeRects) {
            return false;
        }
        if (that->fInfo.isSimpleRects() && that->fPixelLoad > kMaxPixelsToGeneralizeRects) {
            return false;
        }
    }

    this->joinBounds(*that);
    fInfo = combinedInfo;
    fPixelLoad += that->fPixelLoad;
    fDrawColorsAreOpaque = fDrawColorsAreOpaque && that->fDrawColorsAreOpaque;
    fDrawColorsAreSame = fDrawColorsAreSame && that->fDrawColorsAreSame &&
                         fHeadDraw->fInstance.fColor == that->fHeadDraw->fInstance.fColor;
    // Adopt the other op's draws.
    fNumDraws += that->fNumDraws;
    fNumChangesInGeometry += that->fNumChangesInGeometry;
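    // If the geometry differs at the seam between the two draw lists, that counts as one more
    // geometry change.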
    if (fTailDraw->fGeometry != that->fHeadDraw->fGeometry) {
        ++fNumChangesInGeometry;
    }
    fTailDraw->fNext = that->fHeadDraw;
    fTailDraw = that->fTailDraw;

    that->fHeadDraw = that->fTailDraw = nullptr;

    return true;
}

void InstancedRendering::beginFlush(GrResourceProvider* rp) {
    SkASSERT(State::kRecordingDraws == fState);
    fState = State::kFlushing;

    if (fTrackedOps.isEmpty()) {
        return;
    }

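    // Lazily create the shape vertex and index buffers; endFlush() keeps them alive so they can be
    // reused across flushes.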
    if (!fVertexBuffer) {
        fVertexBuffer.reset(InstanceProcessor::FindOrCreateVertexBuffer(fGpu.get()));
        if (!fVertexBuffer) {
            return;
        }
    }

    if (!fIndexBuffer) {
        fIndexBuffer.reset(InstanceProcessor::FindOrCreateIndex8Buffer(fGpu.get()));
        if (!fIndexBuffer) {
            return;
        }
    }

    if (!fParams.empty()) {
        fParamsBuffer.reset(rp->createBuffer(fParams.count() * sizeof(ParamsTexel),
                                             kTexel_GrBufferType, kDynamic_GrAccessPattern,
                                             GrResourceProvider::kNoPendingIO_Flag |
                                             GrResourceProvider::kRequireGpuMemory_Flag,
                                             fParams.begin()));
        if (!fParamsBuffer) {
            return;
        }
    }

    this->onBeginFlush(rp);
}

void InstancedRendering::Op::onExecute(GrOpFlushState* state) {
    SkASSERT(State::kFlushing == fInstancedRendering->fState);
    SkASSERT(state->gpu() == fInstancedRendering->gpu());

    state->gpu()->handleDirtyContext();

    GrProcessorSet::FragmentProcessorAnalysis analysis;
    GrPipelineAnalysisCoverage coverageInput;
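    // This mirrors the coverage analysis made in xpRequiresDstTexture() above.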
    if (GrAAType::kCoverage == fInfo.aaType() ||
        (GrAAType::kNone == fInfo.aaType() && !fInfo.isSimpleRects() && fInfo.fCannotDiscard)) {
        coverageInput = GrPipelineAnalysisCoverage::kSingleChannel;
    } else {
        coverageInput = GrPipelineAnalysisCoverage::kNone;
    }
    GrPipelineAnalysisColor colorInput;
    if (fDrawColorsAreSame) {
        colorInput = fHeadDraw->fInstance.fColor;
    } else if (fDrawColorsAreOpaque) {
        colorInput = GrPipelineAnalysisColor::Opaque::kYes;
    }
    const GrAppliedClip* clip = state->drawOpArgs().fAppliedClip;
    analysis.init(colorInput, coverageInput, fProcessors, clip, state->caps());

    GrPipeline pipeline;
    GrPipeline::InitArgs args;
    args.fAnalysis = &analysis;
    args.fAppliedClip = clip;
    args.fCaps = &state->caps();
    args.fProcessors = &fProcessors;
    args.fFlags = GrAATypeIsHW(fInfo.aaType()) ? GrPipeline::kHWAntialias_Flag : 0;
    args.fRenderTarget = state->drawOpArgs().fRenderTarget;
    args.fDstTexture = state->drawOpArgs().fDstTexture;
    pipeline.init(args);

    if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*state->gpu()->caps())) {
        state->gpu()->xferBarrier(pipeline.getRenderTarget(), barrierType);
    }
    InstanceProcessor instProc(fInfo, fInstancedRendering->fParamsBuffer.get());
    fInstancedRendering->onDraw(pipeline, instProc, this);
}

void InstancedRendering::endFlush() {
    // The caller is expected to delete all tracked ops (i.e. ops that have been added to
    // fTrackedOps via wasRecorded()) before ending the flush.
    SkASSERT(fTrackedOps.isEmpty());
    fParams.reset();
    fParamsBuffer.reset();
    this->onEndFlush();
    fState = State::kRecordingDraws;
    // Hold on to the shape coords and index buffers.
}

void InstancedRendering::resetGpuResources(ResetType resetType) {
    fVertexBuffer.reset();
    fIndexBuffer.reset();
    fParamsBuffer.reset();
    this->onResetGpuResources(resetType);
}

}