/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrDrawingManager.h"

#include "GrContext.h"
#include "GrRenderTargetContext.h"
#include "GrPathRenderingRenderTargetContext.h"
#include "GrRenderTargetProxy.h"
#include "GrResourceProvider.h"
#include "GrSoftwarePathRenderer.h"
#include "GrSurfacePriv.h"
#include "GrTextureContext.h"
#include "GrTextureOpList.h"
#include "SkSurface_Gpu.h"
#include "SkTTopoSort.h"

#include "text/GrAtlasTextContext.h"
#include "text/GrStencilAndCoverTextContext.h"

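// Tears down every opList the manager is tracking and deletes the path renderers. Called from
// the destructor and from abandon().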
void GrDrawingManager::cleanup() {
    for (int i = 0; i < fOpLists.count(); ++i) {
        fOpLists[i]->makeClosed();  // no opList should receive a new command after this
        fOpLists[i]->clearTarget();

        // We shouldn't need to do this, but it turns out some clients still hold onto opLists
        // after a cleanup
        fOpLists[i]->reset();
        fOpLists[i]->unref();
    }

    fOpLists.reset();

    delete fPathRendererChain;
    fPathRendererChain = nullptr;
    SkSafeSetNull(fSoftwarePathRenderer);
}

GrDrawingManager::~GrDrawingManager() {
    this->cleanup();
}

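// Flags the manager as abandoned, tells each opList to abandon (rather than free) its GPU
// resources, and then runs the normal cleanup.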
void GrDrawingManager::abandon() {
    fAbandoned = true;
    for (int i = 0; i < fOpLists.count(); ++i) {
        fOpLists[i]->abandonGpuResources();
    }
    this->cleanup();
}

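// Releases GPU resources that can be recreated on demand: the path renderers and anything the
// opLists are holding.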
void GrDrawingManager::freeGpuResources() {
    // a path renderer may be holding onto resources
    delete fPathRendererChain;
    fPathRendererChain = nullptr;
    SkSafeSetNull(fSoftwarePathRenderer);
    for (int i = 0; i < fOpLists.count(); ++i) {
        fOpLists[i]->freeGpuResources();
    }
}

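// Resets every opList and the shared flush state without freeing them.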
void GrDrawingManager::reset() {
    for (int i = 0; i < fOpLists.count(); ++i) {
        fOpLists[i]->reset();
    }
    fFlushState.reset();
}

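// The core flush: closes all opLists, topologically sorts them by dependency, gives any
// registered pre-flush callback objects a chance to run (e.g. to build atlases), then prepares
// and executes the recorded ops before resetting the lists and notifying the resource cache.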
void GrDrawingManager::internalFlush(GrResourceCache::FlushType type) {
    if (fFlushing || this->wasAbandoned()) {
        return;
    }
    fFlushing = true;
    bool flushed = false;

    for (int i = 0; i < fOpLists.count(); ++i) {
        // Semi-usually the GrOpLists are already closed at this point, but sometimes Ganesh
        // needs to flush mid-draw. In that case, the SkGpuDevice's GrOpLists won't be closed
        // but need to be flushed anyway. Closing such GrOpLists here will mean new
        // GrOpLists will be created to replace them if the SkGpuDevice(s) write to them again.
        fOpLists[i]->makeClosed();
    }

    SkDEBUGCODE(bool result =) SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
    SkASSERT(result);

    GrPreFlushResourceProvider preFlushProvider(this);

    if (fPreFlushCBObjects.count()) {
        // MDB TODO: pre-MDB '1' is the correct pre-allocated size. Post-MDB it will need
        // to be larger.
        SkAutoSTArray<1, uint32_t> opListIds(fOpLists.count());
        for (int i = 0; i < fOpLists.count(); ++i) {
            opListIds[i] = fOpLists[i]->uniqueID();
        }

        SkSTArray<1, sk_sp<GrRenderTargetContext>> renderTargetContexts;
        for (int i = 0; i < fPreFlushCBObjects.count(); ++i) {
            fPreFlushCBObjects[i]->preFlush(&preFlushProvider,
                                            opListIds.get(), opListIds.count(),
                                            &renderTargetContexts);
            if (!renderTargetContexts.count()) {
                continue;  // This is fine. No atlases of this type are required for this flush
            }

            for (int j = 0; j < renderTargetContexts.count(); ++j) {
                GrRenderTargetOpList* opList = renderTargetContexts[j]->getOpList();
                if (!opList) {
                    continue;  // Odd - but not a big deal
                }
                SkDEBUGCODE(opList->validateTargetsSingleRenderTarget());
                opList->prepareOps(&fFlushState);
                if (!opList->executeOps(&fFlushState)) {
                    continue;  // This is bad
                }
            }
            renderTargetContexts.reset();
        }
    }

    for (int i = 0; i < fOpLists.count(); ++i) {
        fOpLists[i]->prepareOps(&fFlushState);
    }

#if 0
    // Enable this to print out verbose GrOp information
    for (int i = 0; i < fOpLists.count(); ++i) {
        SkDEBUGCODE(fOpLists[i]->dump();)
    }
#endif

    // Upload all data to the GPU
    fFlushState.preIssueDraws();

    for (int i = 0; i < fOpLists.count(); ++i) {
        if (fOpLists[i]->executeOps(&fFlushState)) {
            flushed = true;
        }
    }

    SkASSERT(fFlushState.nextDrawToken() == fFlushState.nextTokenToFlush());

    for (int i = 0; i < fOpLists.count(); ++i) {
        fOpLists[i]->reset();
#ifdef ENABLE_MDB
        fOpLists[i]->unref();
#endif
    }

#ifndef ENABLE_MDB
    // When MDB is disabled we keep reusing the same GrOpList
    if (fOpLists.count()) {
        SkASSERT(fOpLists.count() == 1);
        // Clear out this flag so the topological sort's SkTTopoSort_CheckAllUnmarked check
        // won't bark
        fOpLists[0]->resetFlag(GrOpList::kWasOutput_Flag);
    }
#else
    fOpLists.reset();
#endif

    fFlushState.reset();
    // We always have to notify the cache when it requested a flush so it can reset its state.
    if (flushed || type == GrResourceCache::FlushType::kCacheRequested) {
        fContext->getResourceCache()->notifyFlushOccurred(type);
    }
    fFlushing = false;
}

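// Makes a surface's contents safe for external (non-Ganesh) access: flushes if the surface has
// pending IO and resolves its render target if it has one.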
void GrDrawingManager::prepareSurfaceForExternalIO(GrSurface* surface) {
    if (this->wasAbandoned()) {
        return;
    }
    SkASSERT(surface);
    SkASSERT(surface->getContext() == fContext);

    if (surface->surfacePriv().hasPendingIO()) {
        this->flush();
    }

    GrRenderTarget* rt = surface->asRenderTarget();
    if (fContext->getGpu() && rt) {
        fContext->getGpu()->resolveRenderTarget(rt);
    }
}

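// Registers an object whose preFlush() will be invoked at the start of every flush (see
// internalFlush above).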
void GrDrawingManager::addPreFlushCallbackObject(sk_sp<GrPreFlushCallbackObject> preFlushCBObject) {
    fPreFlushCBObjects.push_back(preFlushCBObject);
}

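// Returns the GrRenderTargetOpList that will record ops targeting 'rtp'. Pre-MDB the single
// existing opList is reused; otherwise a new one is created and tracked. The returned ref
// belongs to the caller.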
GrRenderTargetOpList* GrDrawingManager::newOpList(GrRenderTargetProxy* rtp) {
    SkASSERT(fContext);

#ifndef ENABLE_MDB
    // When MDB is disabled we always just return the single GrOpList
    if (fOpLists.count()) {
        SkASSERT(fOpLists.count() == 1);
        // In the non-MDB-world the same GrOpList gets reused for multiple render targets.
        // Update this pointer so all the asserts are happy
        rtp->setLastOpList(fOpLists[0]);
        // DrawingManager gets the creation ref - this ref is for the caller

        // TODO: although this is true right now it isn't cool
        return SkRef((GrRenderTargetOpList*) fOpLists[0]);
    }
#endif

    GrRenderTargetOpList* opList = new GrRenderTargetOpList(rtp,
                                                            fContext->getGpu(),
                                                            fContext->resourceProvider(),
                                                            fContext->getAuditTrail(),
                                                            fOptionsForOpLists);

    *fOpLists.append() = opList;

    // DrawingManager gets the creation ref - this ref is for the caller
    return SkRef(opList);
}

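// Returns a new GrTextureOpList for ops targeting 'textureProxy'. Pre-MDB the list is not
// tracked by the manager (see the comment below); the caller owns it outright.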
GrTextureOpList* GrDrawingManager::newOpList(GrTextureProxy* textureProxy) {
    SkASSERT(fContext);

    GrTextureOpList* opList = new GrTextureOpList(textureProxy, fContext->getGpu(),
                                                  fContext->getAuditTrail());

#ifndef ENABLE_MDB
    // When MDB is disabled we still create a new GrOpList, but don't store or ref it - we rely
    // on the caller to immediately execute and free it.
    return opList;
#else
    *fOpLists.append() = opList;

    // Drawing manager gets the creation ref - this ref is for the caller
    return SkRef(opList);
#endif
}

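// Lazily creates and returns the single GrAtlasTextContext owned by this drawing manager.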
GrAtlasTextContext* GrDrawingManager::getAtlasTextContext() {
    if (!fAtlasTextContext) {
        fAtlasTextContext.reset(GrAtlasTextContext::Create());
    }

    return fAtlasTextContext.get();
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {

    if (!fPathRendererChain) {
        fPathRendererChain = new GrPathRendererChain(fContext, fOptionsForPathRendererChain);
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        if (!fSoftwarePathRenderer) {
            fSoftwarePathRenderer =
                    new GrSoftwarePathRenderer(fContext->resourceProvider(),
                                               fOptionsForPathRendererChain.fAllowPathMaskCaching);
        }
        if (fSoftwarePathRenderer->canDrawPath(args)) {
            pr = fSoftwarePathRenderer;
        }
    }

    return pr;
}

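// Wraps a render-target-capable proxy in a GrRenderTargetContext. When device-independent fonts
// are requested and the caps report path rendering support (with a multisampled stencil buffer),
// a GrPathRenderingRenderTargetContext is created instead.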
sk_sp<GrRenderTargetContext> GrDrawingManager::makeRenderTargetContext(
                                                            sk_sp<GrSurfaceProxy> sProxy,
                                                            sk_sp<SkColorSpace> colorSpace,
                                                            const SkSurfaceProps* surfaceProps) {
    if (this->wasAbandoned() || !sProxy->asRenderTargetProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage. We allow a null color space here, for read/write pixels and
    // other special code paths. If a color space is provided, though, enforce all other rules.
    if (colorSpace && !SkSurface_Gpu::Valid(fContext, sProxy->config(), colorSpace.get())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    sk_sp<GrRenderTargetProxy> rtp(sk_ref_sp(sProxy->asRenderTargetProxy()));

    bool useDIF = false;
    if (surfaceProps) {
        useDIF = surfaceProps->isUseDeviceIndependentFonts();
    }

    if (useDIF && fContext->caps()->shaderCaps()->pathRenderingSupport() &&
        rtp->isStencilBufferMultisampled()) {
        // TODO: defer stencil buffer attachment for PathRenderingDrawContext
        sk_sp<GrRenderTarget> rt(sk_ref_sp(rtp->instantiate(fContext->resourceProvider())));
        if (!rt) {
            return nullptr;
        }
        GrStencilAttachment* sb = fContext->resourceProvider()->attachStencilAttachment(rt.get());
        if (sb) {
            return sk_sp<GrRenderTargetContext>(new GrPathRenderingRenderTargetContext(
                                                        fContext, this, std::move(rtp),
                                                        std::move(colorSpace), surfaceProps,
                                                        fContext->getAuditTrail(), fSingleOwner));
        }
    }

    return sk_sp<GrRenderTargetContext>(new GrRenderTargetContext(fContext, this, std::move(rtp),
                                                                  std::move(colorSpace),
                                                                  surfaceProps,
                                                                  fContext->getAuditTrail(),
                                                                  fSingleOwner));
}

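// Wraps a non-renderable texture proxy in a GrTextureContext; render targets should go through
// makeRenderTargetContext instead.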
sk_sp<GrTextureContext> GrDrawingManager::makeTextureContext(sk_sp<GrSurfaceProxy> sProxy,
                                                             sk_sp<SkColorSpace> colorSpace) {
    if (this->wasAbandoned() || !sProxy->asTextureProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage. We allow a null color space here, for read/write pixels and
    // other special code paths. If a color space is provided, though, enforce all other rules.
    if (colorSpace && !SkSurface_Gpu::Valid(fContext, sProxy->config(), colorSpace.get())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    // GrTextureRenderTargets should always be using GrRenderTargetContext
    SkASSERT(!sProxy->asRenderTargetProxy());

    sk_sp<GrTextureProxy> textureProxy(sk_ref_sp(sProxy->asTextureProxy()));

    return sk_sp<GrTextureContext>(new GrTextureContext(fContext, this, std::move(textureProxy),
                                                        std::move(colorSpace),
                                                        fContext->getAuditTrail(),
                                                        fSingleOwner));
}