/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrSoftwarePathRenderer.h"
#include "GrAuditTrail.h"
#include "GrClip.h"
#include "GrContextPriv.h"
#include "GrDeferredProxyUploader.h"
#include "GrGpuResourcePriv.h"
#include "GrOpFlushState.h"
#include "GrOpList.h"
#include "GrProxyProvider.h"
#include "GrSWMaskHelper.h"
#include "GrShape.h"
#include "GrSurfaceContextPriv.h"
#include "SkMakeUnique.h"
#include "SkSemaphore.h"
#include "SkTaskGroup.h"
#include "SkTraceEvent.h"
#include "effects/GrSimpleTextureEffect.h"  // for GrSimpleTextureEffect::Make below
#include "ops/GrDrawOp.h"
#include "ops/GrFillRectOp.h"

////////////////////////////////////////////////////////////////////////////////
GrPathRenderer::CanDrawPath
GrSoftwarePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // Pass on any style that applies; if no suitable renderer is found, the caller will apply the
    // style and try again with the new GrShape.
    if (!args.fShape->style().applies() && SkToBool(fProxyProvider) &&
        (args.fAAType == GrAAType::kCoverage || args.fAAType == GrAAType::kNone)) {
        // This is the fallback renderer for when a path is too complicated for the GPU ones.
        return CanDrawPath::kAsBackup;
    }
    return CanDrawPath::kNo;
}

////////////////////////////////////////////////////////////////////////////////
static bool get_unclipped_shape_dev_bounds(const GrShape& shape, const SkMatrix& matrix,
                                           SkIRect* devBounds) {
    SkRect shapeBounds = shape.styledBounds();
    if (shapeBounds.isEmpty()) {
        return false;
    }
    SkRect shapeDevBounds;
    matrix.mapRect(&shapeDevBounds, shapeBounds);
    // Even though these are "unclipped" bounds we still clip to the int32_t range.
    // This is the largest int32_t that is representable exactly as a float. The next 63 larger ints
    // would round down to this value when cast to a float, but who really cares.
    // INT32_MIN is exactly representable.
    static constexpr int32_t kMaxInt = 2147483520;
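    // (2147483520 == 2^31 - 128; floats in [2^30, 2^31) are spaced 128 apart, so this is the
    // largest int32_t that lies exactly on that grid.)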
    if (!shapeDevBounds.intersect(SkRect::MakeLTRB(INT32_MIN, INT32_MIN, kMaxInt, kMaxInt))) {
        return false;
    }
    // Make sure that the resulting SkIRect can have representable width and height
    if (SkScalarRoundToInt(shapeDevBounds.width()) > kMaxInt ||
        SkScalarRoundToInt(shapeDevBounds.height()) > kMaxInt) {
        return false;
    }
    shapeDevBounds.roundOut(devBounds);
    return true;
}

// Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
// is no intersection.
bool GrSoftwarePathRenderer::GetShapeAndClipBounds(GrRenderTargetContext* renderTargetContext,
                                                   const GrClip& clip,
                                                   const GrShape& shape,
                                                   const SkMatrix& matrix,
                                                   SkIRect* unclippedDevShapeBounds,
                                                   SkIRect* clippedDevShapeBounds,
                                                   SkIRect* devClipBounds) {
    // compute bounds as intersection of rt size, clip, and path
    clip.getConservativeBounds(renderTargetContext->width(),
                               renderTargetContext->height(),
                               devClipBounds);

    if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
        *unclippedDevShapeBounds = SkIRect::EmptyIRect();
        *clippedDevShapeBounds = SkIRect::EmptyIRect();
        return false;
    }
    if (!clippedDevShapeBounds->intersect(*devClipBounds, *unclippedDevShapeBounds)) {
        *clippedDevShapeBounds = SkIRect::EmptyIRect();
        return false;
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

void GrSoftwarePathRenderer::DrawNonAARect(GrRenderTargetContext* renderTargetContext,
                                           GrPaint&& paint,
                                           const GrUserStencilSettings& userStencilSettings,
                                           const GrClip& clip,
                                           const SkMatrix& viewMatrix,
                                           const SkRect& rect,
                                           const SkMatrix& localMatrix) {
    GrContext* context = renderTargetContext->surfPriv().getContext();
    renderTargetContext->addDrawOp(clip,
                                   GrFillRectOp::MakeWithLocalMatrix(
                                           context, std::move(paint), GrAAType::kNone, viewMatrix,
                                           localMatrix, rect, &userStencilSettings));
}

void GrSoftwarePathRenderer::DrawAroundInvPath(GrRenderTargetContext* renderTargetContext,
                                               GrPaint&& paint,
                                               const GrUserStencilSettings& userStencilSettings,
                                               const GrClip& clip,
                                               const SkMatrix& viewMatrix,
                                               const SkIRect& devClipBounds,
                                               const SkIRect& devPathBounds) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

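    // Cover the region inside devClipBounds but outside devPathBounds with (up to) four
    // non-overlapping rects: a band above, to the left of, to the right of, and below the path
    // bounds. Each rect is drawn in device space with the inverted view matrix as the local
    // matrix so the paint's local coordinates still line up with the original geometry.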
    SkRect rect;
    if (devClipBounds.fTop < devPathBounds.fTop) {
        rect.iset(devClipBounds.fLeft, devClipBounds.fTop,
                  devClipBounds.fRight, devPathBounds.fTop);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fLeft < devPathBounds.fLeft) {
        rect.iset(devClipBounds.fLeft, devPathBounds.fTop,
                  devPathBounds.fLeft, devPathBounds.fBottom);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fRight > devPathBounds.fRight) {
        rect.iset(devPathBounds.fRight, devPathBounds.fTop,
                  devClipBounds.fRight, devPathBounds.fBottom);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fBottom > devPathBounds.fBottom) {
        rect.iset(devClipBounds.fLeft, devPathBounds.fBottom,
                  devClipBounds.fRight, devClipBounds.fBottom);
        DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
}

void GrSoftwarePathRenderer::DrawToTargetWithShapeMask(
        sk_sp<GrTextureProxy> proxy,
        GrRenderTargetContext* renderTargetContext,
        GrPaint&& paint,
        const GrUserStencilSettings& userStencilSettings,
        const GrClip& clip,
        const SkMatrix& viewMatrix,
        const SkIPoint& textureOriginInDeviceSpace,
        const SkIRect& deviceSpaceRectToDraw) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    SkRect dstRect = SkRect::Make(deviceSpaceRectToDraw);

    // We use device coords to compute the texture coordinates. We take the device coords and apply
    // a translation so that the top-left of the device bounds maps to 0,0, and then a scaling
    // matrix to normalized coords.
    SkMatrix maskMatrix = SkMatrix::MakeTrans(SkIntToScalar(-textureOriginInDeviceSpace.fX),
                                              SkIntToScalar(-textureOriginInDeviceSpace.fY));
    maskMatrix.preConcat(viewMatrix);
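    // Net effect: local coords -> (viewMatrix) -> device coords -> (translate) -> coords relative
    // to the mask's top-left texel. The texture effect's coord transform presumably supplies the
    // final scale down to normalized texture coords mentioned in the comment above.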
    paint.addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(
            std::move(proxy), maskMatrix, GrSamplerState::Filter::kNearest));
    DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip, SkMatrix::I(),
                  dstRect, invert);
}

static sk_sp<GrTextureProxy> make_deferred_mask_texture_proxy(GrContext* context, SkBackingFit fit,
                                                              int width, int height) {
    GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();

    GrSurfaceDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = kAlpha_8_GrPixelConfig;

    const GrBackendFormat format =
            context->contextPriv().caps()->getBackendFormatFromColorType(kAlpha_8_SkColorType);

    // MDB TODO: We're going to fill this proxy with an ASAP upload (which is out of order wrt
    // ops), so it can't have any pending IO.
    return proxyProvider->createProxy(format, desc, kTopLeft_GrSurfaceOrigin, fit, SkBudgeted::kYes,
                                      GrInternalSurfaceFlags::kNoPendingIO);
}

namespace {

/**
 * Payload class for use with GrTDeferredProxyUploader. The software path renderer only draws
 * a single path into the mask texture. This stores all of the information needed by the worker
 * thread's call to drawShape (see below, in onDrawPath).
 */
class SoftwarePathData {
public:
    SoftwarePathData(const SkIRect& maskBounds, const SkMatrix& viewMatrix, const GrShape& shape,
                     GrAA aa)
            : fMaskBounds(maskBounds)
            , fViewMatrix(viewMatrix)
            , fShape(shape)
            , fAA(aa) {}

    const SkIRect& getMaskBounds() const { return fMaskBounds; }
    const SkMatrix* getViewMatrix() const { return &fViewMatrix; }
    const GrShape& getShape() const { return fShape; }
    GrAA getAA() const { return fAA; }

private:
    SkIRect fMaskBounds;
    SkMatrix fViewMatrix;
    GrShape fShape;
    GrAA fAA;
};

// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
class PathInvalidator : public SkPathRef::GenIDChangeListener {
public:
    PathInvalidator(const GrUniqueKey& key, uint32_t contextUniqueID)
            : fMsg(key, contextUniqueID) {}

private:
    GrUniqueKeyInvalidatedMessage fMsg;

    void onChange() override {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(fMsg);
    }
};

}  // namespace

////////////////////////////////////////////////////////////////////////////////
// return true on success; false on failure
bool GrSoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
                              "GrSoftwarePathRenderer::onDrawPath");
    if (!fProxyProvider) {
        return false;
    }
    SkASSERT(!args.fShape->style().applies());
    // We really need to know whether the shape will be inverse filled.
    // If the path is hairline, ignore inverse fill.
    bool inverseFilled = args.fShape->inverseFilled() &&
                         !IsStrokeHairlineOrEquivalent(args.fShape->style(),
                                                       *args.fViewMatrix, nullptr);

    SkIRect unclippedDevShapeBounds, clippedDevShapeBounds, devClipBounds;
    // To prevent overloading the cache with entries during animations, we limit the cache of masks
    // to cases where the matrix preserves axis alignment.
    bool useCache = fAllowCaching && !inverseFilled && args.fViewMatrix->preservesAxisAlignment() &&
                    args.fShape->hasUnstyledKey() && GrAAType::kCoverage == args.fAAType;
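    // Caching also requires a stable unstyled key and coverage AA; inverse fills are excluded as
    // well, presumably because the region drawn for an inverse fill depends on the clip rather
    // than on the path alone.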

    if (!GetShapeAndClipBounds(args.fRenderTargetContext,
                               *args.fClip, *args.fShape,
                               *args.fViewMatrix, &unclippedDevShapeBounds,
                               &clippedDevShapeBounds,
                               &devClipBounds)) {
        if (inverseFilled) {
            DrawAroundInvPath(args.fRenderTargetContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
                              devClipBounds, unclippedDevShapeBounds);
        }
        return true;
    }

    const SkIRect* boundsForMask = &clippedDevShapeBounds;
    if (useCache) {
        // Use the cache only if >50% of the path is visible.
        int unclippedWidth = unclippedDevShapeBounds.width();
        int unclippedHeight = unclippedDevShapeBounds.height();
        int64_t unclippedArea = sk_64_mul(unclippedWidth, unclippedHeight);
        int64_t clippedArea = sk_64_mul(clippedDevShapeBounds.width(),
                                        clippedDevShapeBounds.height());
        int maxTextureSize = args.fRenderTargetContext->caps()->maxTextureSize();
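        // For example, a 100x100 path clipped to 70x80 (56% visible) keeps using the cache, while
        // one clipped to 60x80 (48% visible) fails the >50% test below and falls back to an
        // uncached mask sized to the clipped bounds.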
        if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
            unclippedHeight > maxTextureSize) {
            useCache = false;
        } else {
            boundsForMask = &unclippedDevShapeBounds;
        }
    }

    GrUniqueKey maskKey;
    if (useCache) {
        // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
        SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
        SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
        SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
        SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
        static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
        GrUniqueKey::Builder builder(&maskKey, kDomain, 5 + args.fShape->unstyledKeySize(),
                                     "SW Path Mask");
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        // Fractional translate does not affect caching on Android. This is done for better cache
        // hit ratio and speed, and it matches HWUI behavior, which doesn't consider the matrix
        // at all when caching paths.
        SkFixed fracX = 0;
        SkFixed fracY = 0;
#else
        SkScalar tx = args.fViewMatrix->get(SkMatrix::kMTransX);
        SkScalar ty = args.fViewMatrix->get(SkMatrix::kMTransY);
        // Allow 8 bits each in x and y of subpixel positioning.
        SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
        SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
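        // SkScalarToFixed yields 16.16 fixed point; masking with 0x0000FF00 keeps the top 8
        // fractional bits, i.e. subpixel translation is quantized to 1/256 of a pixel for keying.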
#endif
        builder[0] = SkFloat2Bits(sx);
        builder[1] = SkFloat2Bits(sy);
        builder[2] = SkFloat2Bits(kx);
        builder[3] = SkFloat2Bits(ky);
        // Distinguish between hairline and filled paths. For hairlines, we also need to include
        // the cap. (SW grows hairlines by 0.5 pixel with round and square caps). Note that
        // stroke-and-fill of hairlines is turned into pure fill by SkStrokeRec, so this covers
        // all cases we might see.
        uint32_t styleBits = args.fShape->style().isSimpleHairline() ?
                             ((args.fShape->style().strokeRec().getCap() << 1) | 1) : 0;
        builder[4] = fracX | (fracY >> 8) | (styleBits << 16);
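        // builder[4] layout: bits 16+ hold styleBits, bits 8-15 hold the x subpixel byte (fracX is
        // already positioned there), and bits 0-7 hold the y subpixel byte (fracY shifted down).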
        args.fShape->writeUnstyledKey(&builder[5]);
    }

    sk_sp<GrTextureProxy> proxy;
    if (useCache) {
        proxy = fProxyProvider->findOrCreateProxyByUniqueKey(maskKey, kTopLeft_GrSurfaceOrigin);
    }
    if (!proxy) {
        SkBackingFit fit = useCache ? SkBackingFit::kExact : SkBackingFit::kApprox;
        GrAA aa = GrAAType::kCoverage == args.fAAType ? GrAA::kYes : GrAA::kNo;

        SkTaskGroup* taskGroup = args.fContext->contextPriv().getTaskGroup();
        if (taskGroup) {
            proxy = make_deferred_mask_texture_proxy(args.fContext, fit,
                                                     boundsForMask->width(),
                                                     boundsForMask->height());
            if (!proxy) {
                return false;
            }

            auto uploader = skstd::make_unique<GrTDeferredProxyUploader<SoftwarePathData>>(
                    *boundsForMask, *args.fViewMatrix, *args.fShape, aa);
            GrTDeferredProxyUploader<SoftwarePathData>* uploaderRaw = uploader.get();

            auto drawAndUploadMask = [uploaderRaw] {
                TRACE_EVENT0("skia", "Threaded SW Mask Render");
                GrSWMaskHelper helper(uploaderRaw->getPixels());
                if (helper.init(uploaderRaw->data().getMaskBounds())) {
                    helper.drawShape(uploaderRaw->data().getShape(),
                                     *uploaderRaw->data().getViewMatrix(),
                                     SkRegion::kReplace_Op, uploaderRaw->data().getAA(), 0xFF);
                } else {
                    SkDEBUGFAIL("Unable to allocate SW mask.");
                }
                uploaderRaw->signalAndFreeData();
            };
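            // The task draws the mask on a worker thread. Capturing uploaderRaw is safe here
            // because ownership of the uploader passes to the proxy right below
            // (setDeferredUploader), which presumably keeps it alive until the flush-time upload
            // that signalAndFreeData() unblocks.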
            taskGroup->add(std::move(drawAndUploadMask));
            proxy->texPriv().setDeferredUploader(std::move(uploader));
        } else {
            GrSWMaskHelper helper;
            if (!helper.init(*boundsForMask)) {
                return false;
            }
            helper.drawShape(*args.fShape, *args.fViewMatrix, SkRegion::kReplace_Op, aa, 0xFF);
            proxy = helper.toTextureProxy(args.fContext, fit);
        }

        if (!proxy) {
            return false;
        }
        if (useCache) {
            SkASSERT(proxy->origin() == kTopLeft_GrSurfaceOrigin);
            fProxyProvider->assignUniqueKeyToProxy(maskKey, proxy.get());
            args.fShape->addGenIDChangeListener(
                    sk_make_sp<PathInvalidator>(maskKey, args.fContext->contextPriv().contextID()));
        }
    }
    if (inverseFilled) {
        DrawAroundInvPath(args.fRenderTargetContext, GrPaint::Clone(args.fPaint),
                          *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix, devClipBounds,
                          unclippedDevShapeBounds);
    }
    DrawToTargetWithShapeMask(
            std::move(proxy), args.fRenderTargetContext, std::move(args.fPaint),
            *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
            SkIPoint{boundsForMask->fLeft, boundsForMask->fTop}, *boundsForMask);

    return true;
}