/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkScanPriv.h"

#include "SkAntiRun.h"
#include "SkBlitter.h"
#include "SkCoverageDelta.h"
#include "SkMatrix.h"
#include "SkPath.h"
#include "SkPathPriv.h"
#include "SkRegion.h"
#include "SkTo.h"

#define SHIFT   SK_SUPERSAMPLE_SHIFT
#define SCALE   (1 << SHIFT)
#define MASK    (SCALE - 1)

/** @file
    We have two techniques for capturing the output of the supersampler:
    - SUPERMASK, which records a large mask-bitmap
        this is often faster for small, complex objects
    - RLE, which records a rle-encoded scanline
        this is often faster for large objects with big spans

    These blitters use two coordinate systems:
    - destination coordinates, scale equal to the output - often
        abbreviated with 'i' or 'I' in variable names
    - supersampled coordinates, scale equal to the output * SCALE
 */
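
// Example: if SHIFT == 2, then SCALE == 4 and MASK == 3, so destination pixel
// column 10 covers supersampled columns [40, 44), and a supersampled x maps
// back to destination space with (x >> SHIFT).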

//#define FORCE_SUPERMASK
//#define FORCE_RLE

///////////////////////////////////////////////////////////////////////////////

/// Base class for a single-pass supersampled blitter.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkIRect& clipBounds, bool isInverse);

    /// Must be explicitly defined on subclasses.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
                           const int16_t runs[]) override {
        SkDEBUGFAIL("How did I get here?");
    }
    /// May not be called on BaseSuperBlitter because it blits out of order.
    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    SkBlitter* fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int fSuperLeft;

    SkDEBUGCODE(int fCurrX;)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;

    SkIRect fSectBounds;
};

BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir,
                                   const SkIRect& clipBounds, bool isInverse) {
    fRealBlitter = realBlit;

    SkIRect sectBounds;
    if (isInverse) {
        // We use the clip bounds instead of the ir, since we may be asked to
        // draw outside of the rect when we're an inverse filltype
        sectBounds = clipBounds;
    } else {
        if (!sectBounds.intersect(ir, clipBounds)) {
            sectBounds.setEmpty();
        }
    }

    const int left = sectBounds.left();
    const int right = sectBounds.right();

    fLeft = left;
    fSuperLeft = SkLeftShift(left, SHIFT);
    fWidth = right - left;
    fTop = sectBounds.top();
    fCurrIY = fTop - 1;
    fCurrY = SkLeftShift(fTop, SHIFT) - 1;

    SkDEBUGCODE(fCurrX = -1;)
}

/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                 bool isInverse);

    ~SuperBlitter() override {
        this->flush();
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    void blitH(int x, int y, int width) override;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    void blitRect(int x, int y, int width, int height) override;

private:
    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int         fRunsToBuffer;
    void*       fRunsBuffer;
    int         fCurrentRun;
    SkAlphaRuns fRuns;

    // extra one to store the zero at the end
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }
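
    // Layout of one run buffer: (fWidth + 1) int16_t runs immediately followed
    // by the alpha bytes. E.g. if fWidth == 10, getRunsSz() is
    // (10 + 1 + 6) * 2 == 34 bytes: 22 bytes of runs, then 12 bytes of alpha,
    // split up by advanceRuns() below.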

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(
            reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    int fOffsetX;
};

SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                           bool isInverse)
    : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
{
    fRunsToBuffer = realBlitter->requestRowsPreserved();
    fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
    fCurrentRun = -1;

    this->advanceRuns();

    fOffsetX = 0;
}

void SuperBlitter::flush() {
    if (fCurrIY >= fTop) {

        SkASSERT(fCurrentRun < fRunsToBuffer);
        if (!fRuns.empty()) {
            // SkDEBUGCODE(fRuns.dump();)
            fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
            this->advanceRuns();
            fOffsetX = 0;
        }

        fCurrIY = fTop - 1;
        SkDEBUGCODE(fCurrX = -1;)
    }
}

/** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
    *accumulates* SCALE pixels worth of "alpha" in [0, 256/SCALE]
    to produce a final value in [0, 255] and handles clamping 256->255
    itself, with the same (alpha - (alpha >> 8)) correction as
    coverage_to_exact_alpha().
*/
static inline int coverage_to_partial_alpha(int aa) {
    aa <<= 8 - 2*SHIFT;
    return aa;
}
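
// Example: if SHIFT == 2 (SCALE == 4), a destination pixel fully covered in
// one supersampled row has aa == 4, so coverage_to_partial_alpha(4) ==
// 4 << 4 == 64 == 256/SCALE; accumulating SCALE such rows reaches 256, which
// SkAlphaRuns clamps down to 255.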

/** coverage_to_exact_alpha() is being used by our blitter, which wants
    a final value in [0, 255].
*/
static inline int coverage_to_exact_alpha(int aa) {
    int alpha = (256 >> SHIFT) * aa;
    // clamp 256->255
    return alpha - (alpha >> 8);
}
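
// Example: if SHIFT == 2, coverage_to_exact_alpha(4) == (256 >> 2) * 4 == 256,
// clamped to 256 - (256 >> 8) == 255; a partial coverage of 3 gives 192.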

void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    int iy = y >> SHIFT;
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;
        } else {
            fb = SCALE - fb;
        }
    }
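
    // Worked example, assuming SHIFT == 2 (SCALE == 4): for x == 5, width == 6,
    // start == 5 and stop == 11, so fb == 1, fe == 3, and
    // n == (11 >> 2) - (5 >> 2) - 1 == 0. Since n >= 0 and fb != 0, fb becomes
    // SCALE - fb == 3: three covered supersamples in the first pixel, no fully
    // covered middle pixels, and three in the last (3 + 0 + 3 == width).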

    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}

#if 0 // UNUSED
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;
}
#endif

void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            xrite = SCALE;
            irite--;
        }
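
        // Worked example, assuming SHIFT == 2 (SCALE == 4): for x == 5 and
        // width == 10, ileft == 1 and xleft == 1 (one uncovered supersample on
        // the left), while x + width == 15 gives irite == 3 and xrite == 3
        // (three covered supersamples spilling past the opaque section).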

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH()
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}

///////////////////////////////////////////////////////////////////////////////

/// Masked supersampling antialiased blitter.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect&, bool isInverse);

    ~MaskSuperBlitter() override {
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    void blitH(int x, int y, int width) override;

    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask   fMask;
    SkIRect  fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t fStorage[(kMAX_STORAGE >> 2) + 1];
};

MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkIRect& clipBounds, bool isInverse)
    : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
{
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);

    fMask.fImage    = (uint8_t*)fStorage;
    fMask.fBounds   = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat   = SkMask::kA8_Format;

    fClipRect = ir;
    if (!fClipRect.intersect(clipBounds)) {
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
    /* I should be able to just add alpha[x] + startAlpha.
       However, if the trailing edge of the previous span and the leading
       edge of the current span round to the same super-sampled x value,
       I might overflow to 256 with this add, hence the funny subtract.
    */
    unsigned tmp = *alpha + startAlpha;
    SkASSERT(tmp <= 256);
    *alpha = SkToU8(tmp - (tmp >> 8));
}

static inline uint32_t quadplicate_byte(U8CPU value) {
    uint32_t pair = (value << 8) | value;
    return (pair << 16) | pair;
}
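
// Example: quadplicate_byte(0x37) == 0x37373737, which lets add_aa_span()
// below bump four alpha bytes with a single 32-bit add (safe as long as no
// individual byte overflows into its neighbor).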

// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
// only ever call us with at most enough to hit 256 (never larger), so it is
// enough to just subtract the high-bit. Actually clamping with a branch would
// be slower (e.g. if (tmp > 255) tmp = 255;)
//
static inline void saturated_add(uint8_t* ptr, U8CPU add) {
    unsigned tmp = *ptr + add;
    SkASSERT(tmp <= 256);
    *ptr = SkToU8(tmp - (tmp >> 8));
}
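
// Example: if *ptr + add == 256, then tmp >> 8 == 1 and the stored result is
// 255; any smaller sum (e.g. 200) has tmp >> 8 == 0 and is stored unchanged.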

// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP  16

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (reinterpret_cast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*>(qptr);
        // fall through to the following while-loop
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra space
    // (see the + 1 comment in fStorage[])
    saturated_add(alpha, stopAlpha);
}

void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does. Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    x -= SkLeftShift(fMask.fBounds.fLeft, SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}

///////////////////////////////////////////////////////////////////////////////

static SkIRect safeRoundOut(const SkRect& src) {
    // roundOut will pin huge floats to max/min int
    SkIRect dst = src.roundOut();

    // intersect with a smaller huge rect, so the rect will not be considered empty for being
    // too large. e.g. { -SK_MaxS32 ... SK_MaxS32 } is considered empty because its width
    // exceeds signed 32bit.
    const int32_t limit = SK_MaxS32 >> SK_SUPERSAMPLE_SHIFT;
    (void)dst.intersect({ -limit, -limit, limit, limit });

    return dst;
}
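
// Example: with SK_SUPERSAMPLE_SHIFT == 2, limit == SK_MaxS32 >> 2, so even
// the extreme rect { -limit, -limit, limit, limit } keeps its width and
// height (2 * limit) within a signed 32-bit int, and limit can still be
// left-shifted by SHIFT without overflow.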

constexpr int kSampleSize = 8;
#if !defined(SK_DISABLE_DAA) || !defined(SK_DISABLE_AAA)
constexpr SkScalar kComplexityThreshold = 0.25;
#endif

static void compute_complexity(const SkPath& path, SkScalar& avgLength, SkScalar& complexity) {
    int n = path.countPoints();
    if (n < kSampleSize || path.getBounds().isEmpty()) {
        // set to invalid value to indicate that we failed to compute
        avgLength = complexity = -1;
        return;
    }

    SkScalar sumLength = 0;
    SkPoint lastPoint = path.getPoint(0);
    for (int i = 1; i < kSampleSize; ++i) {
        SkPoint point = path.getPoint(i);
        sumLength += SkPoint::Distance(lastPoint, point);
        lastPoint = point;
    }
    avgLength = sumLength / (kSampleSize - 1);

    auto sqr = [](SkScalar x) { return x*x; };

    SkScalar diagonalSqr = sqr(path.getBounds().width()) + sqr(path.getBounds().height());

    // If the path consists of random line segments, the number of intersections should be
    // proportional to this.
    SkScalar intersections = sk_ieee_float_divide(sqr(n) * sqr(avgLength), diagonalSqr);

    // The number of intersections per scanline should be proportional to this number.
    complexity = sk_ieee_float_divide(intersections, path.getBounds().height());

    if (sk_float_isnan(complexity)) {  // it may be possible to have 0.0 / 0.0; inf is fine for us.
        complexity = -1;
    }
}
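
// Worked example: a path with n == 100 points, avgLength == 10, and 100x100
// bounds gives intersections == (100^2 * 10^2) / (100^2 + 100^2) == 50 and
// complexity == 50 / 100 == 0.5, which is above kComplexityThreshold (0.25),
// so the heuristics below lean toward DAA rather than AAA.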

static bool ShouldUseDAA(const SkPath& path, SkScalar avgLength, SkScalar complexity) {
#if defined(SK_DISABLE_DAA)
    return false;
#else
    if (gSkForceDeltaAA) {
        return true;
    }
    if (!gSkUseDeltaAA || SkPathPriv::IsBadForDAA(path)) {
        return false;
    }

#ifdef SK_SUPPORT_LEGACY_AA_CHOICE
    const SkRect& bounds = path.getBounds();
    return !path.isConvex()
            && path.countPoints() >= SkTMax(bounds.width(), bounds.height()) / 8;
#else
    if (avgLength < 0 || complexity < 0 || path.getBounds().isEmpty() || path.isConvex()) {
        return false;
    }

    // DAA is fast with mask
    if (SkCoverageDeltaMask::CanHandle(safeRoundOut(path.getBounds()))) {
        return true;
    }

    // DAA is much faster in small cubics (since we don't have to chop them).
    // If there are many cubics, and the average length is small, use DAA.
    constexpr SkScalar kSmallCubicThreshold = 16;
    if (avgLength < kSmallCubicThreshold) {
        uint8_t sampleVerbs[kSampleSize];
        int verbCount = SkTMin(kSampleSize, path.getVerbs(sampleVerbs, kSampleSize));
        int cubicCount = 0;
        for (int i = 0; i < verbCount; ++i) {
            cubicCount += (sampleVerbs[i] == SkPath::kCubic_Verb);
        }
        if (cubicCount * 2 >= verbCount) {
            return true;
        }
    }

    return complexity >= kComplexityThreshold;
#endif
#endif
}

static bool ShouldUseAAA(const SkPath& path, SkScalar avgLength, SkScalar complexity) {
#if defined(SK_DISABLE_AAA)
    return false;
#else
    if (gSkForceAnalyticAA) {
        return true;
    }
    if (!gSkUseAnalyticAA) {
        return false;
    }
    if (path.isRect(nullptr)) {
        return true;
    }

#ifdef SK_SUPPORT_LEGACY_AAA_CHOICE
    const SkRect& bounds = path.getBounds();
    // When the path has so many points compared to the size of its
    // bounds/resolution, it indicates that the path is not quite smooth in
    // the current resolution: the expected number of turning points in
    // every pixel row/column is significantly greater than zero. Hence
    // Analytic AA is not likely to produce visible quality improvements,
    // and Analytic AA might be slower than supersampling.
    return path.countPoints() < SkTMax(bounds.width(), bounds.height()) / 2 - 10;
#else
    if (path.countPoints() >= path.getBounds().height()) {
        // SAA is faster than AAA in this case even if there are no
        // intersections because AAA will have too many scan lines. See
        // skbug.com/8272
        return false;
    }
    // We will use AAA if the number of verbs < kSampleSize and therefore complexity < 0
    return complexity < kComplexityThreshold;
#endif
#endif
}

void SkScan::SAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
                         const SkIRect& clipBounds, bool forceRLE) {
    bool containedInClip = clipBounds.contains(ir);
    bool isInverse = path.isInverseFillType();

    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
    // if we're an inverse filltype
    if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
        MaskSuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
        sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
    } else {
        SuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
        sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
    }
}

static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    return (SkLeftShift(value, s) >> s) - value;
}
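
// Example: with shift == 2, s == 18, so only values in [-8192, 8191] survive
// the round trip (value << 18) >> 18 on a 32-bit int; anything outside that
// range (e.g. 8192) makes overflows_short_shift() return nonzero.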

/**
  Would any of the coordinates of this rectangle not fit in a short,
  when left-shifted by shift?
*/
static int rect_overflows_short_shift(SkIRect rect, int shift) {
    SkASSERT(!overflows_short_shift(8191, shift));
    SkASSERT(overflows_short_shift(8192, shift));
    SkASSERT(!overflows_short_shift(32767, 0));
    SkASSERT(overflows_short_shift(32768, 0));

    // Since we expect these to succeed, we bit-or together
    // for a tiny extra bit of speed.
    return overflows_short_shift(rect.fLeft, shift) |
           overflows_short_shift(rect.fRight, shift) |
           overflows_short_shift(rect.fTop, shift) |
           overflows_short_shift(rect.fBottom, shift);
}

void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE, SkDAARecord* daaRecord) {
    if (origClip.isEmpty()) {
        SkDAARecord::SetEmpty(daaRecord);
        return;
    }

    const bool isInverse = path.isInverseFillType();
    SkIRect ir = safeRoundOut(path.getBounds());
    if (ir.isEmpty()) {
        if (isInverse) {
            blitter->blitRegion(origClip);
        }
        SkDAARecord::SetEmpty(daaRecord);
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (isInverse) {
        // If the path is an inverse fill, it's going to fill the entire
        // clip, and we care whether the entire clip exceeds our limits.
        clippedIR = origClip.getBounds();
    } else {
        if (!clippedIR.intersect(ir, origClip.getBounds())) {
            SkDAARecord::SetEmpty(daaRecord);
            return;
        }
    }
    if (!daaRecord && rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds (see overflows_short_shift)) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper clipper(blitter, clipRgn, ir);

    if (clipper.getBlitter() == nullptr) {  // clipped out
        if (isInverse) {
            blitter->blitRegion(*clipRgn);
        }
        SkDAARecord::SetEmpty(daaRecord);
        return;
    }

    SkASSERT(clipper.getClipRect() == nullptr ||
             *clipper.getClipRect() == clipRgn->getBounds());

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (isInverse) {
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkScalar avgLength, complexity;
    compute_complexity(path, avgLength, complexity);

    if (daaRecord || ShouldUseDAA(path, avgLength, complexity)) {
        SkScan::DAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE, daaRecord);
    } else if (ShouldUseAAA(path, avgLength, complexity)) {
        // Do not use AAA if path is too complicated:
        // there won't be any speedup or significant visual improvement.
        SkScan::AAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    } else {
        SkScan::SAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    }

    if (isInverse) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}

///////////////////////////////////////////////////////////////////////////////

#include "SkRasterClip.h"

void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
    if (clip.isEmpty() || !path.isFinite()) {
        return;
    }

    if (clip.isBW()) {
        FillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::FillPath(path, tmp, &aaBlitter);
    }
}

void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
                          SkBlitter* blitter, SkDAARecord* daaRecord) {
    if (clip.isEmpty() || !path.isFinite()) {
        SkDAARecord::SetEmpty(daaRecord);
        return;
    }

    if (clip.isBW()) {
        AntiFillPath(path, clip.bwRgn(), blitter, false, daaRecord);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        AntiFillPath(path, tmp, &aaBlitter, true, daaRecord);  // SkAAClipBlitter can blitMask, why forceRLE?
    }
}