1 /*
2 * Copyright 2006 The Android Open Source Project
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8
9 #include "SkScanPriv.h"
10 #include "SkPath.h"
11 #include "SkMatrix.h"
12 #include "SkBlitter.h"
13 #include "SkRegion.h"
14 #include "SkAntiRun.h"
15
16 #define SHIFT SK_SUPERSAMPLE_SHIFT
17 #define SCALE (1 << SHIFT)
18 #define MASK (SCALE - 1)
19
20 /** @file
21 We have two techniques for capturing the output of the supersampler:
22 - SUPERMASK, which records a large mask-bitmap
23 this is often faster for small, complex objects
24 - RLE, which records a rle-encoded scanline
25 this is often faster for large objects with big spans
26
27 These blitters use two coordinate systems:
28 - destination coordinates, scale equal to the output - often
29 abbreviated with 'i' or 'I' in variable names
30 - supersampled coordinates, scale equal to the output * SCALE
31 */
32
33 //#define FORCE_SUPERMASK
34 //#define FORCE_RLE
35
36 ///////////////////////////////////////////////////////////////////////////////
37
/// Base class for a single-pass supersampled blitter.
/// Subclasses accumulate SCALE supersampled rows per destination row and
/// resolve them through fRealBlitter.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkIRect& clipBounds, bool isInverse);

    /// Must be explicitly defined on subclasses.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
                           const int16_t runs[]) override {
        SkDEBUGFAIL("How did I get here?");
    }
    /// May not be called on BaseSuperBlitter because it blits out of order.
    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    /// Destination blitter that receives the resolved (non-supersampled) output.
    SkBlitter* fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int fSuperLeft;

    // Debug-only: rightmost x blitted so far on the current row, used to
    // assert monotonic left-to-right blits.
    SkDEBUGCODE(int fCurrX;)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;

    SkIRect fSectBounds;
};
73
BaseSuperBlitter(SkBlitter * realBlit,const SkIRect & ir,const SkIRect & clipBounds,bool isInverse)74 BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir,
75 const SkIRect& clipBounds, bool isInverse) {
76 fRealBlitter = realBlit;
77
78 SkIRect sectBounds;
79 if (isInverse) {
80 // We use the clip bounds instead of the ir, since we may be asked to
81 //draw outside of the rect when we're a inverse filltype
82 sectBounds = clipBounds;
83 } else {
84 if (!sectBounds.intersect(ir, clipBounds)) {
85 sectBounds.setEmpty();
86 }
87 }
88
89 const int left = sectBounds.left();
90 const int right = sectBounds.right();
91
92 fLeft = left;
93 fSuperLeft = SkLeftShift(left, SHIFT);
94 fWidth = right - left;
95 fTop = sectBounds.top();
96 fCurrIY = fTop - 1;
97 fCurrY = SkLeftShift(fTop, SHIFT) - 1;
98
99 SkDEBUGCODE(fCurrX = -1;)
100 }
101
/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                 bool isInverse);

    ~SuperBlitter() override {
        // Push out any partially-accumulated final row.
        this->flush();
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    void blitH(int x, int y, int width) override;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    void blitRect(int x, int y, int width, int height) override;

private:
    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int         fRunsToBuffer;
    void*       fRunsBuffer;
    int         fCurrentRun;
    SkAlphaRuns fRuns;

    // Bytes needed for one SkAlphaRuns: fWidth+1 int16_t run counts (the
    // extra one stores the terminating zero at the end) plus roughly fWidth
    // alpha bytes, expressed in int16_t units.
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(
            reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
        // Alpha values live immediately after the fWidth+1 run counts.
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    // Resume position for fRuns.add() within the current supersampled row.
    int fOffsetX;
};
150
SuperBlitter(SkBlitter * realBlitter,const SkIRect & ir,const SkIRect & clipBounds,bool isInverse)151 SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
152 bool isInverse)
153 : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
154 {
155 fRunsToBuffer = realBlitter->requestRowsPreserved();
156 fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
157 fCurrentRun = -1;
158
159 this->advanceRuns();
160
161 fOffsetX = 0;
162 }
163
flush()164 void SuperBlitter::flush() {
165 if (fCurrIY >= fTop) {
166
167 SkASSERT(fCurrentRun < fRunsToBuffer);
168 if (!fRuns.empty()) {
169 // SkDEBUGCODE(fRuns.dump();)
170 fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
171 this->advanceRuns();
172 fOffsetX = 0;
173 }
174
175 fCurrIY = fTop - 1;
176 SkDEBUGCODE(fCurrX = -1;)
177 }
178 }
179
180 /** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
181 *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
182 to produce a final value in [0, 255] and handles clamping 256->255
183 itself, with the same (alpha - (alpha >> 8)) correction as
184 coverage_to_exact_alpha().
185 */
coverage_to_partial_alpha(int aa)186 static inline int coverage_to_partial_alpha(int aa) {
187 aa <<= 8 - 2*SHIFT;
188 return aa;
189 }
190
191 /** coverage_to_exact_alpha() is being used by our blitter, which wants
192 a final value in [0, 255].
193 */
coverage_to_exact_alpha(int aa)194 static inline int coverage_to_exact_alpha(int aa) {
195 int alpha = (256 >> SHIFT) * aa;
196 // clamp 256->255
197 return alpha - (alpha >> 8);
198 }
199
// Accumulate one supersampled span into fRuns, flushing the previous
// destination row first if this span starts a new one. x/y/width are in
// supersampled coordinates.
void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    int iy = y >> SHIFT;    // destination row this supersampled span lands in
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        // New supersampled row: restart the run-append position.
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;    // supersample offset of the left edge in its pixel
    int fe = stop & MASK;     // supersamples covered in the rightmost pixel
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;  // fully covered pixels between

    if (n < 0) {
        // Span begins and ends inside the same destination pixel; its
        // coverage is just the span length in supersamples.
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;     // left edge is pixel-aligned, so that pixel is full
        } else {
            fb = SCALE - fb;    // convert offset into covered supersamples
        }
    }

    // The last argument caps the per-row alpha contribution; presumably it
    // compensates for rounding on the final supersampled row of a pixel —
    // TODO(review): confirm against SkAlphaRuns::add.
    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}
258
#if 0 // UNUSED
// Dead code kept for reference: builds a single SkAlphaRuns row with an
// optional transparent lead-in, a 1-pixel left edge, an opaque middle, and a
// 1-pixel right edge.
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;     // terminate the run list
}
#endif
296
// Blit a supersampled rectangle. Ragged top/bottom rows go through blitH();
// the pixel-aligned middle is resolved directly to the destination, one
// destination row per SCALE supersampled rows.
void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows until y is pixel-aligned
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;   // full destination rows in the middle
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            // Right edge is pixel-aligned: fold it into the opaque section.
            xrite = SCALE;
            irite--;
        }

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;  // opaque pixels between the edge columns
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH()
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}
391
392 ///////////////////////////////////////////////////////////////////////////////
393
/// Masked supersampling antialiased blitter.
/// Accumulates coverage for the whole (small) rect into an A8 mask held in
/// fStorage, then blits the mask once in the destructor.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect&, bool isInverse);
    ~MaskSuperBlitter() override {
        // Resolve the accumulated mask through the wrapped blitter.
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    void blitH(int x, int y, int width) override;

    /// Returns true if the bounds are small enough for the mask approach;
    /// otherwise the caller should fall back to the RLE SuperBlitter.
    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask      fMask;
    SkIRect     fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t    fStorage[(kMAX_STORAGE >> 2) + 1];
};
434
// Sets up an A8 mask over ir backed by fStorage. CanHandleRect(ir) must hold,
// and inverse fills are not supported (they can draw outside ir).
MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkIRect& clipBounds, bool isInverse)
    : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
{
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);

    fMask.fImage    = (uint8_t*)fStorage;
    fMask.fBounds   = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat   = SkMask::kA8_Format;

    fClipRect = ir;
    if (!fClipRect.intersect(clipBounds)) {
        // Callers should never hand us a fully clipped-out rect.
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}
457
add_aa_span(uint8_t * alpha,U8CPU startAlpha)458 static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
459 /* I should be able to just add alpha[x] + startAlpha.
460 However, if the trailing edge of the previous span and the leading
461 edge of the current span round to the same super-sampled x value,
462 I might overflow to 256 with this add, hence the funny subtract.
463 */
464 unsigned tmp = *alpha + startAlpha;
465 SkASSERT(tmp <= 256);
466 *alpha = SkToU8(tmp - (tmp >> 8));
467 }
468
quadplicate_byte(U8CPU value)469 static inline uint32_t quadplicate_byte(U8CPU value) {
470 uint32_t pair = (value << 8) | value;
471 return (pair << 16) | pair;
472 }
473
474 // Perform this tricky subtract, to avoid overflowing to 256. Our caller should
475 // only ever call us with at most enough to hit 256 (never larger), so it is
476 // enough to just subtract the high-bit. Actually clamping with a branch would
477 // be slower (e.g. if (tmp > 255) tmp = 255;)
478 //
saturated_add(uint8_t * ptr,U8CPU add)479 static inline void saturated_add(uint8_t* ptr, U8CPU add) {
480 unsigned tmp = *ptr + add;
481 SkASSERT(tmp <= 256);
482 *ptr = SkToU8(tmp - (tmp >> 8));
483 }
484
// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP 16

// Accumulates one span into a mask row: a partial left edge (startAlpha),
// middleCount interior bytes each incremented by maxValue, and a partial
// right edge (stopAlpha). Long middles are added four bytes at a time via
// 32-bit stores.
static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (SkTCast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        // Add maxValue to four bytes per iteration. This relies on no byte
        // carrying into its neighbor, which holds because the interior bytes
        // never exceed 255 by construction.
        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;   // leftover 0..3 bytes
        alpha = reinterpret_cast<uint8_t*> (qptr);
        // fall through to the following while-loop
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra space
    // (see the + 1 comment in fStorage[]
    saturated_add(alpha, stopAlpha);
}
526
// Accumulate one supersampled span directly into the mask row for its
// destination y. x/y/width are in supersampled coordinates.
void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);  // destination row

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does. Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    // Make x relative to the mask's left edge, in supersampled coordinates.
    x -= SkLeftShift(fMask.fBounds.fLeft, SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;  // supersample offset of the left edge in its pixel
    int fe = stop & MASK;   // supersamples covered in the rightmost pixel
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;  // fully covered pixels between


    if (n < 0) {
        // Span begins and ends inside the same destination pixel.
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;    // convert offset into covered supersamples
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row,  coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}
583
584 ///////////////////////////////////////////////////////////////////////////////
585
ShouldUseDAA(const SkPath & path)586 static bool ShouldUseDAA(const SkPath& path) {
587 if (gSkForceDeltaAA) {
588 return true;
589 }
590 if (!gSkUseDeltaAA) {
591 return false;
592 }
593 const SkRect& bounds = path.getBounds();
594 return !path.isConvex() && path.countPoints() >= SkTMax(bounds.width(), bounds.height()) / 8;
595 }
596
ShouldUseAAA(const SkPath & path)597 static bool ShouldUseAAA(const SkPath& path) {
598 if (gSkForceAnalyticAA) {
599 return true;
600 }
601 if (!gSkUseAnalyticAA) {
602 return false;
603 }
604 if (path.isRect(nullptr)) {
605 return true;
606 }
607 const SkRect& bounds = path.getBounds();
608 // When the path have so many points compared to the size of its bounds/resolution,
609 // it indicates that the path is not quite smooth in the current resolution:
610 // the expected number of turning points in every pixel row/column is significantly greater than
611 // zero. Hence Aanlytic AA is not likely to produce visible quality improvements, and Analytic
612 // AA might be slower than supersampling.
613 return path.countPoints() < SkTMax(bounds.width(), bounds.height()) / 2 - 10;
614 }
615
SAAFillPath(const SkPath & path,SkBlitter * blitter,const SkIRect & ir,const SkIRect & clipBounds,bool forceRLE)616 void SkScan::SAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
617 const SkIRect& clipBounds, bool forceRLE) {
618 bool containedInClip = clipBounds.contains(ir);
619 bool isInverse = path.isInverseFillType();
620
621 // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
622 // if we're an inverse filltype
623 if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
624 MaskSuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
625 SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
626 sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
627 } else {
628 SuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
629 sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
630 }
631 }
632
overflows_short_shift(int value,int shift)633 static int overflows_short_shift(int value, int shift) {
634 const int s = 16 + shift;
635 return (SkLeftShift(value, s) >> s) - value;
636 }
637
638 /**
639 Would any of the coordinates of this rectangle not fit in a short,
640 when left-shifted by shift?
641 */
rect_overflows_short_shift(SkIRect rect,int shift)642 static int rect_overflows_short_shift(SkIRect rect, int shift) {
643 SkASSERT(!overflows_short_shift(8191, shift));
644 SkASSERT(overflows_short_shift(8192, shift));
645 SkASSERT(!overflows_short_shift(32767, 0));
646 SkASSERT(overflows_short_shift(32768, 0));
647
648 // Since we expect these to succeed, we bit-or together
649 // for a tiny extra bit of speed.
650 return overflows_short_shift(rect.fLeft, shift) |
651 overflows_short_shift(rect.fRight, shift) |
652 overflows_short_shift(rect.fTop, shift) |
653 overflows_short_shift(rect.fBottom, shift);
654 }
655
safeRoundOut(const SkRect & src)656 static SkIRect safeRoundOut(const SkRect& src) {
657 // roundOut will pin huge floats to max/min int
658 SkIRect dst = src.roundOut();
659
660 // intersect with a smaller huge rect, so the rect will not be considered empty for being
661 // too large. e.g. { -SK_MaxS32 ... SK_MaxS32 } is considered empty because its width
662 // exceeds signed 32bit.
663 const int32_t limit = SK_MaxS32 >> SK_SUPERSAMPLE_SHIFT;
664 (void)dst.intersect({ -limit, -limit, limit, limit});
665
666 return dst;
667 }
668
// Antialiased fill of path against origClip, dispatching to DAA, analytic AA,
// or supersampled AA. Handles inverse fill types, clip-size limits imposed by
// the int16-based run representation, and coordinate-overflow fallbacks.
void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE, SkDAARecord* daaRecord) {
    if (origClip.isEmpty()) {
        return;
    }

    const bool isInverse = path.isInverseFillType();
    SkIRect ir = safeRoundOut(path.getBounds());
    if (ir.isEmpty()) {
        // Empty path: an inverse fill still covers the whole clip.
        if (isInverse) {
            blitter->blitRegion(origClip);
        }
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (isInverse) {
       // If the path is an inverse fill, it's going to fill the entire
       // clip, and we care whether the entire clip exceeds our limits.
       clippedIR = origClip.getBounds();
    } else {
       if (!clippedIR.intersect(ir, origClip.getBounds())) {
           return;
       }
    }
    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds (see overflows_short_shift) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper clipper(blitter, clipRgn, ir);

    if (clipper.getBlitter() == nullptr) { // clipped out
        if (isInverse) {
            blitter->blitRegion(*clipRgn);
        }
        return;
    }

    SkASSERT(clipper.getClipRect() == nullptr ||
            *clipper.getClipRect() == clipRgn->getBounds());

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (isInverse) {
        // Fill the clip area above ir; the area below is filled after the path.
        sk_blit_above(blitter, ir, *clipRgn);
    }

    if (daaRecord || ShouldUseDAA(path)) {
        SkScan::DAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE, daaRecord);
    } else if (ShouldUseAAA(path)) {
        // Do not use AAA if path is too complicated:
        // there won't be any speedup or significant visual improvement.
        SkScan::AAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    } else {
        SkScan::SAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    }

    if (isInverse) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}
754
755 ///////////////////////////////////////////////////////////////////////////////
756
757 #include "SkRasterClip.h"
758
FillPath(const SkPath & path,const SkRasterClip & clip,SkBlitter * blitter)759 void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
760 if (clip.isEmpty() || !path.isFinite()) {
761 return;
762 }
763
764 if (clip.isBW()) {
765 FillPath(path, clip.bwRgn(), blitter);
766 } else {
767 SkRegion tmp;
768 SkAAClipBlitter aaBlitter;
769
770 tmp.setRect(clip.getBounds());
771 aaBlitter.init(blitter, &clip.aaRgn());
772 SkScan::FillPath(path, tmp, &aaBlitter);
773 }
774 }
775
AntiFillPath(const SkPath & path,const SkRasterClip & clip,SkBlitter * blitter,SkDAARecord * daaRecord)776 void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
777 SkBlitter* blitter, SkDAARecord* daaRecord) {
778 if (clip.isEmpty() || !path.isFinite()) {
779 return;
780 }
781
782 if (clip.isBW()) {
783 AntiFillPath(path, clip.bwRgn(), blitter, false, daaRecord);
784 } else {
785 SkRegion tmp;
786 SkAAClipBlitter aaBlitter;
787
788 tmp.setRect(clip.getBounds());
789 aaBlitter.init(blitter, &clip.aaRgn());
790 AntiFillPath(path, tmp, &aaBlitter, true, daaRecord); // SkAAClipBlitter can blitMask, why forceRLE?
791 }
792 }
793