1
2 /*
3 * Copyright 2006 The Android Open Source Project
4 *
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
7 */
8
9
10 #include "SkScanPriv.h"
11 #include "SkPath.h"
12 #include "SkMatrix.h"
13 #include "SkBlitter.h"
14 #include "SkRegion.h"
15 #include "SkAntiRun.h"
16
17 #define SHIFT 2
18 #define SCALE (1 << SHIFT)
19 #define MASK (SCALE - 1)
20
/** @file
    We have two techniques for capturing the output of the supersampler:
    - SUPERMASK, which records a large mask-bitmap;
      this is often faster for small, complex objects.
    - RLE, which records a run-length-encoded scanline;
      this is often faster for large objects with big spans.

    These blitters use two coordinate systems:
    - destination coordinates, scale equal to the output — often
      abbreviated with 'i' or 'I' in variable names
    - supersampled coordinates, scale equal to the output * SCALE
*/
33
34 //#define FORCE_SUPERMASK
35 //#define FORCE_RLE
36
37 ///////////////////////////////////////////////////////////////////////////////
38
/// Base class for a single-pass supersampled blitter. Holds the bounds and
/// cursor state shared by the RLE (SuperBlitter) and mask (MaskSuperBlitter)
/// implementations below.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkRegion& clip, bool isInverse);

    /// Must be explicitly defined on subclasses.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
                           const int16_t runs[]) override {
        SkDEBUGFAIL("How did I get here?");
    }
    /// May not be called on BaseSuperBlitter because it blits out of order.
    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    /// Wrapped blitter that receives the resolved (destination-resolution) output.
    SkBlitter* fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int fSuperLeft;

    SkDEBUGCODE(int fCurrX;)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;

    // NOTE(review): never assigned by the constructor in this revision and
    // not read anywhere visible here — confirm whether it is still needed.
    SkIRect fSectBounds;
};
74
BaseSuperBlitter(SkBlitter * realBlit,const SkIRect & ir,const SkRegion & clip,bool isInverse)75 BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir, const SkRegion& clip,
76 bool isInverse) {
77 fRealBlitter = realBlit;
78
79 SkIRect sectBounds;
80 if (isInverse) {
81 // We use the clip bounds instead of the ir, since we may be asked to
82 //draw outside of the rect when we're a inverse filltype
83 sectBounds = clip.getBounds();
84 } else {
85 if (!sectBounds.intersect(ir, clip.getBounds())) {
86 sectBounds.setEmpty();
87 }
88 }
89
90 const int left = sectBounds.left();
91 const int right = sectBounds.right();
92
93 fLeft = left;
94 fSuperLeft = SkLeftShift(left, SHIFT);
95 fWidth = right - left;
96 fTop = sectBounds.top();
97 fCurrIY = fTop - 1;
98 fCurrY = SkLeftShift(fTop, SHIFT) - 1;
99
100 SkDEBUGCODE(fCurrX = -1;)
101 }
102
/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip, bool isInverse);

    virtual ~SuperBlitter() {
        // Emit any scanline still being accumulated.
        this->flush();
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    void blitH(int x, int y, int width) override;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    void blitRect(int x, int y, int width, int height) override;

private:
    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int fRunsToBuffer;
    void* fRunsBuffer;
    int fCurrentRun;
    SkAlphaRuns fRuns;

    // extra one to store the zero at the end
    // Bytes per buffered scanline: (fWidth + 1) int16_t run entries plus
    // room for (fWidth + 1) alpha bytes, rounded up to int16_t slots.
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(
            reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    // Append-position hint carried between fRuns.add() calls within one
    // supersampled row; reset to 0 whenever a new row begins.
    int fOffsetX;
};
150
SuperBlitter(SkBlitter * realBlitter,const SkIRect & ir,const SkRegion & clip,bool isInverse)151 SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
152 bool isInverse)
153 : BaseSuperBlitter(realBlitter, ir, clip, isInverse)
154 {
155 fRunsToBuffer = realBlitter->requestRowsPreserved();
156 fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
157 fCurrentRun = -1;
158
159 this->advanceRuns();
160
161 fOffsetX = 0;
162 }
163
flush()164 void SuperBlitter::flush() {
165 if (fCurrIY >= fTop) {
166
167 SkASSERT(fCurrentRun < fRunsToBuffer);
168 if (!fRuns.empty()) {
169 // SkDEBUGCODE(fRuns.dump();)
170 fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
171 this->advanceRuns();
172 fOffsetX = 0;
173 }
174
175 fCurrIY = fTop - 1;
176 SkDEBUGCODE(fCurrX = -1;)
177 }
178 }
179
180 /** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
181 *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
182 to produce a final value in [0, 255] and handles clamping 256->255
183 itself, with the same (alpha - (alpha >> 8)) correction as
184 coverage_to_exact_alpha().
185 */
coverage_to_partial_alpha(int aa)186 static inline int coverage_to_partial_alpha(int aa) {
187 aa <<= 8 - 2*SHIFT;
188 return aa;
189 }
190
191 /** coverage_to_exact_alpha() is being used by our blitter, which wants
192 a final value in [0, 255].
193 */
coverage_to_exact_alpha(int aa)194 static inline int coverage_to_exact_alpha(int aa) {
195 int alpha = (256 >> SHIFT) * aa;
196 // clamp 256->255
197 return alpha - (alpha >> 8);
198 }
199
// Accumulates one supersampled row of coverage into fRuns, flushing the
// previous destination scanline whenever y crosses into a new one.
// x, y, and width are in supersampled coordinates.
void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    int iy = y >> SHIFT;    // destination scanline containing this row
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        fOffsetX = 0;   // new supersampled row: restart the run-append hint
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;      // subpixel offset of the left edge
    int fe = stop & MASK;       // subpixel offset of the right edge
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;     // fully covered middle pixels

    if (n < 0) {
        // Left and right edges fall in the same destination pixel.
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;     // left edge is pixel-aligned, so that pixel is fully covered
        } else {
            fb = SCALE - fb;    // partial coverage of the leftmost pixel
        }
    }

    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}
258
#if 0 // UNUSED
// (Compiled out.) Builds a complete SkAlphaRuns row: an optional transparent
// prefix of ileft pixels, one pixel of leftA, n opaque pixels, and one pixel
// of riteA, followed by the zero terminator.
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;     // terminate the runs array
}
#endif
296
// Blits a rectangle given in supersampled coordinates. Rows that do not
// align to full destination scanlines go through blitH(); the aligned middle
// is resolved directly with blitV()/blitAntiRect() on the real blitter.
void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            xrite = SCALE;
            irite--;
        }

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            xrite = 0;
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH()
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}
392
393 ///////////////////////////////////////////////////////////////////////////////
394
/// Masked supersampling antialiased blitter: accumulates coverage into a
/// fixed-size A8 mask and blits it in one shot from the destructor.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion&, bool isInverse);
    virtual ~MaskSuperBlitter() {
        // Resolve the accumulated coverage through the wrapped blitter.
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    void blitH(int x, int y, int width) override;

    /// True when the bounds fit the fixed fStorage buffer and are narrow
    /// enough that the mask approach is worthwhile.
    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32, // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask fMask;
    SkIRect fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t fStorage[(kMAX_STORAGE >> 2) + 1];
};
435
// Binds the A8 mask to fStorage over exactly the path bounds (ir) and zeroes
// it; coverage accumulates via blitH() and is emitted once at destruction.
MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
                                   bool isInverse)
    : BaseSuperBlitter(realBlitter, ir, clip, isInverse)
{
    SkASSERT(CanHandleRect(ir));
    // Inverse fills can draw outside ir, which this mask cannot represent.
    SkASSERT(!isInverse);

    fMask.fImage = (uint8_t*)fStorage;
    fMask.fBounds = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat = SkMask::kA8_Format;

    fClipRect = ir;
    if (!fClipRect.intersect(clip.getBounds())) {
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}
458
add_aa_span(uint8_t * alpha,U8CPU startAlpha)459 static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
460 /* I should be able to just add alpha[x] + startAlpha.
461 However, if the trailing edge of the previous span and the leading
462 edge of the current span round to the same super-sampled x value,
463 I might overflow to 256 with this add, hence the funny subtract.
464 */
465 unsigned tmp = *alpha + startAlpha;
466 SkASSERT(tmp <= 256);
467 *alpha = SkToU8(tmp - (tmp >> 8));
468 }
469
quadplicate_byte(U8CPU value)470 static inline uint32_t quadplicate_byte(U8CPU value) {
471 uint32_t pair = (value << 8) | value;
472 return (pair << 16) | pair;
473 }
474
475 // Perform this tricky subtract, to avoid overflowing to 256. Our caller should
476 // only ever call us with at most enough to hit 256 (never larger), so it is
477 // enough to just subtract the high-bit. Actually clamping with a branch would
478 // be slower (e.g. if (tmp > 255) tmp = 255;)
479 //
saturated_add(uint8_t * ptr,U8CPU add)480 static inline void saturated_add(uint8_t* ptr, U8CPU add) {
481 unsigned tmp = *ptr + add;
482 SkASSERT(tmp <= 256);
483 *ptr = SkToU8(tmp - (tmp >> 8));
484 }
485
// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP 16

// Adds one span's coverage to a mask row: a partial left edge (startAlpha),
// middleCount pixels of maxValue, and a partial right edge (stopAlpha).
static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (SkTCast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        // Add maxValue to four mask bytes per iteration. NOTE: this assumes
        // each per-byte sum stays <= 255 so lanes cannot carry into their
        // neighbors (the scalar path asserts the same bound via SkToU8).
        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*> (qptr);
        // fall through to the following while-loop
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra space
    // (see the + 1 comment in fStorage[])
    saturated_add(alpha, stopAlpha);
}
527
// Accumulates one supersampled row of coverage directly into the A8 mask.
// x, y, and width are in supersampled coordinates.
void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);  // destination scanline containing this row

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does. Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    x -= SkLeftShift(fMask.fBounds.fLeft, SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    // Pointer into the mask at the destination pixel containing x.
    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;      // subpixel offset of the left edge
    int fe = stop & MASK;       // subpixel offset of the right edge
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;     // fully covered middle pixels


    if (n < 0) {
        // Left and right edges fall in the same destination pixel.
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;    // partial coverage of the leftmost pixel
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}
584
585 ///////////////////////////////////////////////////////////////////////////////
586
fitsInsideLimit(const SkRect & r,SkScalar max)587 static bool fitsInsideLimit(const SkRect& r, SkScalar max) {
588 const SkScalar min = -max;
589 return r.fLeft > min && r.fTop > min &&
590 r.fRight < max && r.fBottom < max;
591 }
592
overflows_short_shift(int value,int shift)593 static int overflows_short_shift(int value, int shift) {
594 const int s = 16 + shift;
595 return (SkLeftShift(value, s) >> s) - value;
596 }
597
598 /**
599 Would any of the coordinates of this rectangle not fit in a short,
600 when left-shifted by shift?
601 */
rect_overflows_short_shift(SkIRect rect,int shift)602 static int rect_overflows_short_shift(SkIRect rect, int shift) {
603 SkASSERT(!overflows_short_shift(8191, SHIFT));
604 SkASSERT(overflows_short_shift(8192, SHIFT));
605 SkASSERT(!overflows_short_shift(32767, 0));
606 SkASSERT(overflows_short_shift(32768, 0));
607
608 // Since we expect these to succeed, we bit-or together
609 // for a tiny extra bit of speed.
610 return overflows_short_shift(rect.fLeft, SHIFT) |
611 overflows_short_shift(rect.fRight, SHIFT) |
612 overflows_short_shift(rect.fTop, SHIFT) |
613 overflows_short_shift(rect.fBottom, SHIFT);
614 }
615
safeRoundOut(const SkRect & src,SkIRect * dst,int32_t maxInt)616 static bool safeRoundOut(const SkRect& src, SkIRect* dst, int32_t maxInt) {
617 const SkScalar maxScalar = SkIntToScalar(maxInt);
618
619 if (fitsInsideLimit(src, maxScalar)) {
620 src.roundOut(dst);
621 return true;
622 }
623 return false;
624 }
625
/** Antialiased path fill. Supersamples the path at SCALE x SCALE resolution
    and resolves it through either MaskSuperBlitter (small, non-inverse
    bounds) or SuperBlitter (everything else, or when forceRLE is set).
*/
void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE) {
    if (origClip.isEmpty()) {
        return;
    }

    const bool isInverse = path.isInverseFillType();
    SkIRect ir;

    // Reject bounds that cannot be shifted by SHIFT without overflowing.
    if (!safeRoundOut(path.getBounds(), &ir, SK_MaxS32 >> SHIFT)) {
#if 0
        const SkRect& r = path.getBounds();
        SkDebugf("--- bounds can't fit in SkIRect\n", r.fLeft, r.fTop, r.fRight, r.fBottom);
#endif
        return;
    }
    if (ir.isEmpty()) {
        // An empty non-inverse path draws nothing; an inverse path fills
        // the whole clip.
        if (isInverse) {
            blitter->blitRegion(origClip);
        }
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (isInverse) {
        // If the path is an inverse fill, it's going to fill the entire
        // clip, and we care whether the entire clip exceeds our limits.
        clippedIR = origClip.getBounds();
    } else {
        if (!clippedIR.intersect(ir, origClip.getBounds())) {
            return;
        }
    }
    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds (see overflows_short_shift) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper clipper(blitter, clipRgn, ir);
    const SkIRect* clipRect = clipper.getClipRect();

    if (clipper.getBlitter() == nullptr) { // clipped out
        if (isInverse) {
            blitter->blitRegion(*clipRgn);
        }
        return;
    }

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (isInverse) {
        // Inverse fill: blit the clip region above ir now (below is handled
        // after the scan).
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkIRect superRect, *superClipRect = nullptr;

    if (clipRect) {
        // Promote the clip rect into supersampled coordinates.
        superRect.set(SkLeftShift(clipRect->fLeft, SHIFT),
                      SkLeftShift(clipRect->fTop, SHIFT),
                      SkLeftShift(clipRect->fRight, SHIFT),
                      SkLeftShift(clipRect->fBottom, SHIFT));
        superClipRect = &superRect;
    }

    SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);

    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
    // if we're an inverse filltype
    if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
        MaskSuperBlitter superBlit(blitter, ir, *clipRgn, isInverse);
        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    } else {
        SuperBlitter superBlit(blitter, ir, *clipRgn, isInverse);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    }

    if (isInverse) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}
730
731 ///////////////////////////////////////////////////////////////////////////////
732
733 #include "SkRasterClip.h"
734
FillPath(const SkPath & path,const SkRasterClip & clip,SkBlitter * blitter)735 void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip,
736 SkBlitter* blitter) {
737 if (clip.isEmpty()) {
738 return;
739 }
740
741 if (clip.isBW()) {
742 FillPath(path, clip.bwRgn(), blitter);
743 } else {
744 SkRegion tmp;
745 SkAAClipBlitter aaBlitter;
746
747 tmp.setRect(clip.getBounds());
748 aaBlitter.init(blitter, &clip.aaRgn());
749 SkScan::FillPath(path, tmp, &aaBlitter);
750 }
751 }
752
AntiFillPath(const SkPath & path,const SkRasterClip & clip,SkBlitter * blitter)753 void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
754 SkBlitter* blitter) {
755 if (clip.isEmpty()) {
756 return;
757 }
758
759 if (clip.isBW()) {
760 AntiFillPath(path, clip.bwRgn(), blitter);
761 } else {
762 SkRegion tmp;
763 SkAAClipBlitter aaBlitter;
764
765 tmp.setRect(clip.getBounds());
766 aaBlitter.init(blitter, &clip.aaRgn());
767 SkScan::AntiFillPath(path, tmp, &aaBlitter, true);
768 }
769 }
770