1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #ifndef SkRasterPipeline_opts_DEFINED
9 #define SkRasterPipeline_opts_DEFINED
10
11 #include "SkColorPriv.h"
12 #include "SkColorLookUpTable.h"
13 #include "SkColorSpaceXform_A2B.h"
14 #include "SkColorSpaceXformPriv.h"
15 #include "SkHalf.h"
16 #include "SkImageShaderContext.h"
17 #include "SkMSAN.h"
18 #include "SkPM4f.h"
19 #include "SkPM4fPriv.h"
20 #include "SkRasterPipeline.h"
21 #include "SkShader.h"
22 #include "SkSRGB.h"
23
24 namespace {
25
26 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
27 static constexpr int N = 8;
28 #else
29 static constexpr int N = 4;
30 #endif
31
32 using SkNf = SkNx<N, float>;
33 using SkNi = SkNx<N, int32_t>;
34 using SkNu = SkNx<N, uint32_t>;
35 using SkNh = SkNx<N, uint16_t>;
36 using SkNb = SkNx<N, uint8_t>;
37
38 using Fn = void(SK_VECTORCALL *)(size_t x_tail, void** p, SkNf,SkNf,SkNf,SkNf,
39 SkNf,SkNf,SkNf,SkNf);
40 // x_tail encodes two values x and tail as x*N+tail, where 0 <= tail < N.
41 // x is the induction variable we're walking along, incrementing by N each step.
42 // tail == 0 means work with a full N pixels; otherwise use only the low tail pixels.
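// E.g. with N == 4, a run of 10 pixels starting at x == 0 issues x_tail values 0, 16, and
// then 34, i.e. x == 8 with a 2-pixel tail.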
43 //
44 // p is our program, a sequence of Fn to call interlaced with any void* context pointers. E.g.
45 // &load_8888
46 // (src ptr)
47 // &from_srgb
48 // &move_src_dst
49 // &load_f16
50 // (dst ptr)
51 // &swap
52 // &srcover
53 // &store_f16
54 // (dst ptr)
55 // &just_return
56
57 } // namespace
58
59 #define SI static inline
60
61 // Basically, return *(*ptr)++, maybe faster than the compiler can do it.
62 SI void* load_and_increment(void*** ptr) {
63 // We do this often enough that it's worth hyper-optimizing.
64 // x86 can do this in one instruction if ptr is in rsi.
65 // (This is why p is the second argument to Fn: it's passed in rsi.)
66 #if defined(__GNUC__) && defined(__x86_64__)
67 void* rax;
68 __asm__("lodsq" : "=a"(rax), "+S"(*ptr));
69 return rax;
70 #else
71 return *(*ptr)++;
72 #endif
73 }
74
75 // Stages are logically a pipeline, and physically are contiguous in an array.
76 // To get to the next stage, we just increment our pointer to the next array element.
77 SI void SK_VECTORCALL next(size_t x_tail, void** p, SkNf r, SkNf g, SkNf b, SkNf a,
78 SkNf dr, SkNf dg, SkNf db, SkNf da) {
79 auto next = (Fn)load_and_increment(&p);
80 next(x_tail,p, r,g,b,a, dr,dg,db,da);
81 }
82
83 // Stages defined below always call next.
84 // This is always the last stage, a backstop that actually returns to the caller when done.
85 SI void SK_VECTORCALL just_return(size_t, void**, SkNf, SkNf, SkNf, SkNf,
86 SkNf, SkNf, SkNf, SkNf) {}
87
88 #define STAGE(name) \
89 static SK_ALWAYS_INLINE void name##_kernel(size_t x, size_t tail, \
90 SkNf& r, SkNf& g, SkNf& b, SkNf& a, \
91 SkNf& dr, SkNf& dg, SkNf& db, SkNf& da); \
92 SI void SK_VECTORCALL name(size_t x_tail, void** p, \
93 SkNf r, SkNf g, SkNf b, SkNf a, \
94 SkNf dr, SkNf dg, SkNf db, SkNf da) { \
95 name##_kernel(x_tail/N, x_tail%N, r,g,b,a, dr,dg,db,da); \
96 next(x_tail,p, r,g,b,a, dr,dg,db,da); \
97 } \
98 static SK_ALWAYS_INLINE void name##_kernel(size_t x, size_t tail, \
99 SkNf& r, SkNf& g, SkNf& b, SkNf& a, \
100 SkNf& dr, SkNf& dg, SkNf& db, SkNf& da)
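// So writing STAGE(foo) { ... } defines a Fn named foo that splits x_tail back into x and
// tail, runs the kernel body, then hands off to the next stage via next().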
101
102 #define STAGE_CTX(name, Ctx) \
103 static SK_ALWAYS_INLINE void name##_kernel(Ctx ctx, size_t x, size_t tail, \
104 SkNf& r, SkNf& g, SkNf& b, SkNf& a, \
105 SkNf& dr, SkNf& dg, SkNf& db, SkNf& da); \
106 SI void SK_VECTORCALL name(size_t x_tail, void** p, \
107 SkNf r, SkNf g, SkNf b, SkNf a, \
108 SkNf dr, SkNf dg, SkNf db, SkNf da) { \
109 auto ctx = (Ctx)load_and_increment(&p); \
110 name##_kernel(ctx, x_tail/N, x_tail%N, r,g,b,a, dr,dg,db,da); \
111 next(x_tail,p, r,g,b,a, dr,dg,db,da); \
112 } \
113 static SK_ALWAYS_INLINE void name##_kernel(Ctx ctx, size_t x, size_t tail, \
114 SkNf& r, SkNf& g, SkNf& b, SkNf& a, \
115 SkNf& dr, SkNf& dg, SkNf& db, SkNf& da)
116
117 // Many xfermodes apply the same logic to each channel.
118 #define RGBA_XFERMODE(name) \
119 static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa, \
120 const SkNf& d, const SkNf& da); \
121 SI void SK_VECTORCALL name(size_t x_tail, void** p, \
122 SkNf r, SkNf g, SkNf b, SkNf a, \
123 SkNf dr, SkNf dg, SkNf db, SkNf da) { \
124 r = name##_kernel(r,a,dr,da); \
125 g = name##_kernel(g,a,dg,da); \
126 b = name##_kernel(b,a,db,da); \
127 a = name##_kernel(a,a,da,da); \
128 next(x_tail,p, r,g,b,a, dr,dg,db,da); \
129 } \
130 static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa, \
131 const SkNf& d, const SkNf& da)
132
133 // Most of the rest apply the same logic to color channels and use srcover's alpha logic.
134 #define RGB_XFERMODE(name) \
135 static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa, \
136 const SkNf& d, const SkNf& da); \
137 SI void SK_VECTORCALL name(size_t x_tail, void** p, \
138 SkNf r, SkNf g, SkNf b, SkNf a, \
139 SkNf dr, SkNf dg, SkNf db, SkNf da) { \
140 r = name##_kernel(r,a,dr,da); \
141 g = name##_kernel(g,a,dg,da); \
142 b = name##_kernel(b,a,db,da); \
143 a = a + (da * (1.0f-a)); \
144 next(x_tail,p, r,g,b,a, dr,dg,db,da); \
145 } \
146 static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa, \
147 const SkNf& d, const SkNf& da)
148
149 template <typename T>
150 SI SkNx<N,T> load(size_t tail, const T* src) {
151 if (tail) {
152 T buf[8];
153 memset(buf, 0, 8*sizeof(T));
154 switch (tail & (N-1)) {
155 case 7: buf[6] = src[6];
156 case 6: buf[5] = src[5];
157 case 5: buf[4] = src[4];
158 case 4: buf[3] = src[3];
159 case 3: buf[2] = src[2];
160 case 2: buf[1] = src[1];
161 }
162 buf[0] = src[0];
163 return SkNx<N,T>::Load(buf);
164 }
165 return SkNx<N,T>::Load(src);
166 }
167 template <typename T>
168 SI SkNx<N,T> gather(size_t tail, const T* src, const SkNi& offset) {
169 if (tail) {
170 T buf[8] = {0};
171 switch (tail & (N-1)) {
172 case 7: buf[6] = src[offset[6]];
173 case 6: buf[5] = src[offset[5]];
174 case 5: buf[4] = src[offset[4]];
175 case 4: buf[3] = src[offset[3]];
176 case 3: buf[2] = src[offset[2]];
177 case 2: buf[1] = src[offset[1]];
178 }
179 buf[0] = src[offset[0]];
180 return SkNx<N,T>::Load(buf);
181 }
182 T buf[8];
183 for (size_t i = 0; i < N; i++) {
184 buf[i] = src[offset[i]];
185 }
186 return SkNx<N,T>::Load(buf);
187 }
188 template <typename T>
189 SI void store(size_t tail, const SkNx<N,T>& v, T* dst) {
190 if (tail) {
191 switch (tail & (N-1)) {
192 case 7: dst[6] = v[6];
193 case 6: dst[5] = v[5];
194 case 5: dst[4] = v[4];
195 case 4: dst[3] = v[3];
196 case 3: dst[2] = v[2];
197 case 2: dst[1] = v[1];
198 }
199 dst[0] = v[0];
200 return;
201 }
202 v.store(dst);
203 }
204
205 #if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
206     SI __m256i mask(size_t tail) {
207 static const int masks[][8] = {
208 {~0,~0,~0,~0, ~0,~0,~0,~0 }, // remember, tail == 0 ~~> load all N
209 {~0, 0, 0, 0, 0, 0, 0, 0 },
210 {~0,~0, 0, 0, 0, 0, 0, 0 },
211 {~0,~0,~0, 0, 0, 0, 0, 0 },
212 {~0,~0,~0,~0, 0, 0, 0, 0 },
213 {~0,~0,~0,~0, ~0, 0, 0, 0 },
214 {~0,~0,~0,~0, ~0,~0, 0, 0 },
215 {~0,~0,~0,~0, ~0,~0,~0, 0 },
216 };
217 return SkNi::Load(masks + tail).fVec;
218 }
219
220     SI SkNi load(size_t tail, const int32_t* src) {
221 return tail ? _mm256_maskload_epi32((const int*)src, mask(tail))
222 : SkNi::Load(src);
223 }
224     SI SkNu load(size_t tail, const uint32_t* src) {
225 return tail ? _mm256_maskload_epi32((const int*)src, mask(tail))
226 : SkNu::Load(src);
227 }
228     SI SkNf load(size_t tail, const float* src) {
229 return tail ? _mm256_maskload_ps((const float*)src, mask(tail))
230 : SkNf::Load(src);
231 }
232     SI SkNi gather(size_t tail, const int32_t* src, const SkNi& offset) {
233 auto m = mask(tail);
234 return _mm256_mask_i32gather_epi32(SkNi(0).fVec, (const int*)src, offset.fVec, m, 4);
235 }
236     SI SkNu gather(size_t tail, const uint32_t* src, const SkNi& offset) {
237 auto m = mask(tail);
238 return _mm256_mask_i32gather_epi32(SkNi(0).fVec, (const int*)src, offset.fVec, m, 4);
239 }
240     SI SkNf gather(size_t tail, const float* src, const SkNi& offset) {
241 auto m = _mm256_castsi256_ps(mask(tail));
242 return _mm256_mask_i32gather_ps(SkNf(0).fVec, (const float*)src, offset.fVec, m, 4);
243 }
244
245 static const char* bug = "I don't think MSAN understands maskstore.";
246
247     SI void store(size_t tail, const SkNi& v, int32_t* dst) {
248 if (tail) {
249 _mm256_maskstore_epi32((int*)dst, mask(tail), v.fVec);
250 return sk_msan_mark_initialized(dst, dst+tail, bug);
251 }
252 v.store(dst);
253 }
254     SI void store(size_t tail, const SkNu& v, uint32_t* dst) {
255 if (tail) {
256 _mm256_maskstore_epi32((int*)dst, mask(tail), v.fVec);
257 return sk_msan_mark_initialized(dst, dst+tail, bug);
258 }
259 v.store(dst);
260 }
261     SI void store(size_t tail, const SkNf& v, float* dst) {
262 if (tail) {
263 _mm256_maskstore_ps((float*)dst, mask(tail), v.fVec);
264 return sk_msan_mark_initialized(dst, dst+tail, bug);
265 }
266 v.store(dst);
267 }
268 #endif
269
270 SI SkNf SkNf_fma(const SkNf& f, const SkNf& m, const SkNf& a) { return SkNx_fma(f,m,a); }
271
272 SI SkNi SkNf_round(const SkNf& x, const SkNf& scale) {
273 // Every time I try, _mm_cvtps_epi32 benches as slower than using FMA and _mm_cvttps_epi32. :/
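    // (Scaling, adding 0.5, then truncating rounds non-negative values to nearest.)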
274 return SkNx_cast<int>(SkNf_fma(x,scale, 0.5f));
275 }
276
277 SI SkNf SkNf_from_byte(const SkNi& x) {
278 // Same trick as in store_8888: 0x470000BB == 32768.0f + BB/256.0f for all bytes BB.
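    // (0x47000000 is the bit pattern of 32768.0f, so OR-ing a byte BB into the low
    //  mantissa bits adds exactly BB/256.)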
279 auto v = 0x47000000 | x;
280 // Read this as (pun_float(v) - 32768.0f) * (256/255.0f), redistributed to be an FMA.
281 return SkNf_fma(SkNf::Load(&v), 256/255.0f, -32768*256/255.0f);
282 }
283 SI SkNf SkNf_from_byte(const SkNu& x) { return SkNf_from_byte(SkNi::Load(&x)); }
284 SI SkNf SkNf_from_byte(const SkNb& x) { return SkNf_from_byte(SkNx_cast<int>(x)); }
285
286 SI void from_8888(const SkNu& _8888, SkNf* r, SkNf* g, SkNf* b, SkNf* a) {
287 *r = SkNf_from_byte((_8888 ) & 0xff);
288 *g = SkNf_from_byte((_8888 >> 8) & 0xff);
289 *b = SkNf_from_byte((_8888 >> 16) & 0xff);
290 *a = SkNf_from_byte((_8888 >> 24) );
291 }
292 SI void from_4444(const SkNh& _4444, SkNf* r, SkNf* g, SkNf* b, SkNf* a) {
293 auto _32_bit = SkNx_cast<int>(_4444);
294
295 *r = SkNx_cast<float>(_32_bit & (0xF << SK_R4444_SHIFT)) * (1.0f / (0xF << SK_R4444_SHIFT));
296 *g = SkNx_cast<float>(_32_bit & (0xF << SK_G4444_SHIFT)) * (1.0f / (0xF << SK_G4444_SHIFT));
297 *b = SkNx_cast<float>(_32_bit & (0xF << SK_B4444_SHIFT)) * (1.0f / (0xF << SK_B4444_SHIFT));
298 *a = SkNx_cast<float>(_32_bit & (0xF << SK_A4444_SHIFT)) * (1.0f / (0xF << SK_A4444_SHIFT));
299 }
300 SI void from_565(const SkNh& _565, SkNf* r, SkNf* g, SkNf* b) {
301 auto _32_bit = SkNx_cast<int>(_565);
302
303 *r = SkNx_cast<float>(_32_bit & SK_R16_MASK_IN_PLACE) * (1.0f / SK_R16_MASK_IN_PLACE);
304 *g = SkNx_cast<float>(_32_bit & SK_G16_MASK_IN_PLACE) * (1.0f / SK_G16_MASK_IN_PLACE);
305 *b = SkNx_cast<float>(_32_bit & SK_B16_MASK_IN_PLACE) * (1.0f / SK_B16_MASK_IN_PLACE);
306 }
307 SI void from_f16(const void* px, SkNf* r, SkNf* g, SkNf* b, SkNf* a) {
308 SkNh rh, gh, bh, ah;
309 SkNh::Load4(px, &rh, &gh, &bh, &ah);
310
311 *r = SkHalfToFloat_finite_ftz(rh);
312 *g = SkHalfToFloat_finite_ftz(gh);
313 *b = SkHalfToFloat_finite_ftz(bh);
314 *a = SkHalfToFloat_finite_ftz(ah);
315 }
316
317 STAGE_CTX(trace, const char*) {
318 SkDebugf("%s\n", ctx);
319 }
320 STAGE(registers) {
321 auto print = [](const char* name, const SkNf& v) {
322 SkDebugf("%s:", name);
323 for (int i = 0; i < N; i++) {
324 SkDebugf(" %g", v[i]);
325 }
326 SkDebugf("\n");
327 };
328 print(" r", r);
329 print(" g", g);
330 print(" b", b);
331 print(" a", a);
332 print("dr", dr);
333 print("dg", dg);
334 print("db", db);
335 print("da", da);
336 }
337
338 STAGE(clamp_0) {
339 a = SkNf::Max(a, 0.0f);
340 r = SkNf::Max(r, 0.0f);
341 g = SkNf::Max(g, 0.0f);
342 b = SkNf::Max(b, 0.0f);
343 }
344 STAGE(clamp_1) {
345 a = SkNf::Min(a, 1.0f);
346 r = SkNf::Min(r, 1.0f);
347 g = SkNf::Min(g, 1.0f);
348 b = SkNf::Min(b, 1.0f);
349 }
350 STAGE(clamp_a) {
351 a = SkNf::Min(a, 1.0f);
352 r = SkNf::Min(r, a);
353 g = SkNf::Min(g, a);
354 b = SkNf::Min(b, a);
355 }
356
357 STAGE(unpremul) {
358 auto scale = (a == 0.0f).thenElse(0.0f, 1.0f/a);
359 r *= scale;
360 g *= scale;
361 b *= scale;
362 }
363 STAGE(premul) {
364 r *= a;
365 g *= a;
366 b *= a;
367 }
368
369 STAGE_CTX(set_rgb, const float*) {
370 r = ctx[0];
371 g = ctx[1];
372 b = ctx[2];
373 }
374 STAGE(swap_rb) { SkTSwap(r,b); }
375
376 STAGE(move_src_dst) {
377 dr = r;
378 dg = g;
379 db = b;
380 da = a;
381 }
382 STAGE(move_dst_src) {
383 r = dr;
384 g = dg;
385 b = db;
386 a = da;
387 }
388 STAGE(swap) {
389 SkTSwap(r,dr);
390 SkTSwap(g,dg);
391 SkTSwap(b,db);
392 SkTSwap(a,da);
393 }
394
395 STAGE(from_srgb) {
396 r = sk_linear_from_srgb_math(r);
397 g = sk_linear_from_srgb_math(g);
398 b = sk_linear_from_srgb_math(b);
399 }
400 STAGE(to_srgb) {
401 r = sk_linear_to_srgb_needs_round(r);
402 g = sk_linear_to_srgb_needs_round(g);
403 b = sk_linear_to_srgb_needs_round(b);
404 }
405
406 STAGE(from_2dot2) {
407 auto from_2dot2 = [](const SkNf& x) {
408 // x^(141/64) = x^(2.20312) is a great approximation of the true value, x^(2.2).
409 // (note: x^(35/16) = x^(2.1875) is an okay one as well and would be quicker)
410 auto x16 = x.rsqrt().rsqrt().rsqrt().rsqrt(); // x^(1/16) = x^(4/64);
411 auto x64 = x16.rsqrt().rsqrt(); // x^(1/64)
412
413 // x^(141/64) = x^(128/64) * x^(12/64) * x^(1/64)
414 return SkNf::Max((x*x) * (x16*x16*x16) * (x64), 0.0f);
415 };
416
417 r = from_2dot2(r);
418 g = from_2dot2(g);
419 b = from_2dot2(b);
420 }
421 STAGE(to_2dot2) {
422 auto to_2dot2 = [](const SkNf& x) {
423 // x^(29/64) is a very good approximation of the true value, x^(1/2.2).
424 auto x2 = x.rsqrt(), // x^(-1/2)
425 x32 = x2.rsqrt().rsqrt().rsqrt().rsqrt(), // x^(-1/32)
426 x64 = x32.rsqrt(); // x^(+1/64)
427
428 // 29 = 32 - 2 - 1
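        // i.e. x^(29/64) = x^(32/64) * x^(-2/64) * x^(-1/64).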
429 return SkNf::Max(x2.invert() * x32 * x64.invert(), 0.0f); // Watch out for NaN.
430 };
431
432 r = to_2dot2(r);
433 g = to_2dot2(g);
434 b = to_2dot2(b);
435 }
436
437 // The default shader produces a constant color (from the SkPaint).
438 STAGE_CTX(constant_color, const SkPM4f*) {
439 r = ctx->r();
440 g = ctx->g();
441 b = ctx->b();
442 a = ctx->a();
443 }
444
445 // Set up registers with values relevant to shaders.
446 STAGE_CTX(seed_shader, const int*) {
447 int y = *ctx;
448
449 static const float dx[] = { 0,1,2,3,4,5,6,7 };
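    // (SkNf::Load(dx) below reads just the low N entries; the table covers the widest case, N == 8.)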
450 r = x + 0.5f + SkNf::Load(dx); // dst pixel center x coordinates
451 g = y + 0.5f; // dst pixel center y coordinate(s)
452 b = 1.0f;
453 a = 0.0f;
454 dr = dg = db = da = 0.0f;
455 }
456
457 // s' = sc for a scalar c.
458 STAGE_CTX(scale_1_float, const float*) {
459 SkNf c = *ctx;
460
461 r *= c;
462 g *= c;
463 b *= c;
464 a *= c;
465 }
466 // s' = sc for 8-bit c.
467 STAGE_CTX(scale_u8, const uint8_t**) {
468 auto ptr = *ctx + x;
469 SkNf c = SkNf_from_byte(load(tail, ptr));
470
471 r = r*c;
472 g = g*c;
473 b = b*c;
474 a = a*c;
475 }
476
477 SI SkNf lerp(const SkNf& from, const SkNf& to, const SkNf& cov) {
478 return SkNf_fma(to-from, cov, from);
479 }
480
481 // s' = d(1-c) + sc, for a scalar c.
482 STAGE_CTX(lerp_1_float, const float*) {
483 SkNf c = *ctx;
484
485 r = lerp(dr, r, c);
486 g = lerp(dg, g, c);
487 b = lerp(db, b, c);
488 a = lerp(da, a, c);
489 }
490
491 // s' = d(1-c) + sc for 8-bit c.
492 STAGE_CTX(lerp_u8, const uint8_t**) {
493 auto ptr = *ctx + x;
494 SkNf c = SkNf_from_byte(load(tail, ptr));
495
496 r = lerp(dr, r, c);
497 g = lerp(dg, g, c);
498 b = lerp(db, b, c);
499 a = lerp(da, a, c);
500 }
501
502 // s' = d(1-c) + sc for 565 c.
503 STAGE_CTX(lerp_565, const uint16_t**) {
504 auto ptr = *ctx + x;
505 SkNf cr, cg, cb;
506 from_565(load(tail, ptr), &cr, &cg, &cb);
507
508 r = lerp(dr, r, cr);
509 g = lerp(dg, g, cg);
510 b = lerp(db, b, cb);
511 a = 1.0f;
512 }
513
514 STAGE_CTX(load_a8, const uint8_t**) {
515 auto ptr = *ctx + x;
516 r = g = b = 0.0f;
517 a = SkNf_from_byte(load(tail, ptr));
518 }
519 STAGE_CTX(store_a8, uint8_t**) {
520 auto ptr = *ctx + x;
521 store(tail, SkNx_cast<uint8_t>(SkNf_round(255.0f, a)), ptr);
522 }
523
524 STAGE_CTX(load_g8, const uint8_t**) {
525 auto ptr = *ctx + x;
526 r = g = b = SkNf_from_byte(load(tail, ptr));
527 a = 1.0f;
528 }
529
530 STAGE_CTX(load_565, const uint16_t**) {
531 auto ptr = *ctx + x;
532 from_565(load(tail, ptr), &r,&g,&b);
533 a = 1.0f;
534 }
535 STAGE_CTX(store_565, uint16_t**) {
536 auto ptr = *ctx + x;
537 store(tail, SkNx_cast<uint16_t>( SkNf_round(r, SK_R16_MASK) << SK_R16_SHIFT
538 | SkNf_round(g, SK_G16_MASK) << SK_G16_SHIFT
539 | SkNf_round(b, SK_B16_MASK) << SK_B16_SHIFT), ptr);
540 }
541
542 STAGE_CTX(load_4444, const uint16_t**) {
543 auto ptr = *ctx + x;
544 from_4444(load(tail, ptr), &r,&g,&b,&a);
545 }
546 STAGE_CTX(store_4444, uint16_t**) {
547 auto ptr = *ctx + x;
548 store(tail, SkNx_cast<uint16_t>( SkNf_round(r, 0xF) << SK_R4444_SHIFT
549 | SkNf_round(g, 0xF) << SK_G4444_SHIFT
550 | SkNf_round(b, 0xF) << SK_B4444_SHIFT
551 | SkNf_round(a, 0xF) << SK_A4444_SHIFT), ptr);
552 }
553
554 STAGE_CTX(load_f16, const uint64_t**) {
555 auto ptr = *ctx + x;
556
557 const void* src = ptr;
558 SkNx<N, uint64_t> px;
559 if (tail) {
560 px = load(tail, ptr);
561 src = &px;
562 }
563 from_f16(src, &r, &g, &b, &a);
564 }
565 STAGE_CTX(store_f16, uint64_t**) {
566 auto ptr = *ctx + x;
567
568 SkNx<N, uint64_t> px;
569 SkNh::Store4(tail ? (void*)&px : (void*)ptr, SkFloatToHalf_finite_ftz(r),
570 SkFloatToHalf_finite_ftz(g),
571 SkFloatToHalf_finite_ftz(b),
572 SkFloatToHalf_finite_ftz(a));
573 if (tail) {
574 store(tail, px, ptr);
575 }
576 }
577
578 STAGE_CTX(load_f32, const SkPM4f**) {
579 auto ptr = *ctx + x;
580
581 const void* src = ptr;
582 SkNx<N, SkPM4f> px;
583 if (tail) {
584 px = load(tail, ptr);
585 src = &px;
586 }
587 SkNf::Load4(src, &r, &g, &b, &a);
588 }
589 STAGE_CTX(store_f32, SkPM4f**) {
590 auto ptr = *ctx + x;
591
592 SkNx<N, SkPM4f> px;
593 SkNf::Store4(tail ? (void*)&px : (void*)ptr, r,g,b,a);
594 if (tail) {
595 store(tail, px, ptr);
596 }
597 }
598
599
600 STAGE_CTX(load_8888, const uint32_t**) {
601 auto ptr = *ctx + x;
602 from_8888(load(tail, ptr), &r, &g, &b, &a);
603 }
604 STAGE_CTX(store_8888, uint32_t**) {
605 auto byte = [](const SkNf& x, int ix) {
606 // Here's a neat trick: 0x47000000 == 32768.0f, and 0x470000ff == 32768.0f + (255/256.0f).
607 auto v = SkNf_fma(255/256.0f, x, 32768.0f);
608 switch (ix) {
609 case 0: return SkNi::Load(&v) & 0xff; // R
610 case 3: return SkNi::Load(&v) << 24; // A
611 }
612 return (SkNi::Load(&v) & 0xff) << (8*ix); // B or G
613 };
614
615 auto ptr = *ctx + x;
616 store(tail, byte(r,0)|byte(g,1)|byte(b,2)|byte(a,3), (int*)ptr);
617 }
618
619 STAGE_CTX(load_u16_be, const uint64_t**) {
620 auto ptr = *ctx + x;
621 const void* src = ptr;
622 SkNx<N, uint64_t> px;
623 if (tail) {
624 px = load(tail, ptr);
625 src = &px;
626 }
627
628 SkNh rh, gh, bh, ah;
629 SkNh::Load4(src, &rh, &gh, &bh, &ah);
630 r = (1.0f / 65535.0f) * SkNx_cast<float>((rh << 8) | (rh >> 8));
631 g = (1.0f / 65535.0f) * SkNx_cast<float>((gh << 8) | (gh >> 8));
632 b = (1.0f / 65535.0f) * SkNx_cast<float>((bh << 8) | (bh >> 8));
633 a = (1.0f / 65535.0f) * SkNx_cast<float>((ah << 8) | (ah >> 8));
634 }
635
636 STAGE_CTX(load_rgb_u16_be, const uint16_t**) {
637 auto ptr = *ctx + 3*x;
638 const void* src = ptr;
639 uint16_t buf[N*3] = {0};
640 if (tail) {
641 memcpy(buf, src, tail*3*sizeof(uint16_t));
642 src = buf;
643 }
644
645 SkNh rh, gh, bh;
646 SkNh::Load3(src, &rh, &gh, &bh);
647 r = (1.0f / 65535.0f) * SkNx_cast<float>((rh << 8) | (rh >> 8));
648 g = (1.0f / 65535.0f) * SkNx_cast<float>((gh << 8) | (gh >> 8));
649 b = (1.0f / 65535.0f) * SkNx_cast<float>((bh << 8) | (bh >> 8));
650 a = 1.0f;
651 }
652
653 STAGE_CTX(store_u16_be, uint64_t**) {
654 auto to_u16_be = [](const SkNf& x) {
655 SkNh x16 = SkNx_cast<uint16_t>(65535.0f * x);
656 return (x16 << 8) | (x16 >> 8);
657 };
658
659 auto ptr = *ctx + x;
660 SkNx<N, uint64_t> px;
661 SkNh::Store4(tail ? (void*)&px : (void*)ptr, to_u16_be(r),
662 to_u16_be(g),
663 to_u16_be(b),
664 to_u16_be(a));
665 if (tail) {
666 store(tail, px, ptr);
667 }
668 }
669
670 STAGE_CTX(load_tables, const LoadTablesContext*) {
671 auto ptr = (const uint32_t*)ctx->fSrc + x;
672
673 SkNu rgba = load(tail, ptr);
674 auto to_int = [](const SkNu& v) { return SkNi::Load(&v); };
675 r = gather(tail, ctx->fR, to_int((rgba >> 0) & 0xff));
676 g = gather(tail, ctx->fG, to_int((rgba >> 8) & 0xff));
677 b = gather(tail, ctx->fB, to_int((rgba >> 16) & 0xff));
678 a = SkNf_from_byte(rgba >> 24);
679 }
680
681 STAGE_CTX(load_tables_u16_be, const LoadTablesContext*) {
682 auto ptr = (const uint64_t*)ctx->fSrc + x;
683 const void* src = ptr;
684 SkNx<N, uint64_t> px;
685 if (tail) {
686 px = load(tail, ptr);
687 src = &px;
688 }
689
690 SkNh rh, gh, bh, ah;
691 SkNh::Load4(src, &rh, &gh, &bh, &ah);
692
693 // ctx->fSrc is big-endian, so "& 0xff" grabs the 8 most significant bits of each component.
694 r = gather(tail, ctx->fR, SkNx_cast<int>(rh & 0xff));
695 g = gather(tail, ctx->fG, SkNx_cast<int>(gh & 0xff));
696 b = gather(tail, ctx->fB, SkNx_cast<int>(bh & 0xff));
697 a = (1.0f / 65535.0f) * SkNx_cast<float>((ah << 8) | (ah >> 8));
698 }
699
700 STAGE_CTX(load_tables_rgb_u16_be, const LoadTablesContext*) {
701 auto ptr = (const uint16_t*)ctx->fSrc + 3*x;
702 const void* src = ptr;
703 uint16_t buf[N*3] = {0};
704 if (tail) {
705 memcpy(buf, src, tail*3*sizeof(uint16_t));
706 src = buf;
707 }
708
709 SkNh rh, gh, bh;
710 SkNh::Load3(src, &rh, &gh, &bh);
711
712 // ctx->fSrc is big-endian, so "& 0xff" grabs the 8 most significant bits of each component.
713 r = gather(tail, ctx->fR, SkNx_cast<int>(rh & 0xff));
714 g = gather(tail, ctx->fG, SkNx_cast<int>(gh & 0xff));
715 b = gather(tail, ctx->fB, SkNx_cast<int>(bh & 0xff));
716 a = 1.0f;
717 }
718
719 SI SkNf inv(const SkNf& x) { return 1.0f - x; }
720
721 RGBA_XFERMODE(clear) { return 0.0f; }
722 RGBA_XFERMODE(srcatop) { return s*da + d*inv(sa); }
723 RGBA_XFERMODE(srcin) { return s * da; }
724 RGBA_XFERMODE(srcout) { return s * inv(da); }
725 RGBA_XFERMODE(srcover) { return SkNf_fma(d, inv(sa), s); }
726 RGBA_XFERMODE(dstatop) { return srcatop_kernel(d,da,s,sa); }
727 RGBA_XFERMODE(dstin) { return srcin_kernel (d,da,s,sa); }
728 RGBA_XFERMODE(dstout) { return srcout_kernel (d,da,s,sa); }
729 RGBA_XFERMODE(dstover) { return srcover_kernel(d,da,s,sa); }
730
731 RGBA_XFERMODE(modulate) { return s*d; }
732 RGBA_XFERMODE(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
733 RGBA_XFERMODE(plus_) { return s + d; }
734 RGBA_XFERMODE(screen) { return s + d - s*d; }
735 RGBA_XFERMODE(xor_) { return s*inv(da) + d*inv(sa); }
736
737 RGB_XFERMODE(colorburn) {
738 return (d == da ).thenElse(d + s*inv(da),
739 (s == 0.0f).thenElse(s + d*inv(sa),
740 sa*(da - SkNf::Min(da, (da-d)*sa/s)) + s*inv(da) + d*inv(sa)));
741 }
742 RGB_XFERMODE(colordodge) {
743 return (d == 0.0f).thenElse(d + s*inv(da),
744 (s == sa ).thenElse(s + d*inv(sa),
745 sa*SkNf::Min(da, (d*sa)/(sa - s)) + s*inv(da) + d*inv(sa)));
746 }
747 RGB_XFERMODE(darken) { return s + d - SkNf::Max(s*da, d*sa); }
748 RGB_XFERMODE(difference) { return s + d - 2.0f*SkNf::Min(s*da,d*sa); }
749 RGB_XFERMODE(exclusion) { return s + d - 2.0f*s*d; }
750 RGB_XFERMODE(hardlight) {
751 return s*inv(da) + d*inv(sa)
752 + (2.0f*s <= sa).thenElse(2.0f*s*d, sa*da - 2.0f*(da-d)*(sa-s));
753 }
754 RGB_XFERMODE(lighten) { return s + d - SkNf::Min(s*da, d*sa); }
755 RGB_XFERMODE(overlay) { return hardlight_kernel(d,da,s,sa); }
756 RGB_XFERMODE(softlight) {
757 SkNf m = (da > 0.0f).thenElse(d / da, 0.0f),
758 s2 = 2.0f*s,
759 m4 = 4.0f*m;
760
761 // The logic forks three ways:
762 // 1. dark src?
763 // 2. light src, dark dst?
764 // 3. light src, light dst?
765 SkNf darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1.
766 darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m, // Used in case 2.
767 liteDst = m.rsqrt().invert() - m, // Used in case 3.
768 liteSrc = d*sa + da*(s2 - sa) * (4.0f*d <= da).thenElse(darkDst, liteDst); // 2 or 3?
769 return s*inv(da) + d*inv(sa) + (s2 <= sa).thenElse(darkSrc, liteSrc); // 1 or (2 or 3)?
770 }
771
772 STAGE(luminance_to_alpha) {
773 a = SK_LUM_COEFF_R*r + SK_LUM_COEFF_G*g + SK_LUM_COEFF_B*b;
774 r = g = b = 0;
775 }
776
777 STAGE(rgb_to_hsl) {
778 auto max = SkNf::Max(SkNf::Max(r, g), b);
779 auto min = SkNf::Min(SkNf::Min(r, g), b);
780 auto l = 0.5f * (max + min);
781
782 auto d = max - min;
783 auto d_inv = 1.0f/d;
784 auto s = (max == min).thenElse(0.0f,
785 d/(l > 0.5f).thenElse(2.0f - max - min, max + min));
786 SkNf h = (max != r).thenElse(0.0f,
787 (g - b)*d_inv + (g < b).thenElse(6.0f, 0.0f));
788 h = (max == g).thenElse((b - r)*d_inv + 2.0f, h);
789 h = (max == b).thenElse((r - g)*d_inv + 4.0f, h);
790 h *= (1/6.0f);
791
792 h = (max == min).thenElse(0.0f, h);
793
794 r = h;
795 g = s;
796 b = l;
797 }
798
799 STAGE(hsl_to_rgb) {
800 auto h = r;
801 auto s = g;
802 auto l = b;
803 auto q = (l < 0.5f).thenElse(l*(1.0f + s), l + s - l*s);
804 auto p = 2.0f*l - q;
805
806 auto hue_to_rgb = [](const SkNf& p, const SkNf& q, const SkNf& t) {
807 auto t2 = (t < 0.0f).thenElse(t + 1.0f, (t > 1.0f).thenElse(t - 1.0f, t));
808 return (t2 < (1/6.0f)).thenElse(
809 p + (q - p)*6.0f*t, (t2 < (3/6.0f)).thenElse(
810 q, (t2 < (4/6.0f)).thenElse(
811 p + (q - p)*((4/6.0f) - t2)*6.0f, p)));
812 };
813
814 r = (s == 0.f).thenElse(l, hue_to_rgb(p, q, h + (1/3.0f)));
815 g = (s == 0.f).thenElse(l, hue_to_rgb(p, q, h));
816 b = (s == 0.f).thenElse(l, hue_to_rgb(p, q, h - (1/3.0f)));
817 }
818
819 STAGE_CTX(matrix_2x3, const float*) {
820 auto m = ctx;
821
822 auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[2], m[4])),
823 G = SkNf_fma(r,m[1], SkNf_fma(g,m[3], m[5]));
824 r = R;
825 g = G;
826 }
827 STAGE_CTX(matrix_3x4, const float*) {
828 auto m = ctx;
829
830 auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[3], SkNf_fma(b,m[6], m[ 9]))),
831 G = SkNf_fma(r,m[1], SkNf_fma(g,m[4], SkNf_fma(b,m[7], m[10]))),
832 B = SkNf_fma(r,m[2], SkNf_fma(g,m[5], SkNf_fma(b,m[8], m[11])));
833 r = R;
834 g = G;
835 b = B;
836 }
837 STAGE_CTX(matrix_4x5, const float*) {
838 auto m = ctx;
839
840 auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[4], SkNf_fma(b,m[ 8], SkNf_fma(a,m[12], m[16])))),
841 G = SkNf_fma(r,m[1], SkNf_fma(g,m[5], SkNf_fma(b,m[ 9], SkNf_fma(a,m[13], m[17])))),
842 B = SkNf_fma(r,m[2], SkNf_fma(g,m[6], SkNf_fma(b,m[10], SkNf_fma(a,m[14], m[18])))),
843 A = SkNf_fma(r,m[3], SkNf_fma(g,m[7], SkNf_fma(b,m[11], SkNf_fma(a,m[15], m[19]))));
844 r = R;
845 g = G;
846 b = B;
847 a = A;
848 }
849 STAGE_CTX(matrix_perspective, const float*) {
850 // N.B. unlike the matrix_NxM stages, this takes a row-major matrix.
851 auto m = ctx;
852
853 auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[1], m[2])),
854 G = SkNf_fma(r,m[3], SkNf_fma(g,m[4], m[5])),
855 Z = SkNf_fma(r,m[6], SkNf_fma(g,m[7], m[8]));
856 r = R * Z.invert();
857 g = G * Z.invert();
858 }
859
860 SI SkNf parametric(const SkNf& v, const SkColorSpaceTransferFn& p) {
861 float result[N]; // Unconstrained powf() doesn't vectorize well...
862 for (int i = 0; i < N; i++) {
863 float s = v[i];
864 result[i] = (s <= p.fD) ? p.fC * s + p.fF
865 : powf(s * p.fA + p.fB, p.fG) + p.fE;
866 }
867 // Clamp the output to [0, 1].
868 // Max(NaN, 0) = 0, but Max(0, NaN) = NaN, so we want this exact order to ensure NaN => 0
869 return SkNf::Min(SkNf::Max(SkNf::Load(result), 0.0f), 1.0f);
870 }
871 STAGE_CTX(parametric_r, const SkColorSpaceTransferFn*) { r = parametric(r, *ctx); }
872 STAGE_CTX(parametric_g, const SkColorSpaceTransferFn*) { g = parametric(g, *ctx); }
873 STAGE_CTX(parametric_b, const SkColorSpaceTransferFn*) { b = parametric(b, *ctx); }
874 STAGE_CTX(parametric_a, const SkColorSpaceTransferFn*) { a = parametric(a, *ctx); }
875
876 SI SkNf table(const SkNf& v, const SkTableTransferFn& table) {
877 float result[N];
878 for (int i = 0; i < N; i++) {
879 result[i] = interp_lut(v[i], table.fData, table.fSize);
880 }
881 // no need to clamp - tables are by-design [0,1] -> [0,1]
882 return SkNf::Load(result);
883 }
884 STAGE_CTX(table_r, const SkTableTransferFn*) { r = table(r, *ctx); }
885 STAGE_CTX(table_g, const SkTableTransferFn*) { g = table(g, *ctx); }
886 STAGE_CTX(table_b, const SkTableTransferFn*) { b = table(b, *ctx); }
887 STAGE_CTX(table_a, const SkTableTransferFn*) { a = table(a, *ctx); }
888
889 STAGE_CTX(color_lookup_table, const SkColorLookUpTable*) {
890 const SkColorLookUpTable* colorLUT = ctx;
891 SkASSERT(3 == colorLUT->inputChannels() || 4 == colorLUT->inputChannels());
892 SkASSERT(3 == colorLUT->outputChannels());
893 float result[3][N];
894 for (int i = 0; i < N; ++i) {
895 const float in[4] = { r[i], g[i], b[i], a[i] };
896 float out[3];
897 colorLUT->interp(out, in);
898 for (int j = 0; j < colorLUT->outputChannels(); ++j) {
899 result[j][i] = out[j];
900 }
901 }
902 r = SkNf::Load(result[0]);
903 g = SkNf::Load(result[1]);
904 b = SkNf::Load(result[2]);
905 if (4 == colorLUT->inputChannels()) {
906 // we must set the pixel to opaque, as the alpha channel was used
907 // as input before this.
908 a = 1.f;
909 }
910 }
911
912 STAGE(lab_to_xyz) {
913 const auto lab_l = r * 100.0f;
914 const auto lab_a = g * 255.0f - 128.0f;
915 const auto lab_b = b * 255.0f - 128.0f;
916 auto Y = (lab_l + 16.0f) * (1/116.0f);
917 auto X = lab_a * (1/500.0f) + Y;
918 auto Z = Y - (lab_b * (1/200.0f));
919
920 const auto X3 = X*X*X;
921 X = (X3 > 0.008856f).thenElse(X3, (X - (16/116.0f)) * (1/7.787f));
922 const auto Y3 = Y*Y*Y;
923 Y = (Y3 > 0.008856f).thenElse(Y3, (Y - (16/116.0f)) * (1/7.787f));
924 const auto Z3 = Z*Z*Z;
925 Z = (Z3 > 0.008856f).thenElse(Z3, (Z - (16/116.0f)) * (1/7.787f));
926
927 // adjust to D50 illuminant
928 X *= 0.96422f;
929 Y *= 1.00000f;
930 Z *= 0.82521f;
931
932 r = X;
933 g = Y;
934 b = Z;
935 }
936
937 SI SkNf assert_in_tile(const SkNf& v, float limit) {
938 for (int i = 0; i < N; i++) {
939 SkASSERT(0 <= v[i] && v[i] < limit);
940 }
941 return v;
942 }
943
944 SI SkNf ulp_before(float v) {
945 SkASSERT(v > 0);
946 SkNf vs(v);
947 SkNu uvs = SkNu::Load(&vs) - 1;
948 return SkNf::Load(&uvs);
949 }
950
951 SI SkNf clamp(const SkNf& v, float limit) {
952 SkNf result = SkNf::Max(0, SkNf::Min(v, ulp_before(limit)));
953 return assert_in_tile(result, limit);
954 }
955 SI SkNf repeat(const SkNf& v, float limit) {
956 SkNf result = v - (v/limit).floor()*limit;
957 // For small negative v, (v/limit).floor()*limit can dominate v in the subtraction,
958 // which leaves result == limit. We want result < limit, so clamp it one ULP.
959 result = SkNf::Min(result, ulp_before(limit));
960 return assert_in_tile(result, limit);
961 }
962 SI SkNf mirror(const SkNf& v, float l/*imit*/) {
963 SkNf result = ((v - l) - ((v - l) / (2*l)).floor()*(2*l) - l).abs();
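    // ((v-l) is wrapped into [0,2l), shifted back by l, and the abs() folds [-l,0) onto (0,l].)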
964 // Same deal as repeat.
965 result = SkNf::Min(result, ulp_before(l));
966 return assert_in_tile(result, l);
967 }
968 STAGE_CTX( clamp_x, const float*) { r = clamp (r, *ctx); }
969 STAGE_CTX(repeat_x, const float*) { r = repeat(r, *ctx); }
970 STAGE_CTX(mirror_x, const float*) { r = mirror(r, *ctx); }
971 STAGE_CTX( clamp_y, const float*) { g = clamp (g, *ctx); }
972 STAGE_CTX(repeat_y, const float*) { g = repeat(g, *ctx); }
973 STAGE_CTX(mirror_y, const float*) { g = mirror(g, *ctx); }
974
975 STAGE_CTX(save_xy, SkImageShaderContext*) {
976 r.store(ctx->x);
977 g.store(ctx->y);
978
979 // Whether bilinear or bicubic, all sample points have the same fractional offset (fx,fy).
980 // They're either the 4 corners of a logical 1x1 pixel or the 16 corners of a 3x3 grid
981 // surrounding (x,y), all (0.5,0.5) off-center.
982 auto fract = [](const SkNf& v) { return v - v.floor(); };
983 fract(r + 0.5f).store(ctx->fx);
984 fract(g + 0.5f).store(ctx->fy);
985 }
986
987 STAGE_CTX(accumulate, const SkImageShaderContext*) {
988 // Bilinear and bicubic filtering are both separable, so we'll end up with independent
989 // scale contributions in x and y that we multiply together to get each pixel's scale factor.
990 auto scale = SkNf::Load(ctx->scalex) * SkNf::Load(ctx->scaley);
991 dr = SkNf_fma(scale, r, dr);
992 dg = SkNf_fma(scale, g, dg);
993 db = SkNf_fma(scale, b, db);
994 da = SkNf_fma(scale, a, da);
995 }
996
997 // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
998 // are combined in direct proportion to their area overlapping that logical query pixel.
999 // At positive offsets, the x-axis contribution to that rectangular area is fx; (1-fx)
1000 // at negative x offsets. The y-axis is treated symmetrically.
1001 template <int Scale>
1002 SI void bilinear_x(SkImageShaderContext* ctx, SkNf* x) {
1003 *x = SkNf::Load(ctx->x) + Scale*0.5f;
1004 auto fx = SkNf::Load(ctx->fx);
1005 (Scale > 0 ? fx : (1.0f - fx)).store(ctx->scalex);
1006 }
1007 template <int Scale>
1008 SI void bilinear_y(SkImageShaderContext* ctx, SkNf* y) {
1009 *y = SkNf::Load(ctx->y) + Scale*0.5f;
1010 auto fy = SkNf::Load(ctx->fy);
1011 (Scale > 0 ? fy : (1.0f - fy)).store(ctx->scaley);
1012 }
1013 STAGE_CTX(bilinear_nx, SkImageShaderContext*) { bilinear_x<-1>(ctx, &r); }
1014 STAGE_CTX(bilinear_px, SkImageShaderContext*) { bilinear_x<+1>(ctx, &r); }
1015 STAGE_CTX(bilinear_ny, SkImageShaderContext*) { bilinear_y<-1>(ctx, &g); }
1016 STAGE_CTX(bilinear_py, SkImageShaderContext*) { bilinear_y<+1>(ctx, &g); }
1017
1018
1019 // In bicubic interpolation, the 16 pixels at +/- 0.5 and +/- 1.5 offsets from the sample
1020 // pixel center are combined with a non-uniform cubic filter, with high filter values near
1021 // the center and lower values farther away.
1022 //
1023 // We break this filter function into two parts, one for near +/- 0.5 offsets,
1024 // and one for far +/- 1.5 offsets.
1025 //
1026 // See GrBicubicEffect for details about this particular Mitchell-Netravali filter.
1027 SI SkNf bicubic_near(const SkNf& t) {
1028 // 1/18 + 9/18t + 27/18t^2 - 21/18t^3 == t ( t ( -21/18t + 27/18) + 9/18) + 1/18
1029 return SkNf_fma(t, SkNf_fma(t, SkNf_fma(-21/18.0f, t, 27/18.0f), 9/18.0f), 1/18.0f);
1030 }
1031 SI SkNf bicubic_far(const SkNf& t) {
1032 // 0/18 + 0/18*t - 6/18t^2 + 7/18t^3 == t^2 (7/18t - 6/18)
1033 return (t*t)*SkNf_fma(7/18.0f, t, -6/18.0f);
1034 }
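// For any fractional offset t, near(t) + near(1-t) + far(t) + far(1-t) == 1, so the four
// weights along each axis (and the 16 combined weights) need no renormalizing in accumulate.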
1035
1036 template <int Scale>
1037 SI void bicubic_x(SkImageShaderContext* ctx, SkNf* x) {
1038 *x = SkNf::Load(ctx->x) + Scale*0.5f;
1039 auto fx = SkNf::Load(ctx->fx);
1040 if (Scale == -3) { return bicubic_far (1.0f - fx).store(ctx->scalex); }
1041 if (Scale == -1) { return bicubic_near(1.0f - fx).store(ctx->scalex); }
1042 if (Scale == +1) { return bicubic_near( fx).store(ctx->scalex); }
1043 if (Scale == +3) { return bicubic_far ( fx).store(ctx->scalex); }
1044 SkDEBUGFAIL("unreachable");
1045 }
1046 template <int Scale>
1047 SI void bicubic_y(SkImageShaderContext* ctx, SkNf* y) {
1048 *y = SkNf::Load(ctx->y) + Scale*0.5f;
1049 auto fy = SkNf::Load(ctx->fy);
1050 if (Scale == -3) { return bicubic_far (1.0f - fy).store(ctx->scaley); }
1051 if (Scale == -1) { return bicubic_near(1.0f - fy).store(ctx->scaley); }
1052 if (Scale == +1) { return bicubic_near( fy).store(ctx->scaley); }
1053 if (Scale == +3) { return bicubic_far ( fy).store(ctx->scaley); }
1054 SkDEBUGFAIL("unreachable");
1055 }
1056 STAGE_CTX(bicubic_n3x, SkImageShaderContext*) { bicubic_x<-3>(ctx, &r); }
1057 STAGE_CTX(bicubic_n1x, SkImageShaderContext*) { bicubic_x<-1>(ctx, &r); }
1058 STAGE_CTX(bicubic_p1x, SkImageShaderContext*) { bicubic_x<+1>(ctx, &r); }
1059 STAGE_CTX(bicubic_p3x, SkImageShaderContext*) { bicubic_x<+3>(ctx, &r); }
1060
1061 STAGE_CTX(bicubic_n3y, SkImageShaderContext*) { bicubic_y<-3>(ctx, &g); }
1062 STAGE_CTX(bicubic_n1y, SkImageShaderContext*) { bicubic_y<-1>(ctx, &g); }
1063 STAGE_CTX(bicubic_p1y, SkImageShaderContext*) { bicubic_y<+1>(ctx, &g); }
1064 STAGE_CTX(bicubic_p3y, SkImageShaderContext*) { bicubic_y<+3>(ctx, &g); }
1065
1066
1067 template <typename T>
1068 SI SkNi offset_and_ptr(T** ptr, const SkImageShaderContext* ctx, const SkNf& x, const SkNf& y) {
1069 SkNi ix = SkNx_cast<int>(x),
1070 iy = SkNx_cast<int>(y);
1071 SkNi offset = iy*ctx->stride + ix;
1072
1073 *ptr = (const T*)ctx->pixels;
1074 return offset;
1075 }
1076
1077 STAGE_CTX(gather_a8, const SkImageShaderContext*) {
1078 const uint8_t* p;
1079 SkNi offset = offset_and_ptr(&p, ctx, r, g);
1080
1081 r = g = b = 0.0f;
1082 a = SkNf_from_byte(gather(tail, p, offset));
1083 }
1084 STAGE_CTX(gather_i8, const SkImageShaderContext*) {
1085 const uint8_t* p;
1086 SkNi offset = offset_and_ptr(&p, ctx, r, g);
1087
1088 SkNi ix = SkNx_cast<int>(gather(tail, p, offset));
1089 from_8888(gather(tail, ctx->ctable->readColors(), ix), &r, &g, &b, &a);
1090 }
1091 STAGE_CTX(gather_g8, const SkImageShaderContext*) {
1092 const uint8_t* p;
1093 SkNi offset = offset_and_ptr(&p, ctx, r, g);
1094
1095 r = g = b = SkNf_from_byte(gather(tail, p, offset));
1096 a = 1.0f;
1097 }
1098 STAGE_CTX(gather_565, const SkImageShaderContext*) {
1099 const uint16_t* p;
1100 SkNi offset = offset_and_ptr(&p, ctx, r, g);
1101
1102 from_565(gather(tail, p, offset), &r, &g, &b);
1103 a = 1.0f;
1104 }
1105 STAGE_CTX(gather_4444, const SkImageShaderContext*) {
1106 const uint16_t* p;
1107 SkNi offset = offset_and_ptr(&p, ctx, r, g);
1108
1109 from_4444(gather(tail, p, offset), &r, &g, &b, &a);
1110 }
1111 STAGE_CTX(gather_8888, const SkImageShaderContext*) {
1112 const uint32_t* p;
1113 SkNi offset = offset_and_ptr(&p, ctx, r, g);
1114
1115 from_8888(gather(tail, p, offset), &r, &g, &b, &a);
1116 }
1117 STAGE_CTX(gather_f16, const SkImageShaderContext*) {
1118 const uint64_t* p;
1119 SkNi offset = offset_and_ptr(&p, ctx, r, g);
1120
1121 auto px = gather(tail, p, offset);
1122 from_f16(&px, &r, &g, &b, &a);
1123 }
1124
1125 STAGE_CTX(linear_gradient_2stops, const SkPM4f*) {
1126 auto t = r;
1127 SkPM4f c0 = ctx[0],
1128 dc = ctx[1];
1129
1130 r = SkNf_fma(t, dc.r(), c0.r());
1131 g = SkNf_fma(t, dc.g(), c0.g());
1132 b = SkNf_fma(t, dc.b(), c0.b());
1133 a = SkNf_fma(t, dc.a(), c0.a());
1134 }
1135
1136 STAGE_CTX(byte_tables, const void*) {
1137 struct Tables { const uint8_t *r, *g, *b, *a; };
1138 auto tables = (const Tables*)ctx;
1139
1140 r = SkNf_from_byte(gather(tail, tables->r, SkNf_round(255.0f, r)));
1141 g = SkNf_from_byte(gather(tail, tables->g, SkNf_round(255.0f, g)));
1142 b = SkNf_from_byte(gather(tail, tables->b, SkNf_round(255.0f, b)));
1143 a = SkNf_from_byte(gather(tail, tables->a, SkNf_round(255.0f, a)));
1144 }
1145
1146 STAGE_CTX(byte_tables_rgb, const void*) {
1147 struct Tables { const uint8_t *r, *g, *b; int n; };
1148 auto tables = (const Tables*)ctx;
1149
1150 float scale = tables->n - 1;
1151 r = SkNf_from_byte(gather(tail, tables->r, SkNf_round(scale, r)));
1152 g = SkNf_from_byte(gather(tail, tables->g, SkNf_round(scale, g)));
1153 b = SkNf_from_byte(gather(tail, tables->b, SkNf_round(scale, b)));
1154 }
1155
1156 STAGE_CTX(shader_adapter, SkShader::Context*) {
1157 SkPM4f buf[N];
1158 static_assert(sizeof(buf) == sizeof(r) + sizeof(g) + sizeof(b) + sizeof(a), "");
1159 ctx->shadeSpan4f(x, (int)g[0], buf, N);
1160 SkNf::Load4(buf, &r, &g, &b, &a);
1161 }
1162
1163 SI Fn enum_to_Fn(SkRasterPipeline::StockStage st) {
1164 switch (st) {
1165 #define M(stage) case SkRasterPipeline::stage: return stage;
1166 SK_RASTER_PIPELINE_STAGES(M)
1167 #undef M
1168 }
1169 SkASSERT(false);
1170 return just_return;
1171 }
1172
1173 namespace {
1174
1175 static void build_program(void** program, const SkRasterPipeline::Stage* stages, int nstages) {
1176 for (int i = 0; i < nstages; i++) {
1177 *program++ = (void*)enum_to_Fn(stages[i].stage);
1178 if (stages[i].ctx) {
1179 *program++ = stages[i].ctx;
1180 }
1181 }
1182 *program++ = (void*)just_return;
1183 }
1184
1185 static void run_program(void** program, size_t x, size_t n) {
1186 SkNf u; // fastest to start uninitialized.
1187
1188 auto start = (Fn)load_and_increment(&program);
1189 while (n >= N) {
1190 start(x*N, program, u,u,u,u, u,u,u,u);
1191 x += N;
1192 n -= N;
1193 }
1194 if (n) {
1195 start(x*N+n, program, u,u,u,u, u,u,u,u);
1196 }
1197 }
1198
1199 // Compiled manages its memory manually because it's not safe to use
1200 // std::vector, SkTDArray, etc without setting us up for big ODR violations.
1201 struct Compiled {
1202     Compiled(const SkRasterPipeline::Stage* stages, int nstages) {
1203 int slots = nstages + 1; // One extra for just_return.
1204 for (int i = 0; i < nstages; i++) {
1205 if (stages[i].ctx) {
1206 slots++;
1207 }
1208 }
1209 fProgram = (void**)sk_malloc_throw(slots * sizeof(void*));
1210 build_program(fProgram, stages, nstages);
1211 }
1212     ~Compiled() { sk_free(fProgram); }
1213
1214     Compiled(const Compiled& o) {
1215 int slots = 0;
1216 while (o.fProgram[slots++] != (void*)just_return);
1217
1218 fProgram = (void**)sk_malloc_throw(slots * sizeof(void*));
1219 memcpy(fProgram, o.fProgram, slots * sizeof(void*));
1220 }
1221
1222     void operator()(size_t x, size_t n) {
1223 run_program(fProgram, x, n);
1224 }
1225
1226 void** fProgram;
1227 };
1228 }
1229
1230 namespace SK_OPTS_NS {
1231
1232 SI void run_pipeline(size_t x, size_t n,
1233 const SkRasterPipeline::Stage* stages, int nstages) {
1234 static const int kStackMax = 256;
1235 // Worst case is nstages stages with nstages context pointers, and just_return.
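    // With kStackMax == 256 that covers pipelines of up to 127 stages.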
1236 if (2*nstages+1 <= kStackMax) {
1237 void* program[kStackMax];
1238 build_program(program, stages, nstages);
1239 run_program(program, x,n);
1240 } else {
1241 Compiled{stages,nstages}(x,n);
1242 }
1243 }
1244
1245 } // namespace SK_OPTS_NS
1246
1247 #undef SI
1248 #undef STAGE
1249 #undef STAGE_CTX
1250 #undef RGBA_XFERMODE
1251 #undef RGB_XFERMODE
1252
1253 #endif//SkRasterPipeline_opts_DEFINED
1254