1 /*
2  * Copyright 2018 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef SkRasterPipeline_opts_DEFINED
9 #define SkRasterPipeline_opts_DEFINED
10 
11 #include "include/core/SkData.h"
12 #include "include/core/SkTypes.h"
13 #include "src/core/SkUtils.h"  // unaligned_{load,store}
14 
15 // Every function in this file should be marked static and inline using SI.
16 #if defined(__clang__)
17     #define SI __attribute__((always_inline)) static inline
18 #else
19     #define SI static inline
20 #endif
21 
22 template <typename Dst, typename Src>
SI Dst widen_cast(const Src& src) {
24     static_assert(sizeof(Dst) > sizeof(Src));
25     static_assert(std::is_trivially_copyable<Dst>::value);
26     static_assert(std::is_trivially_copyable<Src>::value);
27     Dst dst;
28     memcpy(&dst, &src, sizeof(Src));
29     return dst;
30 }
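// For example, the SSE paths below use widen_cast<__m128i>(U16) to promote an 8-byte vector
// into a full 16-byte XMM register; only the low 8 bytes are meaningful, the rest are left
// uninitialized.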
31 
32 // Our program is an array of void*, either
33 //   - 1 void* per stage with no context pointer, the next stage;
34 //   - 2 void* per stage with a context pointer, first the context pointer, then the next stage.
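//
// For illustration only (hypothetical stage names), a program whose second stage carries a
// context might be laid out as:
//
//   void* program[] = { (void*)load_stage,     // entry point: first stage, no context
//                       (void*)blend_stage,    // next stage...
//                       blend_ctx,             // ...whose context pointer comes right after it,
//                       (void*)just_return };  // then the stage after that, ending the chain.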
35 
36 // load_and_inc() steps the program forward by 1 void*, returning that pointer.
SI void* load_and_inc(void**& program) {
38 #if defined(__GNUC__) && defined(__x86_64__)
39     // If program is in %rsi (we try to make this likely) then this is a single instruction.
40     void* rax;
41     asm("lodsq" : "=a"(rax), "+S"(program));  // Write-only %rax, read-write %rsi.
42     return rax;
43 #else
44     // On ARM *program++ compiles into pretty ideal code without any handholding.
45     return *program++;
46 #endif
47 }
48 
49 // Lazily resolved on first cast.  Does nothing if cast to Ctx::None.
50 struct Ctx {
51     struct None {};
52 
53     void*   ptr;
54     void**& program;
55 
    explicit Ctx(void**& p) : ptr(nullptr), program(p) {}
57 
58     template <typename T>
59     operator T*() {
60         if (!ptr) { ptr = load_and_inc(program); }
61         return (T*)ptr;
62     }
    operator None() { return None{}; }
64 };
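
// In a stage body below, Ctx{program} converts to the stage's context type on first use:
// e.g. STAGE(dither, const float* rate) pulls one context pointer off the program, while a
// stage declared with Ctx::None consumes nothing extra.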
65 
66 
67 #if !defined(__clang__)
68     #define JUMPER_IS_SCALAR
69 #elif defined(SK_ARM_HAS_NEON)
70     #define JUMPER_IS_NEON
71 #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SKX
72     #define JUMPER_IS_SKX
73 #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
74     #define JUMPER_IS_HSW
75 #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
76     #define JUMPER_IS_AVX
77 #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
78     #define JUMPER_IS_SSE41
79 #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
80     #define JUMPER_IS_SSE2
81 #else
82     #define JUMPER_IS_SCALAR
83 #endif
84 
85 // Older Clangs seem to crash when generating non-optimized NEON code for ARMv7.
86 #if defined(__clang__) && !defined(__OPTIMIZE__) && defined(SK_CPU_ARM32)
87     // Apple Clang 9 and vanilla Clang 5 are fine, and may even be conservative.
88     #if defined(__apple_build_version__) && __clang_major__ < 9
89         #define JUMPER_IS_SCALAR
90     #elif __clang_major__ < 5
91         #define JUMPER_IS_SCALAR
92     #endif
93 
94     #if defined(JUMPER_IS_NEON) && defined(JUMPER_IS_SCALAR)
95         #undef  JUMPER_IS_NEON
96     #endif
97 #endif
98 
99 #if defined(JUMPER_IS_SCALAR)
100     #include <math.h>
101 #elif defined(JUMPER_IS_NEON)
102     #include <arm_neon.h>
103 #else
104     #include <immintrin.h>
105 #endif
106 
107 namespace SK_OPTS_NS {
108 
109 #if defined(JUMPER_IS_SCALAR)
110     // This path should lead to portable scalar code.
111     using F   = float   ;
112     using I32 =  int32_t;
113     using U64 = uint64_t;
114     using U32 = uint32_t;
115     using U16 = uint16_t;
116     using U8  = uint8_t ;
117 
    SI F   mad(F f, F m, F a)   { return f*m+a; }
    SI F   min(F a, F b)        { return fminf(a,b); }
    SI F   max(F a, F b)        { return fmaxf(a,b); }
    SI F   abs_  (F v)          { return fabsf(v); }
    SI F   floor_(F v)          { return floorf(v); }
    SI F   rcp   (F v)          { return 1.0f / v; }
    SI F   rsqrt (F v)          { return 1.0f / sqrtf(v); }
    SI F    sqrt_(F v)          { return sqrtf(v); }
    SI U32 round (F v, F scale) { return (uint32_t)(v*scale + 0.5f); }
    SI U16 pack(U32 v)          { return (U16)v; }
    SI U8  pack(U16 v)          { return  (U8)v; }
129 
    SI F if_then_else(I32 c, F t, F e) { return c ? t : e; }
131 
132     template <typename T>
    SI T gather(const T* p, U32 ix) { return p[ix]; }
134 
    SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
136         *r = ptr[0];
137         *g = ptr[1];
138     }
    SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
140         ptr[0] = r;
141         ptr[1] = g;
142     }
    SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
144         *r = ptr[0];
145         *g = ptr[1];
146         *b = ptr[2];
147     }
    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
149         *r = ptr[0];
150         *g = ptr[1];
151         *b = ptr[2];
152         *a = ptr[3];
153     }
    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
155         ptr[0] = r;
156         ptr[1] = g;
157         ptr[2] = b;
158         ptr[3] = a;
159     }
160 
    SI void load2(const float* ptr, size_t tail, F* r, F* g) {
162         *r = ptr[0];
163         *g = ptr[1];
164     }
    SI void store2(float* ptr, size_t tail, F r, F g) {
166         ptr[0] = r;
167         ptr[1] = g;
168     }
    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
170         *r = ptr[0];
171         *g = ptr[1];
172         *b = ptr[2];
173         *a = ptr[3];
174     }
    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
176         ptr[0] = r;
177         ptr[1] = g;
178         ptr[2] = b;
179         ptr[3] = a;
180     }
181 
182 #elif defined(JUMPER_IS_NEON)
183     // Since we know we're using Clang, we can use its vector extensions.
184     template <typename T> using V = T __attribute__((ext_vector_type(4)));
185     using F   = V<float   >;
186     using I32 = V< int32_t>;
187     using U64 = V<uint64_t>;
188     using U32 = V<uint32_t>;
189     using U16 = V<uint16_t>;
190     using U8  = V<uint8_t >;
191 
192     // We polyfill a few routines that Clang doesn't build into ext_vector_types.
193     SI F   min(F a, F b)                         { return vminq_f32(a,b);          }
194     SI F   max(F a, F b)                         { return vmaxq_f32(a,b);          }
195     SI F   abs_  (F v)                           { return vabsq_f32(v);            }
196     SI F   rcp   (F v) { auto e = vrecpeq_f32 (v); return vrecpsq_f32 (v,e  ) * e; }
197     SI F   rsqrt (F v) { auto e = vrsqrteq_f32(v); return vrsqrtsq_f32(v,e*e) * e; }
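    // Note: rcp() and rsqrt() above pair the hardware estimate with one Newton-Raphson
    // refinement step (vrecpsq/vrsqrtsq); they are still approximations, not exact.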
198     SI U16 pack(U32 v)                           { return __builtin_convertvector(v, U16); }
199     SI U8  pack(U16 v)                           { return __builtin_convertvector(v,  U8); }
200 
201     SI F if_then_else(I32 c, F t, F e) { return vbslq_f32((U32)c,t,e); }
202 
203     #if defined(SK_CPU_ARM64)
204         SI F     mad(F f, F m, F a) { return vfmaq_f32(a,f,m); }
205         SI F  floor_(F v) { return vrndmq_f32(v); }
206         SI F   sqrt_(F v) { return vsqrtq_f32(v); }
207         SI U32 round(F v, F scale) { return vcvtnq_u32_f32(v*scale); }
208     #else
209         SI F mad(F f, F m, F a) { return vmlaq_f32(a,f,m); }
210         SI F floor_(F v) {
211             F roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
212             return roundtrip - if_then_else(roundtrip > v, 1, 0);
213         }
214 
215         SI F sqrt_(F v) {
216             auto e = vrsqrteq_f32(v);  // Estimate and two refinement steps for e = rsqrt(v).
217             e *= vrsqrtsq_f32(v,e*e);
218             e *= vrsqrtsq_f32(v,e*e);
219             return v*e;                // sqrt(v) == v*rsqrt(v).
220         }
221 
222         SI U32 round(F v, F scale) {
223             return vcvtq_u32_f32(mad(v,scale,0.5f));
224         }
225     #endif
226 
227 
228     template <typename T>
229     SI V<T> gather(const T* p, U32 ix) {
230         return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
231     }
232     SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
233         uint16x4x2_t rg;
234         if (__builtin_expect(tail,0)) {
235             if (  true  ) { rg = vld2_lane_u16(ptr + 0, rg, 0); }
236             if (tail > 1) { rg = vld2_lane_u16(ptr + 2, rg, 1); }
237             if (tail > 2) { rg = vld2_lane_u16(ptr + 4, rg, 2); }
238         } else {
239             rg = vld2_u16(ptr);
240         }
241         *r = rg.val[0];
242         *g = rg.val[1];
243     }
244     SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
245         if (__builtin_expect(tail,0)) {
246             if (  true  ) { vst2_lane_u16(ptr + 0, (uint16x4x2_t{{r,g}}), 0); }
247             if (tail > 1) { vst2_lane_u16(ptr + 2, (uint16x4x2_t{{r,g}}), 1); }
248             if (tail > 2) { vst2_lane_u16(ptr + 4, (uint16x4x2_t{{r,g}}), 2); }
249         } else {
250             vst2_u16(ptr, (uint16x4x2_t{{r,g}}));
251         }
252     }
253     SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
254         uint16x4x3_t rgb;
255         if (__builtin_expect(tail,0)) {
256             if (  true  ) { rgb = vld3_lane_u16(ptr + 0, rgb, 0); }
257             if (tail > 1) { rgb = vld3_lane_u16(ptr + 3, rgb, 1); }
258             if (tail > 2) { rgb = vld3_lane_u16(ptr + 6, rgb, 2); }
259         } else {
260             rgb = vld3_u16(ptr);
261         }
262         *r = rgb.val[0];
263         *g = rgb.val[1];
264         *b = rgb.val[2];
265     }
266     SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
267         uint16x4x4_t rgba;
268         if (__builtin_expect(tail,0)) {
269             if (  true  ) { rgba = vld4_lane_u16(ptr + 0, rgba, 0); }
270             if (tail > 1) { rgba = vld4_lane_u16(ptr + 4, rgba, 1); }
271             if (tail > 2) { rgba = vld4_lane_u16(ptr + 8, rgba, 2); }
272         } else {
273             rgba = vld4_u16(ptr);
274         }
275         *r = rgba.val[0];
276         *g = rgba.val[1];
277         *b = rgba.val[2];
278         *a = rgba.val[3];
279     }
280 
281     SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
282         if (__builtin_expect(tail,0)) {
283             if (  true  ) { vst4_lane_u16(ptr + 0, (uint16x4x4_t{{r,g,b,a}}), 0); }
284             if (tail > 1) { vst4_lane_u16(ptr + 4, (uint16x4x4_t{{r,g,b,a}}), 1); }
285             if (tail > 2) { vst4_lane_u16(ptr + 8, (uint16x4x4_t{{r,g,b,a}}), 2); }
286         } else {
287             vst4_u16(ptr, (uint16x4x4_t{{r,g,b,a}}));
288         }
289     }
290     SI void load2(const float* ptr, size_t tail, F* r, F* g) {
291         float32x4x2_t rg;
292         if (__builtin_expect(tail,0)) {
293             if (  true  ) { rg = vld2q_lane_f32(ptr + 0, rg, 0); }
294             if (tail > 1) { rg = vld2q_lane_f32(ptr + 2, rg, 1); }
295             if (tail > 2) { rg = vld2q_lane_f32(ptr + 4, rg, 2); }
296         } else {
297             rg = vld2q_f32(ptr);
298         }
299         *r = rg.val[0];
300         *g = rg.val[1];
301     }
302     SI void store2(float* ptr, size_t tail, F r, F g) {
303         if (__builtin_expect(tail,0)) {
304             if (  true  ) { vst2q_lane_f32(ptr + 0, (float32x4x2_t{{r,g}}), 0); }
305             if (tail > 1) { vst2q_lane_f32(ptr + 2, (float32x4x2_t{{r,g}}), 1); }
306             if (tail > 2) { vst2q_lane_f32(ptr + 4, (float32x4x2_t{{r,g}}), 2); }
307         } else {
308             vst2q_f32(ptr, (float32x4x2_t{{r,g}}));
309         }
310     }
311     SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
312         float32x4x4_t rgba;
313         if (__builtin_expect(tail,0)) {
314             if (  true  ) { rgba = vld4q_lane_f32(ptr + 0, rgba, 0); }
315             if (tail > 1) { rgba = vld4q_lane_f32(ptr + 4, rgba, 1); }
316             if (tail > 2) { rgba = vld4q_lane_f32(ptr + 8, rgba, 2); }
317         } else {
318             rgba = vld4q_f32(ptr);
319         }
320         *r = rgba.val[0];
321         *g = rgba.val[1];
322         *b = rgba.val[2];
323         *a = rgba.val[3];
324     }
325     SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
326         if (__builtin_expect(tail,0)) {
327             if (  true  ) { vst4q_lane_f32(ptr + 0, (float32x4x4_t{{r,g,b,a}}), 0); }
328             if (tail > 1) { vst4q_lane_f32(ptr + 4, (float32x4x4_t{{r,g,b,a}}), 1); }
329             if (tail > 2) { vst4q_lane_f32(ptr + 8, (float32x4x4_t{{r,g,b,a}}), 2); }
330         } else {
331             vst4q_f32(ptr, (float32x4x4_t{{r,g,b,a}}));
332         }
333     }
334 
335 #elif defined(JUMPER_IS_AVX) || defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
336     // These are __m256 and __m256i, but friendlier and strongly-typed.
337     template <typename T> using V = T __attribute__((ext_vector_type(8)));
338     using F   = V<float   >;
339     using I32 = V< int32_t>;
340     using U64 = V<uint64_t>;
341     using U32 = V<uint32_t>;
342     using U16 = V<uint16_t>;
343     using U8  = V<uint8_t >;
344 
345     SI F mad(F f, F m, F a)  {
346     #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
347         return _mm256_fmadd_ps(f,m,a);
348     #else
349         return f*m+a;
350     #endif
351     }
352 
353     SI F   min(F a, F b)        { return _mm256_min_ps(a,b);    }
354     SI F   max(F a, F b)        { return _mm256_max_ps(a,b);    }
355     SI F   abs_  (F v)          { return _mm256_and_ps(v, 0-v); }
356     SI F   floor_(F v)          { return _mm256_floor_ps(v);    }
357     SI F   rcp   (F v)          { return _mm256_rcp_ps  (v);    }
358     SI F   rsqrt (F v)          { return _mm256_rsqrt_ps(v);    }
359     SI F    sqrt_(F v)          { return _mm256_sqrt_ps (v);    }
360     SI U32 round (F v, F scale) { return _mm256_cvtps_epi32(v*scale); }
361 
362     SI U16 pack(U32 v) {
363         return _mm_packus_epi32(_mm256_extractf128_si256(v, 0),
364                                 _mm256_extractf128_si256(v, 1));
365     }
366     SI U8 pack(U16 v) {
367         auto r = _mm_packus_epi16(v,v);
368         return sk_unaligned_load<U8>(&r);
369     }
370 
371     SI F if_then_else(I32 c, F t, F e) { return _mm256_blendv_ps(e,t,c); }
372 
373     template <typename T>
374     SI V<T> gather(const T* p, U32 ix) {
375         return { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]],
376                  p[ix[4]], p[ix[5]], p[ix[6]], p[ix[7]], };
377     }
378     #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
379         SI F   gather(const float*    p, U32 ix) { return _mm256_i32gather_ps   (p, ix, 4); }
380         SI U32 gather(const uint32_t* p, U32 ix) { return _mm256_i32gather_epi32(p, ix, 4); }
381         SI U64 gather(const uint64_t* p, U32 ix) {
382             __m256i parts[] = {
383                 _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,0), 8),
384                 _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,1), 8),
385             };
386             return sk_bit_cast<U64>(parts);
387         }
388     #endif
389 
390     SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
391         U16 _0123, _4567;
392         if (__builtin_expect(tail,0)) {
393             _0123 = _4567 = _mm_setzero_si128();
394             auto* d = &_0123;
395             if (tail > 3) {
396                 *d = _mm_loadu_si128(((__m128i*)ptr) + 0);
397                 tail -= 4;
398                 ptr += 8;
399                 d = &_4567;
400             }
401             bool high = false;
402             if (tail > 1) {
403                 *d = _mm_loadu_si64(ptr);
404                 tail -= 2;
405                 ptr += 4;
406                 high = true;
407             }
408             if (tail > 0) {
409                 (*d)[high ? 4 : 0] = *(ptr + 0);
410                 (*d)[high ? 5 : 1] = *(ptr + 1);
411             }
412         } else {
413             _0123 = _mm_loadu_si128(((__m128i*)ptr) + 0);
414             _4567 = _mm_loadu_si128(((__m128i*)ptr) + 1);
415         }
416         *r = _mm_packs_epi32(_mm_srai_epi32(_mm_slli_epi32(_0123, 16), 16),
417                              _mm_srai_epi32(_mm_slli_epi32(_4567, 16), 16));
418         *g = _mm_packs_epi32(_mm_srai_epi32(_0123, 16),
419                              _mm_srai_epi32(_4567, 16));
420     }
421     SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
422         auto _0123 = _mm_unpacklo_epi16(r, g),
423              _4567 = _mm_unpackhi_epi16(r, g);
424         if (__builtin_expect(tail,0)) {
425             const auto* s = &_0123;
426             if (tail > 3) {
427                 _mm_storeu_si128((__m128i*)ptr, *s);
428                 s = &_4567;
429                 tail -= 4;
430                 ptr += 8;
431             }
432             bool high = false;
433             if (tail > 1) {
434                 _mm_storel_epi64((__m128i*)ptr, *s);
435                 ptr += 4;
436                 tail -= 2;
437                 high = true;
438             }
439             if (tail > 0) {
440                 if (high) {
441                     *(int32_t*)ptr = _mm_extract_epi32(*s, 2);
442                 } else {
443                     *(int32_t*)ptr = _mm_cvtsi128_si32(*s);
444                 }
445             }
446         } else {
447             _mm_storeu_si128((__m128i*)ptr + 0, _0123);
448             _mm_storeu_si128((__m128i*)ptr + 1, _4567);
449         }
450     }
451 
452     SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
453         __m128i _0,_1,_2,_3,_4,_5,_6,_7;
454         if (__builtin_expect(tail,0)) {
455             auto load_rgb = [](const uint16_t* src) {
456                 auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
457                 return _mm_insert_epi16(v, src[2], 2);
458             };
459             _1 = _2 = _3 = _4 = _5 = _6 = _7 = _mm_setzero_si128();
460             if (  true  ) { _0 = load_rgb(ptr +  0); }
461             if (tail > 1) { _1 = load_rgb(ptr +  3); }
462             if (tail > 2) { _2 = load_rgb(ptr +  6); }
463             if (tail > 3) { _3 = load_rgb(ptr +  9); }
464             if (tail > 4) { _4 = load_rgb(ptr + 12); }
465             if (tail > 5) { _5 = load_rgb(ptr + 15); }
466             if (tail > 6) { _6 = load_rgb(ptr + 18); }
467         } else {
468             // Load 0+1, 2+3, 4+5 normally, and 6+7 backed up 4 bytes so we don't run over.
469             auto _01 =                _mm_loadu_si128((const __m128i*)(ptr +  0))    ;
470             auto _23 =                _mm_loadu_si128((const __m128i*)(ptr +  6))    ;
471             auto _45 =                _mm_loadu_si128((const __m128i*)(ptr + 12))    ;
472             auto _67 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 16)), 4);
473             _0 = _01; _1 = _mm_srli_si128(_01, 6);
474             _2 = _23; _3 = _mm_srli_si128(_23, 6);
475             _4 = _45; _5 = _mm_srli_si128(_45, 6);
476             _6 = _67; _7 = _mm_srli_si128(_67, 6);
477         }
478 
479         auto _02 = _mm_unpacklo_epi16(_0, _2),  // r0 r2 g0 g2 b0 b2 xx xx
480              _13 = _mm_unpacklo_epi16(_1, _3),
481              _46 = _mm_unpacklo_epi16(_4, _6),
482              _57 = _mm_unpacklo_epi16(_5, _7);
483 
484         auto rg0123 = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
485              bx0123 = _mm_unpackhi_epi16(_02, _13),  // b0 b1 b2 b3 xx xx xx xx
486              rg4567 = _mm_unpacklo_epi16(_46, _57),
487              bx4567 = _mm_unpackhi_epi16(_46, _57);
488 
489         *r = _mm_unpacklo_epi64(rg0123, rg4567);
490         *g = _mm_unpackhi_epi64(rg0123, rg4567);
491         *b = _mm_unpacklo_epi64(bx0123, bx4567);
492     }
493     SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
494         __m128i _01, _23, _45, _67;
495         if (__builtin_expect(tail,0)) {
496             auto src = (const double*)ptr;
497             _01 = _23 = _45 = _67 = _mm_setzero_si128();
498             if (tail > 0) { _01 = _mm_loadl_pd(_01, src+0); }
499             if (tail > 1) { _01 = _mm_loadh_pd(_01, src+1); }
500             if (tail > 2) { _23 = _mm_loadl_pd(_23, src+2); }
501             if (tail > 3) { _23 = _mm_loadh_pd(_23, src+3); }
502             if (tail > 4) { _45 = _mm_loadl_pd(_45, src+4); }
503             if (tail > 5) { _45 = _mm_loadh_pd(_45, src+5); }
504             if (tail > 6) { _67 = _mm_loadl_pd(_67, src+6); }
505         } else {
506             _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);
507             _23 = _mm_loadu_si128(((__m128i*)ptr) + 1);
508             _45 = _mm_loadu_si128(((__m128i*)ptr) + 2);
509             _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);
510         }
511 
512         auto _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
513              _13 = _mm_unpackhi_epi16(_01, _23),  // r1 r3 g1 g3 b1 b3 a1 a3
514              _46 = _mm_unpacklo_epi16(_45, _67),
515              _57 = _mm_unpackhi_epi16(_45, _67);
516 
517         auto rg0123 = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
518              ba0123 = _mm_unpackhi_epi16(_02, _13),  // b0 b1 b2 b3 a0 a1 a2 a3
519              rg4567 = _mm_unpacklo_epi16(_46, _57),
520              ba4567 = _mm_unpackhi_epi16(_46, _57);
521 
522         *r = _mm_unpacklo_epi64(rg0123, rg4567);
523         *g = _mm_unpackhi_epi64(rg0123, rg4567);
524         *b = _mm_unpacklo_epi64(ba0123, ba4567);
525         *a = _mm_unpackhi_epi64(ba0123, ba4567);
526     }
527     SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
528         auto rg0123 = _mm_unpacklo_epi16(r, g),  // r0 g0 r1 g1 r2 g2 r3 g3
529              rg4567 = _mm_unpackhi_epi16(r, g),  // r4 g4 r5 g5 r6 g6 r7 g7
530              ba0123 = _mm_unpacklo_epi16(b, a),
531              ba4567 = _mm_unpackhi_epi16(b, a);
532 
533         auto _01 = _mm_unpacklo_epi32(rg0123, ba0123),
534              _23 = _mm_unpackhi_epi32(rg0123, ba0123),
535              _45 = _mm_unpacklo_epi32(rg4567, ba4567),
536              _67 = _mm_unpackhi_epi32(rg4567, ba4567);
537 
538         if (__builtin_expect(tail,0)) {
539             auto dst = (double*)ptr;
540             if (tail > 0) { _mm_storel_pd(dst+0, _01); }
541             if (tail > 1) { _mm_storeh_pd(dst+1, _01); }
542             if (tail > 2) { _mm_storel_pd(dst+2, _23); }
543             if (tail > 3) { _mm_storeh_pd(dst+3, _23); }
544             if (tail > 4) { _mm_storel_pd(dst+4, _45); }
545             if (tail > 5) { _mm_storeh_pd(dst+5, _45); }
546             if (tail > 6) { _mm_storel_pd(dst+6, _67); }
547         } else {
548             _mm_storeu_si128((__m128i*)ptr + 0, _01);
549             _mm_storeu_si128((__m128i*)ptr + 1, _23);
550             _mm_storeu_si128((__m128i*)ptr + 2, _45);
551             _mm_storeu_si128((__m128i*)ptr + 3, _67);
552         }
553     }
554 
555     SI void load2(const float* ptr, size_t tail, F* r, F* g) {
556         F _0123, _4567;
557         if (__builtin_expect(tail, 0)) {
558             _0123 = _4567 = _mm256_setzero_ps();
559             F* d = &_0123;
560             if (tail > 3) {
561                 *d = _mm256_loadu_ps(ptr);
562                 ptr += 8;
563                 tail -= 4;
564                 d = &_4567;
565             }
566             bool high = false;
567             if (tail > 1) {
568                 *d = _mm256_castps128_ps256(_mm_loadu_ps(ptr));
569                 ptr += 4;
570                 tail -= 2;
571                 high = true;
572             }
573             if (tail > 0) {
574                 *d = high ? _mm256_insertf128_ps(*d, _mm_loadu_si64(ptr), 1)
575                           : _mm256_insertf128_ps(*d, _mm_loadu_si64(ptr), 0);
576             }
577         } else {
578             _0123 = _mm256_loadu_ps(ptr + 0);
579             _4567 = _mm256_loadu_ps(ptr + 8);
580         }
581 
582         F _0145 = _mm256_permute2f128_pd(_0123, _4567, 0x20),
583           _2367 = _mm256_permute2f128_pd(_0123, _4567, 0x31);
584 
585         *r = _mm256_shuffle_ps(_0145, _2367, 0x88);
586         *g = _mm256_shuffle_ps(_0145, _2367, 0xDD);
587     }
588     SI void store2(float* ptr, size_t tail, F r, F g) {
589         F _0145 = _mm256_unpacklo_ps(r, g),
590           _2367 = _mm256_unpackhi_ps(r, g);
591         F _0123 = _mm256_permute2f128_pd(_0145, _2367, 0x20),
592           _4567 = _mm256_permute2f128_pd(_0145, _2367, 0x31);
593 
594         if (__builtin_expect(tail, 0)) {
595             const __m256* s = &_0123;
596             if (tail > 3) {
597                 _mm256_storeu_ps(ptr, *s);
598                 s = &_4567;
599                 tail -= 4;
600                 ptr += 8;
601             }
602             bool high = false;
603             if (tail > 1) {
604                 _mm_storeu_ps(ptr, _mm256_extractf128_ps(*s, 0));
605                 ptr += 4;
606                 tail -= 2;
607                 high = true;
608             }
609             if (tail > 0) {
610                 *(ptr + 0) = (*s)[ high ? 4 : 0];
611                 *(ptr + 1) = (*s)[ high ? 5 : 1];
612             }
613         } else {
614             _mm256_storeu_ps(ptr + 0, _0123);
615             _mm256_storeu_ps(ptr + 8, _4567);
616         }
617     }
618 
619     SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
620         F _04, _15, _26, _37;
621         _04 = _15 = _26 = _37 = 0;
622         switch (tail) {
623             case 0: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+28), 1); [[fallthrough]];
624             case 7: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+24), 1); [[fallthrough]];
625             case 6: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+20), 1); [[fallthrough]];
626             case 5: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+16), 1); [[fallthrough]];
627             case 4: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+12), 0); [[fallthrough]];
628             case 3: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+ 8), 0); [[fallthrough]];
629             case 2: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+ 4), 0); [[fallthrough]];
630             case 1: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+ 0), 0);
631         }
632 
633         F rg0145 = _mm256_unpacklo_ps(_04,_15),  // r0 r1 g0 g1 | r4 r5 g4 g5
634           ba0145 = _mm256_unpackhi_ps(_04,_15),
635           rg2367 = _mm256_unpacklo_ps(_26,_37),
636           ba2367 = _mm256_unpackhi_ps(_26,_37);
637 
638         *r = _mm256_unpacklo_pd(rg0145, rg2367);
639         *g = _mm256_unpackhi_pd(rg0145, rg2367);
640         *b = _mm256_unpacklo_pd(ba0145, ba2367);
641         *a = _mm256_unpackhi_pd(ba0145, ba2367);
642     }
643     SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
644         F rg0145 = _mm256_unpacklo_ps(r, g),  // r0 g0 r1 g1 | r4 g4 r5 g5
645           rg2367 = _mm256_unpackhi_ps(r, g),  // r2 ...      | r6 ...
646           ba0145 = _mm256_unpacklo_ps(b, a),  // b0 a0 b1 a1 | b4 a4 b5 a5
647           ba2367 = _mm256_unpackhi_ps(b, a);  // b2 ...      | b6 ...
648 
649         F _04 = _mm256_unpacklo_pd(rg0145, ba0145),  // r0 g0 b0 a0 | r4 g4 b4 a4
650           _15 = _mm256_unpackhi_pd(rg0145, ba0145),  // r1 ...      | r5 ...
651           _26 = _mm256_unpacklo_pd(rg2367, ba2367),  // r2 ...      | r6 ...
652           _37 = _mm256_unpackhi_pd(rg2367, ba2367);  // r3 ...      | r7 ...
653 
654         if (__builtin_expect(tail, 0)) {
655             if (tail > 0) { _mm_storeu_ps(ptr+ 0, _mm256_extractf128_ps(_04, 0)); }
656             if (tail > 1) { _mm_storeu_ps(ptr+ 4, _mm256_extractf128_ps(_15, 0)); }
657             if (tail > 2) { _mm_storeu_ps(ptr+ 8, _mm256_extractf128_ps(_26, 0)); }
658             if (tail > 3) { _mm_storeu_ps(ptr+12, _mm256_extractf128_ps(_37, 0)); }
659             if (tail > 4) { _mm_storeu_ps(ptr+16, _mm256_extractf128_ps(_04, 1)); }
660             if (tail > 5) { _mm_storeu_ps(ptr+20, _mm256_extractf128_ps(_15, 1)); }
661             if (tail > 6) { _mm_storeu_ps(ptr+24, _mm256_extractf128_ps(_26, 1)); }
662         } else {
663             F _01 = _mm256_permute2f128_ps(_04, _15, 32),  // 32 == 0010 0000 == lo, lo
664               _23 = _mm256_permute2f128_ps(_26, _37, 32),
665               _45 = _mm256_permute2f128_ps(_04, _15, 49),  // 49 == 0011 0001 == hi, hi
666               _67 = _mm256_permute2f128_ps(_26, _37, 49);
667             _mm256_storeu_ps(ptr+ 0, _01);
668             _mm256_storeu_ps(ptr+ 8, _23);
669             _mm256_storeu_ps(ptr+16, _45);
670             _mm256_storeu_ps(ptr+24, _67);
671         }
672     }
673 
674 #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41)
675     template <typename T> using V = T __attribute__((ext_vector_type(4)));
676     using F   = V<float   >;
677     using I32 = V< int32_t>;
678     using U64 = V<uint64_t>;
679     using U32 = V<uint32_t>;
680     using U16 = V<uint16_t>;
681     using U8  = V<uint8_t >;
682 
683     SI F   mad(F f, F m, F a)  { return f*m+a;              }
684     SI F   min(F a, F b)       { return _mm_min_ps(a,b);    }
685     SI F   max(F a, F b)       { return _mm_max_ps(a,b);    }
686     SI F   abs_(F v)           { return _mm_and_ps(v, 0-v); }
687     SI F   rcp   (F v)         { return _mm_rcp_ps  (v);    }
688     SI F   rsqrt (F v)         { return _mm_rsqrt_ps(v);    }
689     SI F    sqrt_(F v)         { return _mm_sqrt_ps (v);    }
690     SI U32 round(F v, F scale) { return _mm_cvtps_epi32(v*scale); }
691 
692     SI U16 pack(U32 v) {
693     #if defined(JUMPER_IS_SSE41)
694         auto p = _mm_packus_epi32(v,v);
695     #else
696         // Sign extend so that _mm_packs_epi32() does the pack we want.
697         auto p = _mm_srai_epi32(_mm_slli_epi32(v, 16), 16);
698         p = _mm_packs_epi32(p,p);
699     #endif
700         return sk_unaligned_load<U16>(&p);  // We have two copies.  Return (the lower) one.
701     }
702     SI U8 pack(U16 v) {
703         auto r = widen_cast<__m128i>(v);
704         r = _mm_packus_epi16(r,r);
705         return sk_unaligned_load<U8>(&r);
706     }
707 
708     SI F if_then_else(I32 c, F t, F e) {
709         return _mm_or_ps(_mm_and_ps(c, t), _mm_andnot_ps(c, e));
710     }
711 
712     SI F floor_(F v) {
713     #if defined(JUMPER_IS_SSE41)
714         return _mm_floor_ps(v);
715     #else
716         F roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
717         return roundtrip - if_then_else(roundtrip > v, 1, 0);
718     #endif
719     }
720 
721     template <typename T>
722     SI V<T> gather(const T* p, U32 ix) {
723         return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
724     }
725 
726     SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
727         __m128i _01;
728         if (__builtin_expect(tail,0)) {
729             _01 = _mm_setzero_si128();
730             if (tail > 1) {
731                 _01 = _mm_loadl_pd(_01, (const double*)ptr);            // r0 g0 r1 g1 00 00 00 00
732                 if (tail > 2) {
733                   _01 = _mm_insert_epi16(_01, *(ptr+4), 4);             // r0 g0 r1 g1 r2 00 00 00
734                   _01 = _mm_insert_epi16(_01, *(ptr+5), 5);             // r0 g0 r1 g1 r2 g2 00 00
735                 }
736             } else {
737                 _01 = _mm_cvtsi32_si128(*(const uint32_t*)ptr);         // r0 g0 00 00 00 00 00 00
738             }
739         } else {
740             _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);  // r0 g0 r1 g1 r2 g2 r3 g3
741         }
742         auto rg01_23 = _mm_shufflelo_epi16(_01, 0xD8);      // r0 r1 g0 g1 r2 g2 r3 g3
743         auto rg      = _mm_shufflehi_epi16(rg01_23, 0xD8);  // r0 r1 g0 g1 r2 r3 g2 g3
744 
745         auto R = _mm_shuffle_epi32(rg, 0x88);  // r0 r1 r2 r3 r0 r1 r2 r3
746         auto G = _mm_shuffle_epi32(rg, 0xDD);  // g0 g1 g2 g3 g0 g1 g2 g3
747         *r = sk_unaligned_load<U16>(&R);
748         *g = sk_unaligned_load<U16>(&G);
749     }
750     SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
751         U32 rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g));
752         if (__builtin_expect(tail, 0)) {
753             if (tail > 1) {
754                 _mm_storel_epi64((__m128i*)ptr, rg);
755                 if (tail > 2) {
756                     int32_t rgpair = rg[2];
757                     memcpy(ptr + 4, &rgpair, sizeof(rgpair));
758                 }
759             } else {
760                 int32_t rgpair = rg[0];
761                 memcpy(ptr, &rgpair, sizeof(rgpair));
762             }
763         } else {
764             _mm_storeu_si128((__m128i*)ptr + 0, rg);
765         }
766     }
767 
768     SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
769         __m128i _0, _1, _2, _3;
770         if (__builtin_expect(tail,0)) {
771             _1 = _2 = _3 = _mm_setzero_si128();
772             auto load_rgb = [](const uint16_t* src) {
773                 auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
774                 return _mm_insert_epi16(v, src[2], 2);
775             };
776             if (  true  ) { _0 = load_rgb(ptr + 0); }
777             if (tail > 1) { _1 = load_rgb(ptr + 3); }
778             if (tail > 2) { _2 = load_rgb(ptr + 6); }
779         } else {
780             // Load slightly weirdly to make sure we don't load past the end of 4x48 bits.
781             auto _01 =                _mm_loadu_si128((const __m128i*)(ptr + 0))    ,
782                  _23 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 4)), 4);
783 
784             // Each _N holds R,G,B for pixel N in its lower 3 lanes (upper 5 are ignored).
785             _0 = _01;
786             _1 = _mm_srli_si128(_01, 6);
787             _2 = _23;
788             _3 = _mm_srli_si128(_23, 6);
789         }
790 
791         // De-interlace to R,G,B.
792         auto _02 = _mm_unpacklo_epi16(_0, _2),  // r0 r2 g0 g2 b0 b2 xx xx
793              _13 = _mm_unpacklo_epi16(_1, _3);  // r1 r3 g1 g3 b1 b3 xx xx
794 
795         auto R = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
796              G = _mm_srli_si128(R, 8),
797              B = _mm_unpackhi_epi16(_02, _13);  // b0 b1 b2 b3 xx xx xx xx
798 
799         *r = sk_unaligned_load<U16>(&R);
800         *g = sk_unaligned_load<U16>(&G);
801         *b = sk_unaligned_load<U16>(&B);
802     }
803 
804     SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
805         __m128i _01, _23;
806         if (__builtin_expect(tail,0)) {
807             _01 = _23 = _mm_setzero_si128();
808             auto src = (const double*)ptr;
809             if (  true  ) { _01 = _mm_loadl_pd(_01, src + 0); } // r0 g0 b0 a0 00 00 00 00
810             if (tail > 1) { _01 = _mm_loadh_pd(_01, src + 1); } // r0 g0 b0 a0 r1 g1 b1 a1
811             if (tail > 2) { _23 = _mm_loadl_pd(_23, src + 2); } // r2 g2 b2 a2 00 00 00 00
812         } else {
813             _01 = _mm_loadu_si128(((__m128i*)ptr) + 0); // r0 g0 b0 a0 r1 g1 b1 a1
814             _23 = _mm_loadu_si128(((__m128i*)ptr) + 1); // r2 g2 b2 a2 r3 g3 b3 a3
815         }
816 
817         auto _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
818              _13 = _mm_unpackhi_epi16(_01, _23);  // r1 r3 g1 g3 b1 b3 a1 a3
819 
820         auto rg = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
821              ba = _mm_unpackhi_epi16(_02, _13);  // b0 b1 b2 b3 a0 a1 a2 a3
822 
823         *r = sk_unaligned_load<U16>((uint16_t*)&rg + 0);
824         *g = sk_unaligned_load<U16>((uint16_t*)&rg + 4);
825         *b = sk_unaligned_load<U16>((uint16_t*)&ba + 0);
826         *a = sk_unaligned_load<U16>((uint16_t*)&ba + 4);
827     }
828 
829     SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
830         auto rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)),
831              ba = _mm_unpacklo_epi16(widen_cast<__m128i>(b), widen_cast<__m128i>(a));
832 
833         if (__builtin_expect(tail, 0)) {
834             auto dst = (double*)ptr;
835             if (  true  ) { _mm_storel_pd(dst + 0, _mm_unpacklo_epi32(rg, ba)); }
836             if (tail > 1) { _mm_storeh_pd(dst + 1, _mm_unpacklo_epi32(rg, ba)); }
837             if (tail > 2) { _mm_storel_pd(dst + 2, _mm_unpackhi_epi32(rg, ba)); }
838         } else {
839             _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg, ba));
840             _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg, ba));
841         }
842     }
843 
844     SI void load2(const float* ptr, size_t tail, F* r, F* g) {
845         F _01, _23;
846         if (__builtin_expect(tail, 0)) {
847             _01 = _23 = _mm_setzero_si128();
848             if (  true  ) { _01 = _mm_loadl_pi(_01, (__m64 const*)(ptr + 0)); }
849             if (tail > 1) { _01 = _mm_loadh_pi(_01, (__m64 const*)(ptr + 2)); }
850             if (tail > 2) { _23 = _mm_loadl_pi(_23, (__m64 const*)(ptr + 4)); }
851         } else {
852             _01 = _mm_loadu_ps(ptr + 0);
853             _23 = _mm_loadu_ps(ptr + 4);
854         }
855         *r = _mm_shuffle_ps(_01, _23, 0x88);
856         *g = _mm_shuffle_ps(_01, _23, 0xDD);
857     }
858     SI void store2(float* ptr, size_t tail, F r, F g) {
859         F _01 = _mm_unpacklo_ps(r, g),
860           _23 = _mm_unpackhi_ps(r, g);
861         if (__builtin_expect(tail, 0)) {
862             if (  true  ) { _mm_storel_pi((__m64*)(ptr + 0), _01); }
863             if (tail > 1) { _mm_storeh_pi((__m64*)(ptr + 2), _01); }
864             if (tail > 2) { _mm_storel_pi((__m64*)(ptr + 4), _23); }
865         } else {
866             _mm_storeu_ps(ptr + 0, _01);
867             _mm_storeu_ps(ptr + 4, _23);
868         }
869     }
870 
871     SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
872         F _0, _1, _2, _3;
873         if (__builtin_expect(tail, 0)) {
874             _1 = _2 = _3 = _mm_setzero_si128();
875             if (  true  ) { _0 = _mm_loadu_ps(ptr + 0); }
876             if (tail > 1) { _1 = _mm_loadu_ps(ptr + 4); }
877             if (tail > 2) { _2 = _mm_loadu_ps(ptr + 8); }
878         } else {
879             _0 = _mm_loadu_ps(ptr + 0);
880             _1 = _mm_loadu_ps(ptr + 4);
881             _2 = _mm_loadu_ps(ptr + 8);
882             _3 = _mm_loadu_ps(ptr +12);
883         }
884         _MM_TRANSPOSE4_PS(_0,_1,_2,_3);
885         *r = _0;
886         *g = _1;
887         *b = _2;
888         *a = _3;
889     }
890 
891     SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
892         _MM_TRANSPOSE4_PS(r,g,b,a);
893         if (__builtin_expect(tail, 0)) {
894             if (  true  ) { _mm_storeu_ps(ptr + 0, r); }
895             if (tail > 1) { _mm_storeu_ps(ptr + 4, g); }
896             if (tail > 2) { _mm_storeu_ps(ptr + 8, b); }
897         } else {
898             _mm_storeu_ps(ptr + 0, r);
899             _mm_storeu_ps(ptr + 4, g);
900             _mm_storeu_ps(ptr + 8, b);
901             _mm_storeu_ps(ptr +12, a);
902         }
903     }
904 #endif
905 
// We need to be careful with casts.
907 // (F)x means cast x to float in the portable path, but bit_cast x to float in the others.
908 // These named casts and bit_cast() are always what they seem to be.
909 #if defined(JUMPER_IS_SCALAR)
    SI F   cast  (U32 v) { return   (F)v; }
    SI F   cast64(U64 v) { return   (F)v; }
    SI U32 trunc_(F   v) { return (U32)v; }
    SI U32 expand(U16 v) { return (U32)v; }
    SI U32 expand(U8  v) { return (U32)v; }
#else
    SI F   cast  (U32 v) { return      __builtin_convertvector((I32)v,   F); }
    SI F   cast64(U64 v) { return      __builtin_convertvector(     v,   F); }
    SI U32 trunc_(F   v) { return (U32)__builtin_convertvector(     v, I32); }
    SI U32 expand(U16 v) { return      __builtin_convertvector(     v, U32); }
    SI U32 expand(U8  v) { return      __builtin_convertvector(     v, U32); }
921 #endif
922 
923 template <typename V>
SI V if_then_else(I32 c, V t, V e) {
925     return sk_bit_cast<V>(if_then_else(c, sk_bit_cast<F>(t), sk_bit_cast<F>(e)));
926 }
927 
SI U16 bswap(U16 x) {
929 #if defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41)
930     // Somewhat inexplicably Clang decides to do (x<<8) | (x>>8) in 32-bit lanes
931     // when generating code for SSE2 and SSE4.1.  We'll do it manually...
932     auto v = widen_cast<__m128i>(x);
933     v = _mm_slli_epi16(v,8) | _mm_srli_epi16(v,8);
934     return sk_unaligned_load<U16>(&v);
935 #else
936     return (x<<8) | (x>>8);
937 #endif
938 }
939 
SI F fract(F v) { return v - floor_(v); }
941 
942 // See http://www.machinedlearnings.com/2011/06/fast-approximate-logarithm-exponential.html.
SI F approx_log2(F x) {
944     // e - 127 is a fair approximation of log2(x) in its own right...
945     F e = cast(sk_bit_cast<U32>(x)) * (1.0f / (1<<23));
946 
947     // ... but using the mantissa to refine its error is _much_ better.
948     F m = sk_bit_cast<F>((sk_bit_cast<U32>(x) & 0x007fffff) | 0x3f000000);
949     return e
950          - 124.225514990f
951          -   1.498030302f * m
952          -   1.725879990f / (0.3520887068f + m);
953 }
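
// Rough sanity check: for x = 8.0f the biased-exponent term e is 130.0, and with m = 0.5 the
// three correction terms sum to almost exactly 127, so approx_log2(8.0f) comes out very near 3.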
954 
SI F approx_log(F x) {
956     const float ln2 = 0.69314718f;
957     return ln2 * approx_log2(x);
958 }
959 
SI F approx_pow2(F x) {
961     F f = fract(x);
962     return sk_bit_cast<F>(round(1.0f * (1<<23),
963                                 x + 121.274057500f
964                                   -   1.490129070f * f
965                                   +  27.728023300f / (4.84252568f - f)));
966 }
967 
SI F approx_exp(F x) {
969     const float log2_e = 1.4426950408889634074f;
970     return approx_pow2(log2_e * x);
971 }
972 
SI F approx_powf(F x, F y) {
974     return if_then_else((x == 0)|(x == 1), x
975                                          , approx_pow2(approx_log2(x) * y));
976 }
977 
SI F from_half(U16 h) {
979 #if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
980     && !defined(SK_BUILD_FOR_GOOGLE3)  // Temporary workaround for some Google3 builds.
981     return vcvt_f32_f16(h);
982 
983 #elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
984     return _mm256_cvtph_ps(h);
985 
986 #else
987     // Remember, a half is 1-5-10 (sign-exponent-mantissa) with 15 exponent bias.
988     U32 sem = expand(h),
989         s   = sem & 0x8000,
990          em = sem ^ s;
991 
992     // Convert to 1-8-23 float with 127 bias, flushing denorm halfs (including zero) to zero.
993     auto denorm = (I32)em < 0x0400;      // I32 comparison is often quicker, and always safe here.
994     return if_then_else(denorm, F(0)
995                               , sk_bit_cast<F>( (s<<16) + (em<<13) + ((127-15)<<23) ));
996 #endif
997 }
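
// Worked example for the portable path: the half 0x3c00 (1.0h) has s = 0 and em = 0x3c00,
// so (em<<13) + ((127-15)<<23) = 0x07800000 + 0x38000000 = 0x3f800000, which is exactly 1.0f.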
998 
SI U16 to_half(F f) {
1000 #if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
1001     && !defined(SK_BUILD_FOR_GOOGLE3)  // Temporary workaround for some Google3 builds.
1002     return vcvt_f16_f32(f);
1003 
1004 #elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
1005     return _mm256_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);
1006 
1007 #else
1008     // Remember, a float is 1-8-23 (sign-exponent-mantissa) with 127 exponent bias.
1009     U32 sem = sk_bit_cast<U32>(f),
1010         s   = sem & 0x80000000,
1011          em = sem ^ s;
1012 
1013     // Convert to 1-5-10 half with 15 bias, flushing denorm halfs (including zero) to zero.
1014     auto denorm = (I32)em < 0x38800000;  // I32 comparison is often quicker, and always safe here.
1015     return pack(if_then_else(denorm, U32(0)
1016                                    , (s>>16) + (em>>13) - ((127-15)<<10)));
1017 #endif
1018 }
1019 
1020 // Our fundamental vector depth is our pixel stride.
1021 static const size_t N = sizeof(F) / sizeof(float);
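// (That's 1 in the scalar path, 4 in the NEON and SSE paths, and 8 in the AVX/HSW/SKX paths.)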
1022 
1023 // We're finally going to get to what a Stage function looks like!
1024 //    tail == 0 ~~> work on a full N pixels
1025 //    tail != 0 ~~> work on only the first tail pixels
1026 // tail is always < N.
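// For example, with N == 8 an 11-pixel-wide row runs as one full batch (tail == 0)
// followed by one partial batch with tail == 3.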
1027 
1028 // Any custom ABI to use for all (non-externally-facing) stage functions?
1029 // Also decide here whether to use narrow (compromise) or wide (ideal) stages.
1030 #if defined(SK_CPU_ARM32) && defined(JUMPER_IS_NEON)
1031     // This lets us pass vectors more efficiently on 32-bit ARM.
1032     // We can still only pass 16 floats, so best as 4x {r,g,b,a}.
1033     #define ABI __attribute__((pcs("aapcs-vfp")))
1034     #define JUMPER_NARROW_STAGES 1
1035 #elif defined(_MSC_VER)
1036     // Even if not vectorized, this lets us pass {r,g,b,a} as registers,
1037     // instead of {b,a} on the stack.  Narrow stages work best for __vectorcall.
1038     #define ABI __vectorcall
1039     #define JUMPER_NARROW_STAGES 1
1040 #elif defined(__x86_64__) || defined(SK_CPU_ARM64)
1041     // These platforms are ideal for wider stages, and their default ABI is ideal.
1042     #define ABI
1043     #define JUMPER_NARROW_STAGES 0
1044 #else
1045     // 32-bit or unknown... shunt them down the narrow path.
1046     // Odds are these have few registers and are better off there.
1047     #define ABI
1048     #define JUMPER_NARROW_STAGES 1
1049 #endif
1050 
1051 #if JUMPER_NARROW_STAGES
1052     struct Params {
1053         size_t dx, dy, tail;
1054         F dr,dg,db,da;
1055     };
1056     using Stage = void(ABI*)(Params*, void** program, F r, F g, F b, F a);
1057 #else
1058     // We keep program the second argument, so that it's passed in rsi for load_and_inc().
1059     using Stage = void(ABI*)(size_t tail, void** program, size_t dx, size_t dy, F,F,F,F, F,F,F,F);
1060 #endif
1061 
1062 
static void start_pipeline(size_t dx, size_t dy, size_t xlimit, size_t ylimit, void** program) {
1064     auto start = (Stage)load_and_inc(program);
1065     const size_t x0 = dx;
1066     for (; dy < ylimit; dy++) {
1067     #if JUMPER_NARROW_STAGES
1068         Params params = { x0,dy,0, 0,0,0,0 };
1069         while (params.dx + N <= xlimit) {
1070             start(&params,program, 0,0,0,0);
1071             params.dx += N;
1072         }
1073         if (size_t tail = xlimit - params.dx) {
1074             params.tail = tail;
1075             start(&params,program, 0,0,0,0);
1076         }
1077     #else
1078         dx = x0;
1079         while (dx + N <= xlimit) {
1080             start(0,program,dx,dy,    0,0,0,0, 0,0,0,0);
1081             dx += N;
1082         }
1083         if (size_t tail = xlimit - dx) {
1084             start(tail,program,dx,dy, 0,0,0,0, 0,0,0,0);
1085         }
1086     #endif
1087     }
1088 }
1089 
1090 #if JUMPER_NARROW_STAGES
1091     #define STAGE(name, ...)                                                    \
1092         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,        \
1093                          F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da);   \
1094         static void ABI name(Params* params, void** program,                    \
1095                              F r, F g, F b, F a) {                              \
1096             name##_k(Ctx{program},params->dx,params->dy,params->tail, r,g,b,a,  \
1097                      params->dr, params->dg, params->db, params->da);           \
1098             auto next = (Stage)load_and_inc(program);                           \
1099             next(params,program, r,g,b,a);                                      \
1100         }                                                                       \
1101         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,        \
1102                          F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
1103 #else
1104     #define STAGE(name, ...)                                                         \
1105         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,             \
1106                          F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da);        \
1107         static void ABI name(size_t tail, void** program, size_t dx, size_t dy,      \
1108                              F r, F g, F b, F a, F dr, F dg, F db, F da) {           \
1109             name##_k(Ctx{program},dx,dy,tail, r,g,b,a, dr,dg,db,da);                 \
1110             auto next = (Stage)load_and_inc(program);                                \
1111             next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da);                          \
1112         }                                                                            \
1113         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,             \
1114                          F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
1115 #endif
1116 
1117 
1118 // just_return() is a simple no-op stage that only exists to end the chain,
1119 // returning back up to start_pipeline(), and from there to the caller.
1120 #if JUMPER_NARROW_STAGES
    static void ABI just_return(Params*, void**, F,F,F,F) {}
1122 #else
    static void ABI just_return(size_t, void**, size_t,size_t, F,F,F,F, F,F,F,F) {}
1124 #endif
1125 
1126 
1127 // We could start defining normal Stages now.  But first, some helper functions.
1128 
1129 // These load() and store() methods are tail-aware,
1130 // but focus mainly on keeping the at-stride tail==0 case fast.
1131 
1132 template <typename V, typename T>
SI V load(const T* src, size_t tail) {
1134 #if !defined(JUMPER_IS_SCALAR)
1135     __builtin_assume(tail < N);
1136     if (__builtin_expect(tail, 0)) {
1137         V v{};  // Any inactive lanes are zeroed.
1138         switch (tail) {
1139             case 7: v[6] = src[6]; [[fallthrough]];
1140             case 6: v[5] = src[5]; [[fallthrough]];
1141             case 5: v[4] = src[4]; [[fallthrough]];
1142             case 4: memcpy(&v, src, 4*sizeof(T)); break;
1143             case 3: v[2] = src[2]; [[fallthrough]];
1144             case 2: memcpy(&v, src, 2*sizeof(T)); break;
1145             case 1: memcpy(&v, src, 1*sizeof(T)); break;
1146         }
1147         return v;
1148     }
1149 #endif
1150     return sk_unaligned_load<V>(src);
1151 }
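// e.g. load<U32>(src, 3) reads exactly three values and leaves the remaining lanes zeroed.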
1152 
1153 template <typename V, typename T>
SI void store(T* dst, V v, size_t tail) {
1155 #if !defined(JUMPER_IS_SCALAR)
1156     __builtin_assume(tail < N);
1157     if (__builtin_expect(tail, 0)) {
1158         switch (tail) {
1159             case 7: dst[6] = v[6]; [[fallthrough]];
1160             case 6: dst[5] = v[5]; [[fallthrough]];
1161             case 5: dst[4] = v[4]; [[fallthrough]];
1162             case 4: memcpy(dst, &v, 4*sizeof(T)); break;
1163             case 3: dst[2] = v[2]; [[fallthrough]];
1164             case 2: memcpy(dst, &v, 2*sizeof(T)); break;
1165             case 1: memcpy(dst, &v, 1*sizeof(T)); break;
1166         }
1167         return;
1168     }
1169 #endif
1170     sk_unaligned_store(dst, v);
1171 }
1172 
SI F from_byte(U8 b) {
1174     return cast(expand(b)) * (1/255.0f);
1175 }
SI F from_short(U16 s) {
1177     return cast(expand(s)) * (1/65535.0f);
1178 }
SI void from_565(U16 _565, F* r, F* g, F* b) {
1180     U32 wide = expand(_565);
1181     *r = cast(wide & (31<<11)) * (1.0f / (31<<11));
1182     *g = cast(wide & (63<< 5)) * (1.0f / (63<< 5));
1183     *b = cast(wide & (31<< 0)) * (1.0f / (31<< 0));
1184 }
SI void from_4444(U16 _4444, F* r, F* g, F* b, F* a) {
1186     U32 wide = expand(_4444);
1187     *r = cast(wide & (15<<12)) * (1.0f / (15<<12));
1188     *g = cast(wide & (15<< 8)) * (1.0f / (15<< 8));
1189     *b = cast(wide & (15<< 4)) * (1.0f / (15<< 4));
1190     *a = cast(wide & (15<< 0)) * (1.0f / (15<< 0));
1191 }
SI void from_8888(U32 _8888, F* r, F* g, F* b, F* a) {
1193     *r = cast((_8888      ) & 0xff) * (1/255.0f);
1194     *g = cast((_8888 >>  8) & 0xff) * (1/255.0f);
1195     *b = cast((_8888 >> 16) & 0xff) * (1/255.0f);
1196     *a = cast((_8888 >> 24)       ) * (1/255.0f);
1197 }
SI void from_88(U16 _88, F* r, F* g) {
1199     U32 wide = expand(_88);
1200     *r = cast((wide      ) & 0xff) * (1/255.0f);
1201     *g = cast((wide >>  8) & 0xff) * (1/255.0f);
1202 }
SI void from_1010102(U32 rgba, F* r, F* g, F* b, F* a) {
1204     *r = cast((rgba      ) & 0x3ff) * (1/1023.0f);
1205     *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f);
1206     *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f);
1207     *a = cast((rgba >> 30)        ) * (1/   3.0f);
1208 }
SI void from_1616(U32 _1616, F* r, F* g) {
1210     *r = cast((_1616      ) & 0xffff) * (1/65535.0f);
1211     *g = cast((_1616 >> 16) & 0xffff) * (1/65535.0f);
1212 }
SI void from_16161616(U64 _16161616, F* r, F* g, F* b, F* a) {
1214     *r = cast64((_16161616      ) & 0xffff) * (1/65535.0f);
1215     *g = cast64((_16161616 >> 16) & 0xffff) * (1/65535.0f);
1216     *b = cast64((_16161616 >> 32) & 0xffff) * (1/65535.0f);
1217     *a = cast64((_16161616 >> 48) & 0xffff) * (1/65535.0f);
1218 }
1219 
1220 // Used by load_ and store_ stages to get to the right (dx,dy) starting point of contiguous memory.
1221 template <typename T>
SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
1223     return (T*)ctx->pixels + dy*ctx->stride + dx;
1224 }
1225 
1226 // clamp v to [0,limit).
SI F clamp(F v, F limit) {
1228     F inclusive = sk_bit_cast<F>( sk_bit_cast<U32>(limit) - 1 );  // Exclusive -> inclusive.
1229     return min(max(0, v), inclusive);
1230 }
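// e.g. with limit = 4.0f, the bit trick yields 0x407fffff (~3.9999998), the largest float
// strictly less than 4, so clamped coordinates always index inside a 4-wide row.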
1231 
1232 // Used by gather_ stages to calculate the base pointer and a vector of indices to load.
1233 template <typename T>
SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
1235     x = clamp(x, ctx->width);
1236     y = clamp(y, ctx->height);
1237 
1238     *ptr = (const T*)ctx->pixels;
1239     return trunc_(y)*ctx->stride + trunc_(x);
1240 }
1241 
1242 // We often have a nominally [0,1] float value we need to scale and convert to an integer,
1243 // whether for a table lookup or to pack back down into bytes for storage.
1244 //
1245 // In practice, especially when dealing with interesting color spaces, that notionally
1246 // [0,1] float may be out of [0,1] range.  Unorms cannot represent that, so we must clamp.
1247 //
1248 // You can adjust the expected input to [0,bias] by tweaking that parameter.
1249 SI U32 to_unorm(F v, F scale, F bias = 1.0f) {
1250     // Any time we use round() we probably want to use to_unorm().
1251     return round(min(max(0, v), bias), scale);
1252 }
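// e.g. to_unorm(v, 255.0f) clamps v to [0,1] and rounds it to an integer in [0,255],
// ready to pack back down into a byte.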
1253 
SI I32 cond_to_mask(I32 cond) { return if_then_else(cond, I32(~0), I32(0)); }
1255 
1256 // Now finally, normal Stages!
1257 
STAGE(seed_shader, Ctx::None) {
1259     static const float iota[] = {
1260         0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
1261         8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
1262     };
1263     // It's important for speed to explicitly cast(dx) and cast(dy),
1264     // which has the effect of splatting them to vectors before converting to floats.
1265     // On Intel this breaks a data dependency on previous loop iterations' registers.
1266     r = cast(dx) + sk_unaligned_load<F>(iota);
1267     g = cast(dy) + 0.5f;
1268     b = 1.0f;
1269     a = 0;
1270     dr = dg = db = da = 0;
1271 }
1272 
1273 STAGE(dither, const float* rate) {
1274     // Get [(dx,dy), (dx+1,dy), (dx+2,dy), ...] loaded up in integer vectors.
1275     uint32_t iota[] = {0,1,2,3,4,5,6,7};
1276     U32 X = dx + sk_unaligned_load<U32>(iota),
1277         Y = dy;
1278 
1279     // We're doing 8x8 ordered dithering, see https://en.wikipedia.org/wiki/Ordered_dithering.
1280     // In this case n=8 and we're using the matrix that looks like 1/64 x [ 0 48 12 60 ... ].
1281 
1282     // We only need X and X^Y from here on, so it's easier to just think of that as "Y".
1283     Y ^= X;
1284 
1285     // We'll mix the bottom 3 bits of each of X and Y to make 6 bits,
1286     // for 2^6 == 64 == 8x8 matrix values.  If X=abc and Y=def, we make fcebda.
1287     U32 M = (Y & 1) << 5 | (X & 1) << 4
1288           | (Y & 2) << 2 | (X & 2) << 1
1289           | (Y & 4) >> 1 | (X & 4) >> 2;
1290 
1291     // Scale that dither to [0,1), then (-0.5,+0.5), here using 63/128 = 0.4921875 as 0.5-epsilon.
1292     // We want to make sure our dither is less than 0.5 in either direction to keep exact values
1293     // like 0 and 1 unchanged after rounding.
1294     F dither = cast(M) * (2/128.0f) - (63/128.0f);
1295 
1296     r += *rate*dither;
1297     g += *rate*dither;
1298     b += *rate*dither;
1299 
1300     r = max(0, min(r, a));
1301     g = max(0, min(g, a));
1302     b = max(0, min(b, a));
1303 }
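
// A scalar sketch of the bit mixing above (illustrative name, not used by the
// pipeline): for a pixel at (x,y) it reproduces the 8x8 ordered-dither matrix
// entry M in [0,63], e.g. M(0,0) == 0, M(1,0) == 48, M(2,0) == 12, M(3,0) == 60.
SI uint32_t dither_matrix_8x8(uint32_t x, uint32_t y) {
    y ^= x;   // As above, only x and x^y are needed.
    return (y & 1) << 5 | (x & 1) << 4
         | (y & 2) << 2 | (x & 2) << 1
         | (y & 4) >> 1 | (x & 4) >> 2;
}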
1304 
1305 // load 4 floats from memory, and splat them into r,g,b,a
1306 STAGE(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
1307     r = c->r;
1308     g = c->g;
1309     b = c->b;
1310     a = c->a;
1311 }
1312 STAGE(unbounded_uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
1313     r = c->r;
1314     g = c->g;
1315     b = c->b;
1316     a = c->a;
1317 }
1318 // load 4 floats from memory, and splat them into dr,dg,db,da
1319 STAGE(uniform_color_dst, const SkRasterPipeline_UniformColorCtx* c) {
1320     dr = c->r;
1321     dg = c->g;
1322     db = c->b;
1323     da = c->a;
1324 }
1325 
1326 // splats opaque-black into r,g,b,a
1327 STAGE(black_color, Ctx::None) {
1328     r = g = b = 0.0f;
1329     a = 1.0f;
1330 }
1331 
1332 STAGE(white_color, Ctx::None) {
1333     r = g = b = a = 1.0f;
1334 }
1335 
1336 // load registers r,g,b,a from context (mirrors store_rgba)
1337 STAGE(load_src, const float* ptr) {
1338     r = sk_unaligned_load<F>(ptr + 0*N);
1339     g = sk_unaligned_load<F>(ptr + 1*N);
1340     b = sk_unaligned_load<F>(ptr + 2*N);
1341     a = sk_unaligned_load<F>(ptr + 3*N);
1342 }
1343 
1344 // store registers r,g,b,a into context (mirrors load_rgba)
1345 STAGE(store_src, float* ptr) {
1346     sk_unaligned_store(ptr + 0*N, r);
1347     sk_unaligned_store(ptr + 1*N, g);
1348     sk_unaligned_store(ptr + 2*N, b);
1349     sk_unaligned_store(ptr + 3*N, a);
1350 }
1351 STAGE(store_src_a, float* ptr) {
1352     sk_unaligned_store(ptr, a);
1353 }
1354 
1355 // load registers dr,dg,db,da from context (mirrors store_dst)
1356 STAGE(load_dst, const float* ptr) {
1357     dr = sk_unaligned_load<F>(ptr + 0*N);
1358     dg = sk_unaligned_load<F>(ptr + 1*N);
1359     db = sk_unaligned_load<F>(ptr + 2*N);
1360     da = sk_unaligned_load<F>(ptr + 3*N);
1361 }
1362 
1363 // store registers dr,dg,db,da into context (mirrors load_dst)
1364 STAGE(store_dst, float* ptr) {
1365     sk_unaligned_store(ptr + 0*N, dr);
1366     sk_unaligned_store(ptr + 1*N, dg);
1367     sk_unaligned_store(ptr + 2*N, db);
1368     sk_unaligned_store(ptr + 3*N, da);
1369 }
1370 
1371 // Most blend modes apply the same logic to each channel.
1372 #define BLEND_MODE(name)                       \
1373     SI F name##_channel(F s, F d, F sa, F da); \
1374     STAGE(name, Ctx::None) {                   \
1375         r = name##_channel(r,dr,a,da);         \
1376         g = name##_channel(g,dg,a,da);         \
1377         b = name##_channel(b,db,a,da);         \
1378         a = name##_channel(a,da,a,da);         \
1379     }                                          \
1380     SI F name##_channel(F s, F d, F sa, F da)
1381 
1382 SI F inv(F x) { return 1.0f - x; }
1383 SI F two(F x) { return x + x; }
1384 
1385 
1386 BLEND_MODE(clear)    { return 0; }
1387 BLEND_MODE(srcatop)  { return s*da + d*inv(sa); }
1388 BLEND_MODE(dstatop)  { return d*sa + s*inv(da); }
1389 BLEND_MODE(srcin)    { return s * da; }
1390 BLEND_MODE(dstin)    { return d * sa; }
1391 BLEND_MODE(srcout)   { return s * inv(da); }
1392 BLEND_MODE(dstout)   { return d * inv(sa); }
1393 BLEND_MODE(srcover)  { return mad(d, inv(sa), s); }
1394 BLEND_MODE(dstover)  { return mad(s, inv(da), d); }
1395 
1396 BLEND_MODE(modulate) { return s*d; }
1397 BLEND_MODE(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
1398 BLEND_MODE(plus_)    { return min(s + d, 1.0f); }  // We can clamp to either 1 or sa.
1399 BLEND_MODE(screen)   { return s + d - s*d; }
1400 BLEND_MODE(xor_)     { return s*inv(da) + d*inv(sa); }
1401 #undef BLEND_MODE
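
// For reference, BLEND_MODE(srcover) above expands to roughly:
//
//     SI F srcover_channel(F s, F d, F sa, F da);
//     STAGE(srcover, Ctx::None) {
//         r = srcover_channel(r,dr,a,da);
//         g = srcover_channel(g,dg,a,da);
//         b = srcover_channel(b,db,a,da);
//         a = srcover_channel(a,da,a,da);
//     }
//     SI F srcover_channel(F s, F d, F sa, F da) { return mad(d, inv(sa), s); }
//
// i.e. the usual premultiplied src-over equation s + d*(1-sa), applied per channel.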
1402 
1403 // Most other blend modes apply the same logic to colors, and srcover to alpha.
1404 #define BLEND_MODE(name)                       \
1405     SI F name##_channel(F s, F d, F sa, F da); \
1406     STAGE(name, Ctx::None) {                   \
1407         r = name##_channel(r,dr,a,da);         \
1408         g = name##_channel(g,dg,a,da);         \
1409         b = name##_channel(b,db,a,da);         \
1410         a = mad(da, inv(a), a);                \
1411     }                                          \
1412     SI F name##_channel(F s, F d, F sa, F da)
1413 
1414 BLEND_MODE(darken)     { return s + d -     max(s*da, d*sa) ; }
1415 BLEND_MODE(lighten)    { return s + d -     min(s*da, d*sa) ; }
1416 BLEND_MODE(difference) { return s + d - two(min(s*da, d*sa)); }
1417 BLEND_MODE(exclusion)  { return s + d - two(s*d); }
1418 
1419 BLEND_MODE(colorburn) {
1420     return if_then_else(d == da,    d +    s*inv(da),
1421            if_then_else(s ==  0, /* s + */ d*inv(sa),
1422                                  sa*(da - min(da, (da-d)*sa*rcp(s))) + s*inv(da) + d*inv(sa)));
1423 }
1424 BLEND_MODE(colordodge) {
1425     return if_then_else(d ==  0, /* d + */ s*inv(da),
1426            if_then_else(s == sa,    s +    d*inv(sa),
1427                                  sa*min(da, (d*sa)*rcp(sa - s)) + s*inv(da) + d*inv(sa)));
1428 }
1429 BLEND_MODE(hardlight) {
1430     return s*inv(da) + d*inv(sa)
1431          + if_then_else(two(s) <= sa, two(s*d), sa*da - two((da-d)*(sa-s)));
1432 }
1433 BLEND_MODE(overlay) {
1434     return s*inv(da) + d*inv(sa)
1435          + if_then_else(two(d) <= da, two(s*d), sa*da - two((da-d)*(sa-s)));
1436 }
1437 
1438 BLEND_MODE(softlight) {
1439     F m  = if_then_else(da > 0, d / da, 0),
1440       s2 = two(s),
1441       m4 = two(two(m));
1442 
1443     // The logic forks three ways:
1444     //    1. dark src?
1445     //    2. light src, dark dst?
1446     //    3. light src, light dst?
1447     F darkSrc = d*(sa + (s2 - sa)*(1.0f - m)),     // Used in case 1.
1448       darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m,  // Used in case 2.
1449     #if defined(SK_RASTER_PIPELINE_LEGACY_RCP_RSQRT)
1450       liteDst = rcp(rsqrt(m)) - m,                 // Used in case 3.
1451     #else
1452       liteDst = sqrt_(m) - m,
1453     #endif
1454       liteSrc = d*sa + da*(s2 - sa) * if_then_else(two(two(d)) <= da, darkDst, liteDst); // 2 or 3?
1455     return s*inv(da) + d*inv(sa) + if_then_else(s2 <= sa, darkSrc, liteSrc);      // 1 or (2 or 3)?
1456 }
1457 #undef BLEND_MODE
1458 
1459 // We're basing our implementation of non-separable blend modes on
1460 //   https://www.w3.org/TR/compositing-1/#blendingnonseparable.
1461 // and
1462 //   https://www.khronos.org/registry/OpenGL/specs/es/3.2/es_spec_3.2.pdf
1463 // They're equivalent, but ES' math has been better simplified.
1464 //
1465 // Anything extra we add beyond that is to make the math work with premul inputs.
1466 
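// Concretely, each stage below computes, per channel,
//
//     result = src*(1-da) + dst*(1-sa) + Blend(src,dst),
//
// where Blend() is the spec's non-separable blend evaluated on values scaled
// into premul range (hence the r*a, dr*da style products and the clip_color()
// call with a*da), while alpha always gets plain src-over: a + da - a*da.
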
1467 SI F sat(F r, F g, F b) { return max(r, max(g,b)) - min(r, min(g,b)); }
1468 SI F lum(F r, F g, F b) { return r*0.30f + g*0.59f + b*0.11f; }
1469 
1470 SI void set_sat(F* r, F* g, F* b, F s) {
1471     F mn  = min(*r, min(*g,*b)),
1472       mx  = max(*r, max(*g,*b)),
1473       sat = mx - mn;
1474 
1475     // Map min channel to 0, max channel to s, and scale the middle proportionally.
1476     auto scale = [=](F c) {
1477         return if_then_else(sat == 0, 0, (c - mn) * s / sat);
1478     };
1479     *r = scale(*r);
1480     *g = scale(*g);
1481     *b = scale(*b);
1482 }
1483 SI void set_lum(F* r, F* g, F* b, F l) {
1484     F diff = l - lum(*r, *g, *b);
1485     *r += diff;
1486     *g += diff;
1487     *b += diff;
1488 }
1489 SI void clip_color(F* r, F* g, F* b, F a) {
1490     F mn = min(*r, min(*g, *b)),
1491       mx = max(*r, max(*g, *b)),
1492       l  = lum(*r, *g, *b);
1493 
1494     auto clip = [=](F c) {
1495         c = if_then_else(mn >= 0, c, l + (c - l) * (    l) / (l - mn)   );
1496         c = if_then_else(mx >  a,    l + (c - l) * (a - l) / (mx - l), c);
1497         c = max(c, 0);  // Sometimes without this we may dip just a little negative.
1498         return c;
1499     };
1500     *r = clip(*r);
1501     *g = clip(*g);
1502     *b = clip(*b);
1503 }
1504 
1505 STAGE(hue, Ctx::None) {
1506     F R = r*a,
1507       G = g*a,
1508       B = b*a;
1509 
1510     set_sat(&R, &G, &B, sat(dr,dg,db)*a);
1511     set_lum(&R, &G, &B, lum(dr,dg,db)*a);
1512     clip_color(&R,&G,&B, a*da);
1513 
1514     r = r*inv(da) + dr*inv(a) + R;
1515     g = g*inv(da) + dg*inv(a) + G;
1516     b = b*inv(da) + db*inv(a) + B;
1517     a = a + da - a*da;
1518 }
1519 STAGE(saturation, Ctx::None) {
1520     F R = dr*a,
1521       G = dg*a,
1522       B = db*a;
1523 
1524     set_sat(&R, &G, &B, sat( r, g, b)*da);
1525     set_lum(&R, &G, &B, lum(dr,dg,db)* a);  // (This is not redundant.)
1526     clip_color(&R,&G,&B, a*da);
1527 
1528     r = r*inv(da) + dr*inv(a) + R;
1529     g = g*inv(da) + dg*inv(a) + G;
1530     b = b*inv(da) + db*inv(a) + B;
1531     a = a + da - a*da;
1532 }
1533 STAGE(color, Ctx::None) {
1534     F R = r*da,
1535       G = g*da,
1536       B = b*da;
1537 
1538     set_lum(&R, &G, &B, lum(dr,dg,db)*a);
1539     clip_color(&R,&G,&B, a*da);
1540 
1541     r = r*inv(da) + dr*inv(a) + R;
1542     g = g*inv(da) + dg*inv(a) + G;
1543     b = b*inv(da) + db*inv(a) + B;
1544     a = a + da - a*da;
1545 }
1546 STAGE(luminosity, Ctx::None) {
1547     F R = dr*a,
1548       G = dg*a,
1549       B = db*a;
1550 
1551     set_lum(&R, &G, &B, lum(r,g,b)*da);
1552     clip_color(&R,&G,&B, a*da);
1553 
1554     r = r*inv(da) + dr*inv(a) + R;
1555     g = g*inv(da) + dg*inv(a) + G;
1556     b = b*inv(da) + db*inv(a) + B;
1557     a = a + da - a*da;
1558 }
1559 
1560 STAGE(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
1561     auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
1562 
1563     U32 dst = load<U32>(ptr, tail);
1564     dr = cast((dst      ) & 0xff);
1565     dg = cast((dst >>  8) & 0xff);
1566     db = cast((dst >> 16) & 0xff);
1567     da = cast((dst >> 24)       );
1568     // {dr,dg,db,da} are in [0,255]
1569     // { r, g, b, a} are in [0,  1] (but may be out of gamut)
1570 
1571     r = mad(dr, inv(a), r*255.0f);
1572     g = mad(dg, inv(a), g*255.0f);
1573     b = mad(db, inv(a), b*255.0f);
1574     a = mad(da, inv(a), a*255.0f);
1575     // { r, g, b, a} are now in [0,255]  (but may be out of gamut)
1576 
1577     // to_unorm() clamps back to gamut.  Scaling by 1 since we're already 255-biased.
1578     dst = to_unorm(r, 1, 255)
1579         | to_unorm(g, 1, 255) <<  8
1580         | to_unorm(b, 1, 255) << 16
1581         | to_unorm(a, 1, 255) << 24;
1582     store(ptr, dst, tail);
1583 }
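
// A worked example of the biasing above, one channel: with r = 0.5, a = 0.5 and
// dst byte dr = 100, we get r = 100*(1-0.5) + 0.5*255 = 177.5, and
// to_unorm(r, 1, 255) clamps and rounds that to 178 -- the same answer as
// blending in [0,1] (0.392*0.5 + 0.5 = 0.696) and scaling by 255 at the end.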
1584 
1585 STAGE(clamp_0, Ctx::None) {
1586     r = max(r, 0);
1587     g = max(g, 0);
1588     b = max(b, 0);
1589     a = max(a, 0);
1590 }
1591 
1592 STAGE(clamp_1, Ctx::None) {
1593     r = min(r, 1.0f);
1594     g = min(g, 1.0f);
1595     b = min(b, 1.0f);
1596     a = min(a, 1.0f);
1597 }
1598 
1599 STAGE(clamp_a, Ctx::None) {
1600     a = min(a, 1.0f);
1601     r = min(r, a);
1602     g = min(g, a);
1603     b = min(b, a);
1604 }
1605 
1606 STAGE(clamp_gamut, Ctx::None) {
1607     a = min(max(a, 0), 1.0f);
1608     r = min(max(r, 0), a);
1609     g = min(max(g, 0), a);
1610     b = min(max(b, 0), a);
1611 }
1612 
1613 STAGE(set_rgb, const float* rgb) {
1614     r = rgb[0];
1615     g = rgb[1];
1616     b = rgb[2];
1617 }
1618 STAGE(unbounded_set_rgb, const float* rgb) {
1619     r = rgb[0];
1620     g = rgb[1];
1621     b = rgb[2];
1622 }
1623 
1624 STAGE(swap_rb, Ctx::None) {
1625     auto tmp = r;
1626     r = b;
1627     b = tmp;
1628 }
1629 STAGE(swap_rb_dst, Ctx::None) {
1630     auto tmp = dr;
1631     dr = db;
1632     db = tmp;
1633 }
1634 
1635 STAGE(move_src_dst, Ctx::None) {
1636     dr = r;
1637     dg = g;
1638     db = b;
1639     da = a;
1640 }
1641 STAGE(move_dst_src, Ctx::None) {
1642     r = dr;
1643     g = dg;
1644     b = db;
1645     a = da;
1646 }
1647 
1648 STAGE(premul, Ctx::None) {
1649     r = r * a;
1650     g = g * a;
1651     b = b * a;
1652 }
1653 STAGE(premul_dst, Ctx::None) {
1654     dr = dr * da;
1655     dg = dg * da;
1656     db = db * da;
1657 }
1658 STAGE(unpremul, Ctx::None) {
1659     float inf = sk_bit_cast<float>(0x7f800000);
1660     auto scale = if_then_else(1.0f/a < inf, 1.0f/a, 0);
1661     r *= scale;
1662     g *= scale;
1663     b *= scale;
1664 }
1665 
1666 STAGE(force_opaque    , Ctx::None) {  a = 1; }
1667 STAGE(force_opaque_dst, Ctx::None) { da = 1; }
1668 
1669 // Clamp x to [0,1], both sides inclusive (think, gradients).
1670 // Even repeat and mirror funnel through a clamp to handle bad inputs like +Inf, NaN.
1671 SI F clamp_01(F v) { return min(max(0, v), 1); }
1672 
1673 STAGE(rgb_to_hsl, Ctx::None) {
1674     F mx = max(r, max(g,b)),
1675       mn = min(r, min(g,b)),
1676       d = mx - mn,
1677       d_rcp = 1.0f / d;
1678 
1679     F h = (1/6.0f) *
1680           if_then_else(mx == mn, 0,
1681           if_then_else(mx ==  r, (g-b)*d_rcp + if_then_else(g < b, 6.0f, 0),
1682           if_then_else(mx ==  g, (b-r)*d_rcp + 2.0f,
1683                                  (r-g)*d_rcp + 4.0f)));
1684 
1685     F l = (mx + mn) * 0.5f;
1686     F s = if_then_else(mx == mn, 0,
1687                        d / if_then_else(l > 0.5f, 2.0f-mx-mn, mx+mn));
1688 
1689     r = h;
1690     g = s;
1691     b = l;
1692 }
1693 STAGE(hsl_to_rgb, Ctx::None) {
1694     // See GrRGBToHSLFilterEffect.fp
1695 
1696     F h = r,
1697       s = g,
1698       l = b,
1699       c = (1.0f - abs_(2.0f * l - 1)) * s;
1700 
1701     auto hue_to_rgb = [&](F hue) {
1702         F q = clamp_01(abs_(fract(hue) * 6.0f - 3.0f) - 1.0f);
1703         return (q - 0.5f) * c + l;
1704     };
1705 
1706     r = hue_to_rgb(h + 0.0f/3.0f);
1707     g = hue_to_rgb(h + 2.0f/3.0f);
1708     b = hue_to_rgb(h + 1.0f/3.0f);
1709 }
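
// Worked example: pure red is (h,s,l) = (0, 1, 0.5), so c = 1 and the three
// hue_to_rgb() calls evaluate q = 1, 0, 0 respectively, returning (r,g,b) = (1,0,0).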
1710 
1711 // Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
1712 SI F alpha_coverage_from_rgb_coverage(F a, F da, F cr, F cg, F cb) {
1713     return if_then_else(a < da, min(cr, min(cg,cb))
1714                               , max(cr, max(cg,cb)));
1715 }
1716 
1717 STAGE(scale_1_float, const float* c) {
1718     r = r * *c;
1719     g = g * *c;
1720     b = b * *c;
1721     a = a * *c;
1722 }
1723 STAGE(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
1724     auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
1725 
1726     auto scales = load<U8>(ptr, tail);
1727     auto c = from_byte(scales);
1728 
1729     r = r * c;
1730     g = g * c;
1731     b = b * c;
1732     a = a * c;
1733 }
1734 STAGE(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
1735     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
1736 
1737     F cr,cg,cb;
1738     from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
1739 
1740     F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
1741 
1742     r = r * cr;
1743     g = g * cg;
1744     b = b * cb;
1745     a = a * ca;
1746 }
1747 
1748 SI F lerp(F from, F to, F t) {
1749     return mad(to-from, t, from);
1750 }
1751 
1752 STAGE(lerp_1_float, const float* c) {
1753     r = lerp(dr, r, *c);
1754     g = lerp(dg, g, *c);
1755     b = lerp(db, b, *c);
1756     a = lerp(da, a, *c);
1757 }
1758 STAGE(scale_native, const float scales[]) {
1759     auto c = sk_unaligned_load<F>(scales);
1760     r = r * c;
1761     g = g * c;
1762     b = b * c;
1763     a = a * c;
1764 }
1765 STAGE(lerp_native, const float scales[]) {
1766     auto c = sk_unaligned_load<F>(scales);
1767     r = lerp(dr, r, c);
1768     g = lerp(dg, g, c);
1769     b = lerp(db, b, c);
1770     a = lerp(da, a, c);
1771 }
1772 STAGE(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
1773     auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
1774 
1775     auto scales = load<U8>(ptr, tail);
1776     auto c = from_byte(scales);
1777 
1778     r = lerp(dr, r, c);
1779     g = lerp(dg, g, c);
1780     b = lerp(db, b, c);
1781     a = lerp(da, a, c);
1782 }
1783 STAGE(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
1784     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
1785 
1786     F cr,cg,cb;
1787     from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
1788 
1789     F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
1790 
1791     r = lerp(dr, r, cr);
1792     g = lerp(dg, g, cg);
1793     b = lerp(db, b, cb);
1794     a = lerp(da, a, ca);
1795 }
1796 
1797 STAGE(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
1798     auto mptr = ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy),
1799          aptr = ptr_at_xy<const uint8_t>(&ctx->add, dx,dy);
1800 
1801     F mul = from_byte(load<U8>(mptr, tail)),
1802       add = from_byte(load<U8>(aptr, tail));
1803 
1804     r = mad(r, mul, add);
1805     g = mad(g, mul, add);
1806     b = mad(b, mul, add);
1807 }
1808 
1809 STAGE(byte_tables, const void* ctx) {
1810     struct Tables { const uint8_t *r, *g, *b, *a; };
1811     auto tables = (const Tables*)ctx;
1812 
1813     r = from_byte(gather(tables->r, to_unorm(r, 255)));
1814     g = from_byte(gather(tables->g, to_unorm(g, 255)));
1815     b = from_byte(gather(tables->b, to_unorm(b, 255)));
1816     a = from_byte(gather(tables->a, to_unorm(a, 255)));
1817 }
1818 
1819 SI F strip_sign(F x, U32* sign) {
1820     U32 bits = sk_bit_cast<U32>(x);
1821     *sign = bits & 0x80000000;
1822     return sk_bit_cast<F>(bits ^ *sign);
1823 }
1824 
1825 SI F apply_sign(F x, U32 sign) {
1826     return sk_bit_cast<F>(sign | sk_bit_cast<U32>(x));
1827 }
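
// strip_sign()/apply_sign() let the transfer-function stages below treat each
// curve as an odd function: evaluate it on |x|, then restore x's original sign
// bit.  A scalar sketch of the same idea (illustrative name, plain floats,
// not used by the pipeline):
SI float apply_as_odd_function(float x, float (*curve)(float)) {
    uint32_t bits = sk_bit_cast<uint32_t>(x),
             sign = bits & 0x80000000;
    float fn_of_abs = curve(sk_bit_cast<float>(bits ^ sign));   // curve(|x|)
    return sk_bit_cast<float>(sign | sk_bit_cast<uint32_t>(fn_of_abs));
}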
1828 
1829 STAGE(parametric, const skcms_TransferFunction* ctx) {
1830     auto fn = [&](F v) {
1831         U32 sign;
1832         v = strip_sign(v, &sign);
1833 
1834         F r = if_then_else(v <= ctx->d, mad(ctx->c, v, ctx->f)
1835                                       , approx_powf(mad(ctx->a, v, ctx->b), ctx->g) + ctx->e);
1836         return apply_sign(r, sign);
1837     };
1838     r = fn(r);
1839     g = fn(g);
1840     b = fn(b);
1841 }
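
// For orientation (values from the usual sRGB definition, not read from this
// file): the sRGB curve corresponds roughly to g = 2.4, a = 1/1.055,
// b = 0.055/1.055, c = 1/12.92, d = 0.04045, e = f = 0, i.e. a linear toe
// below d and a shifted power curve above it.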
1842 
1843 STAGE(gamma_, const float* G) {
1844     auto fn = [&](F v) {
1845         U32 sign;
1846         v = strip_sign(v, &sign);
1847         return apply_sign(approx_powf(v, *G), sign);
1848     };
1849     r = fn(r);
1850     g = fn(g);
1851     b = fn(b);
1852 }
1853 
1854 STAGE(PQish, const skcms_TransferFunction* ctx) {
1855     auto fn = [&](F v) {
1856         U32 sign;
1857         v = strip_sign(v, &sign);
1858 
1859         F r = approx_powf(max(mad(ctx->b, approx_powf(v, ctx->c), ctx->a), 0)
1860                            / (mad(ctx->e, approx_powf(v, ctx->c), ctx->d)),
1861                         ctx->f);
1862 
1863         return apply_sign(r, sign);
1864     };
1865     r = fn(r);
1866     g = fn(g);
1867     b = fn(b);
1868 }
1869 
1870 STAGE(HLGish, const skcms_TransferFunction* ctx) {
1871     auto fn = [&](F v) {
1872         U32 sign;
1873         v = strip_sign(v, &sign);
1874 
1875         const float R = ctx->a, G = ctx->b,
1876                     a = ctx->c, b = ctx->d, c = ctx->e,
1877                     K = ctx->f + 1.0f;
1878 
1879         F r = if_then_else(v*R <= 1, approx_powf(v*R, G)
1880                                    , approx_exp((v-c)*a) + b);
1881 
1882         return K * apply_sign(r, sign);
1883     };
1884     r = fn(r);
1885     g = fn(g);
1886     b = fn(b);
1887 }
1888 
1889 STAGE(HLGinvish, const skcms_TransferFunction* ctx) {
1890     auto fn = [&](F v) {
1891         U32 sign;
1892         v = strip_sign(v, &sign);
1893 
1894         const float R = ctx->a, G = ctx->b,
1895                     a = ctx->c, b = ctx->d, c = ctx->e,
1896                     K = ctx->f + 1.0f;
1897 
1898         v /= K;
1899         F r = if_then_else(v <= 1, R * approx_powf(v, G)
1900                                  , a * approx_log(v - b) + c);
1901 
1902         return apply_sign(r, sign);
1903     };
1904     r = fn(r);
1905     g = fn(g);
1906     b = fn(b);
1907 }
1908 
1909 STAGE(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
1910     auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
1911 
1912     r = g = b = 0.0f;
1913     a = from_byte(load<U8>(ptr, tail));
1914 }
1915 STAGE(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
1916     auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
1917 
1918     dr = dg = db = 0.0f;
1919     da = from_byte(load<U8>(ptr, tail));
1920 }
1921 STAGE(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
1922     const uint8_t* ptr;
1923     U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1924     r = g = b = 0.0f;
1925     a = from_byte(gather(ptr, ix));
1926 }
1927 STAGE(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
1928     auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
1929 
1930     U8 packed = pack(pack(to_unorm(a, 255)));
1931     store(ptr, packed, tail);
1932 }
1933 
1934 STAGE(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
1935     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
1936 
1937     from_565(load<U16>(ptr, tail), &r,&g,&b);
1938     a = 1.0f;
1939 }
1940 STAGE(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
1941     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
1942 
1943     from_565(load<U16>(ptr, tail), &dr,&dg,&db);
1944     da = 1.0f;
1945 }
1946 STAGE(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
1947     const uint16_t* ptr;
1948     U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1949     from_565(gather(ptr, ix), &r,&g,&b);
1950     a = 1.0f;
1951 }
1952 STAGE(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
1953     auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
1954 
1955     U16 px = pack( to_unorm(r, 31) << 11
1956                  | to_unorm(g, 63) <<  5
1957                  | to_unorm(b, 31)      );
1958     store(ptr, px, tail);
1959 }
1960 
1961 STAGE(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
1962     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
1963     from_4444(load<U16>(ptr, tail), &r,&g,&b,&a);
1964 }
1965 STAGE(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
1966     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
1967     from_4444(load<U16>(ptr, tail), &dr,&dg,&db,&da);
1968 }
1969 STAGE(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
1970     const uint16_t* ptr;
1971     U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1972     from_4444(gather(ptr, ix), &r,&g,&b,&a);
1973 }
1974 STAGE(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
1975     auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
1976     U16 px = pack( to_unorm(r, 15) << 12
1977                  | to_unorm(g, 15) <<  8
1978                  | to_unorm(b, 15) <<  4
1979                  | to_unorm(a, 15)      );
1980     store(ptr, px, tail);
1981 }
1982 
1983 STAGE(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
1984     auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
1985     from_8888(load<U32>(ptr, tail), &r,&g,&b,&a);
1986 }
1987 STAGE(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
1988     auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
1989     from_8888(load<U32>(ptr, tail), &dr,&dg,&db,&da);
1990 }
1991 STAGE(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
1992     const uint32_t* ptr;
1993     U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1994     from_8888(gather(ptr, ix), &r,&g,&b,&a);
1995 }
1996 STAGE(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
1997     auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
1998 
1999     U32 px = to_unorm(r, 255)
2000            | to_unorm(g, 255) <<  8
2001            | to_unorm(b, 255) << 16
2002            | to_unorm(a, 255) << 24;
2003     store(ptr, px, tail);
2004 }
2005 
2006 STAGE(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
2007     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2008     from_88(load<U16>(ptr, tail), &r, &g);
2009     b = 0;
2010     a = 1;
2011 }
2012 STAGE(load_rg88_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2013     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2014     from_88(load<U16>(ptr, tail), &dr, &dg);
2015     db = 0;
2016     da = 1;
2017 }
2018 STAGE(gather_rg88, const SkRasterPipeline_GatherCtx* ctx) {
2019     const uint16_t* ptr;
2020     U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2021     from_88(gather(ptr, ix), &r, &g);
2022     b = 0;
2023     a = 1;
2024 }
2025 STAGE(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
2026     auto ptr = ptr_at_xy<uint16_t>(ctx, dx, dy);
2027     U16 px = pack( to_unorm(r, 255) | to_unorm(g, 255) <<  8 );
2028     store(ptr, px, tail);
2029 }
2030 
2031 STAGE(load_a16, const SkRasterPipeline_MemoryCtx* ctx) {
2032     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2033     r = g = b = 0;
2034     a = from_short(load<U16>(ptr, tail));
2035 }
2036 STAGE(load_a16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2037     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2038     dr = dg = db = 0.0f;
2039     da = from_short(load<U16>(ptr, tail));
2040 }
2041 STAGE(gather_a16, const SkRasterPipeline_GatherCtx* ctx) {
2042     const uint16_t* ptr;
2043     U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2044     r = g = b = 0.0f;
2045     a = from_short(gather(ptr, ix));
2046 }
2047 STAGE(store_a16, const SkRasterPipeline_MemoryCtx* ctx) {
2048     auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2049 
2050     U16 px = pack(to_unorm(a, 65535));
2051     store(ptr, px, tail);
2052 }
2053 
2054 STAGE(load_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
2055     auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2056     b = 0; a = 1;
2057     from_1616(load<U32>(ptr, tail), &r,&g);
2058 }
2059 STAGE(load_rg1616_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2060     auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2061     from_1616(load<U32>(ptr, tail), &dr, &dg);
2062     db = 0;
2063     da = 1;
2064 }
2065 STAGE(gather_rg1616, const SkRasterPipeline_GatherCtx* ctx) {
2066     const uint32_t* ptr;
2067     U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2068     from_1616(gather(ptr, ix), &r, &g);
2069     b = 0;
2070     a = 1;
2071 }
2072 STAGE(store_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
2073     auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2074 
2075     U32 px = to_unorm(r, 65535)
2076            | to_unorm(g, 65535) <<  16;
2077     store(ptr, px, tail);
2078 }
2079 
2080 STAGE(load_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
2081     auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
2082     from_16161616(load<U64>(ptr, tail), &r,&g, &b, &a);
2083 }
2084 STAGE(load_16161616_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2085     auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
2086     from_16161616(load<U64>(ptr, tail), &dr, &dg, &db, &da);
2087 }
2088 STAGE(gather_16161616, const SkRasterPipeline_GatherCtx* ctx) {
2089     const uint64_t* ptr;
2090     U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2091     from_16161616(gather(ptr, ix), &r, &g, &b, &a);
2092 }
2093 STAGE(store_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
2094     auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
2095 
2096     U16 R = pack(to_unorm(r, 65535)),
2097         G = pack(to_unorm(g, 65535)),
2098         B = pack(to_unorm(b, 65535)),
2099         A = pack(to_unorm(a, 65535));
2100 
2101     store4(ptr,tail, R,G,B,A);
2102 }
2103 
2104 
2105 STAGE(load_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
2106     auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2107     from_1010102(load<U32>(ptr, tail), &r,&g,&b,&a);
2108 }
2109 STAGE(load_1010102_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2110     auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2111     from_1010102(load<U32>(ptr, tail), &dr,&dg,&db,&da);
2112 }
2113 STAGE(gather_1010102, const SkRasterPipeline_GatherCtx* ctx) {
2114     const uint32_t* ptr;
2115     U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2116     from_1010102(gather(ptr, ix), &r,&g,&b,&a);
2117 }
2118 STAGE(store_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
2119     auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2120 
2121     U32 px = to_unorm(r, 1023)
2122            | to_unorm(g, 1023) << 10
2123            | to_unorm(b, 1023) << 20
2124            | to_unorm(a,    3) << 30;
2125     store(ptr, px, tail);
2126 }
2127 
2128 STAGE(load_f16, const SkRasterPipeline_MemoryCtx* ctx) {
2129     auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
2130 
2131     U16 R,G,B,A;
2132     load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
2133     r = from_half(R);
2134     g = from_half(G);
2135     b = from_half(B);
2136     a = from_half(A);
2137 }
2138 STAGE(load_f16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2139     auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
2140 
2141     U16 R,G,B,A;
2142     load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
2143     dr = from_half(R);
2144     dg = from_half(G);
2145     db = from_half(B);
2146     da = from_half(A);
2147 }
2148 STAGE(gather_f16, const SkRasterPipeline_GatherCtx* ctx) {
2149     const uint64_t* ptr;
2150     U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2151     auto px = gather(ptr, ix);
2152 
2153     U16 R,G,B,A;
2154     load4((const uint16_t*)&px,0, &R,&G,&B,&A);
2155     r = from_half(R);
2156     g = from_half(G);
2157     b = from_half(B);
2158     a = from_half(A);
2159 }
2160 STAGE(store_f16, const SkRasterPipeline_MemoryCtx* ctx) {
2161     auto ptr = ptr_at_xy<uint64_t>(ctx, dx,dy);
2162     store4((uint16_t*)ptr,tail, to_half(r)
2163                               , to_half(g)
2164                               , to_half(b)
2165                               , to_half(a));
2166 }
2167 
2168 STAGE(store_u16_be, const SkRasterPipeline_MemoryCtx* ctx) {
2169     auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,dy);
2170 
2171     U16 R = bswap(pack(to_unorm(r, 65535))),
2172         G = bswap(pack(to_unorm(g, 65535))),
2173         B = bswap(pack(to_unorm(b, 65535))),
2174         A = bswap(pack(to_unorm(a, 65535)));
2175 
2176     store4(ptr,tail, R,G,B,A);
2177 }
2178 
2179 STAGE(load_af16, const SkRasterPipeline_MemoryCtx* ctx) {
2180     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2181 
2182     U16 A = load<U16>((const uint16_t*)ptr, tail);
2183     r = 0;
2184     g = 0;
2185     b = 0;
2186     a = from_half(A);
2187 }
2188 STAGE(load_af16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2189     auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2190 
2191     U16 A = load<U16>((const uint16_t*)ptr, tail);
2192     dr = dg = db = 0.0f;
2193     da = from_half(A);
2194 }
2195 STAGE(gather_af16, const SkRasterPipeline_GatherCtx* ctx) {
2196     const uint16_t* ptr;
2197     U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2198     r = g = b = 0.0f;
2199     a = from_half(gather(ptr, ix));
2200 }
2201 STAGE(store_af16, const SkRasterPipeline_MemoryCtx* ctx) {
2202     auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2203     store(ptr, to_half(a), tail);
2204 }
2205 
2206 STAGE(load_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
2207     auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2208 
2209     U16 R,G;
2210     load2((const uint16_t*)ptr, tail, &R, &G);
2211     r = from_half(R);
2212     g = from_half(G);
2213     b = 0;
2214     a = 1;
2215 }
2216 STAGE(load_rgf16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2217     auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2218 
2219     U16 R,G;
2220     load2((const uint16_t*)ptr, tail, &R, &G);
2221     dr = from_half(R);
2222     dg = from_half(G);
2223     db = 0;
2224     da = 1;
2225 }
2226 STAGE(gather_rgf16, const SkRasterPipeline_GatherCtx* ctx) {
2227     const uint32_t* ptr;
2228     U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2229     auto px = gather(ptr, ix);
2230 
2231     U16 R,G;
2232     load2((const uint16_t*)&px, 0, &R, &G);
2233     r = from_half(R);
2234     g = from_half(G);
2235     b = 0;
2236     a = 1;
2237 }
2238 STAGE(store_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
2239     auto ptr = ptr_at_xy<uint32_t>(ctx, dx, dy);
2240     store2((uint16_t*)ptr, tail, to_half(r)
2241                                , to_half(g));
2242 }
2243 
2244 STAGE(load_f32, const SkRasterPipeline_MemoryCtx* ctx) {
2245     auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
2246     load4(ptr,tail, &r,&g,&b,&a);
2247 }
2248 STAGE(load_f32_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2249     auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
2250     load4(ptr,tail, &dr,&dg,&db,&da);
2251 }
2252 STAGE(gather_f32, const SkRasterPipeline_GatherCtx* ctx) {
2253     const float* ptr;
2254     U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2255     r = gather(ptr, 4*ix + 0);
2256     g = gather(ptr, 4*ix + 1);
2257     b = gather(ptr, 4*ix + 2);
2258     a = gather(ptr, 4*ix + 3);
2259 }
2260 STAGE(store_f32, const SkRasterPipeline_MemoryCtx* ctx) {
2261     auto ptr = ptr_at_xy<float>(ctx, 4*dx,4*dy);
2262     store4(ptr,tail, r,g,b,a);
2263 }
2264 
2265 STAGE(load_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
2266     auto ptr = ptr_at_xy<const float>(ctx, 2*dx,2*dy);
2267     load2(ptr, tail, &r, &g);
2268     b = 0;
2269     a = 1;
2270 }
2271 STAGE(store_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
2272     auto ptr = ptr_at_xy<float>(ctx, 2*dx,2*dy);
2273     store2(ptr, tail, r, g);
2274 }
2275 
2276 SI F exclusive_repeat(F v, const SkRasterPipeline_TileCtx* ctx) {
2277     return v - floor_(v*ctx->invScale)*ctx->scale;
2278 }
2279 SI F exclusive_mirror(F v, const SkRasterPipeline_TileCtx* ctx) {
2280     auto limit = ctx->scale;
2281     auto invLimit = ctx->invScale;
2282     return abs_( (v-limit) - (limit+limit)*floor_((v-limit)*(invLimit*0.5f)) - limit );
2283 }
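
// Worked example for exclusive_mirror(): with limit = 2 (invScale = 0.5),
// v = 2.5 gives |0.5 - 4*floor(0.125) - 2| = 1.5 and v = 4.5 gives 0.5, i.e.
// coordinates ping-pong back and forth across [0,2] with period 4.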
2284 // Tile x or y to [0,limit) == [0,limit - 1 ulp] (think, sampling from images).
2285 // The gather stages will hard clamp the output of these stages to [0,limit)...
2286 // we just need to do the basic repeat or mirroring.
2287 STAGE(repeat_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_repeat(r, ctx); }
2288 STAGE(repeat_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_repeat(g, ctx); }
2289 STAGE(mirror_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_mirror(r, ctx); }
2290 STAGE(mirror_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_mirror(g, ctx); }
2291 
2292 STAGE( clamp_x_1, Ctx::None) { r = clamp_01(r); }
2293 STAGE(repeat_x_1, Ctx::None) { r = clamp_01(r - floor_(r)); }
2294 STAGE(mirror_x_1, Ctx::None) { r = clamp_01(abs_( (r-1.0f) - two(floor_((r-1.0f)*0.5f)) - 1.0f )); }
2295 
2296 // Decal stores a 32bit mask after checking the coordinate (x and/or y) against its domain:
2297 //      mask == 0x00000000 if the coordinate(s) are out of bounds
2298 //      mask == 0xFFFFFFFF if the coordinate(s) are in bounds
2299 // After the gather stage, the r,g,b,a values are AND'd with this mask, setting them to 0
2300 // if either of the coordinates were out of bounds.
2301 
2302 STAGE(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
2303     auto w = ctx->limit_x;
2304     sk_unaligned_store(ctx->mask, cond_to_mask((0 <= r) & (r < w)));
2305 }
2306 STAGE(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
2307     auto h = ctx->limit_y;
2308     sk_unaligned_store(ctx->mask, cond_to_mask((0 <= g) & (g < h)));
2309 }
2310 STAGE(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
2311     auto w = ctx->limit_x;
2312     auto h = ctx->limit_y;
2313     sk_unaligned_store(ctx->mask,
2314                     cond_to_mask((0 <= r) & (r < w) & (0 <= g) & (g < h)));
2315 }
2316 STAGE(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
2317     auto mask = sk_unaligned_load<U32>(ctx->mask);
2318     r = sk_bit_cast<F>(sk_bit_cast<U32>(r) & mask);
2319     g = sk_bit_cast<F>(sk_bit_cast<U32>(g) & mask);
2320     b = sk_bit_cast<F>(sk_bit_cast<U32>(b) & mask);
2321     a = sk_bit_cast<F>(sk_bit_cast<U32>(a) & mask);
2322 }
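
// A scalar sketch of the decal protocol (illustrative only): decal_x stores
//     mask = (0 <= x && x < limit_x) ? 0xFFFFFFFF : 0x00000000;
// the gather stage then samples with coordinates clamped into bounds, and
// check_decal_mask ANDs each channel's bits with mask, so out-of-bounds pixels
// come out as transparent black without any branching.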
2323 
2324 STAGE(alpha_to_gray, Ctx::None) {
2325     r = g = b = a;
2326     a = 1;
2327 }
2328 STAGE(alpha_to_gray_dst, Ctx::None) {
2329     dr = dg = db = da;
2330     da = 1;
2331 }
2332 STAGE(bt709_luminance_or_luma_to_alpha, Ctx::None) {
2333     a = r*0.2126f + g*0.7152f + b*0.0722f;
2334     r = g = b = 0;
2335 }
2336 STAGE(bt709_luminance_or_luma_to_rgb, Ctx::None) {
2337     r = g = b = r*0.2126f + g*0.7152f + b*0.0722f;
2338 }
2339 
2340 STAGE(matrix_translate, const float* m) {
2341     r += m[0];
2342     g += m[1];
2343 }
2344 STAGE(matrix_scale_translate, const float* m) {
2345     r = mad(r,m[0], m[2]);
2346     g = mad(g,m[1], m[3]);
2347 }
2348 STAGE(matrix_2x3, const float* m) {
2349     auto R = mad(r,m[0], mad(g,m[2], m[4])),
2350          G = mad(r,m[1], mad(g,m[3], m[5]));
2351     r = R;
2352     g = G;
2353 }
2354 STAGE(matrix_3x3, const float* m) {
2355     auto R = mad(r,m[0], mad(g,m[3], b*m[6])),
2356          G = mad(r,m[1], mad(g,m[4], b*m[7])),
2357          B = mad(r,m[2], mad(g,m[5], b*m[8]));
2358     r = R;
2359     g = G;
2360     b = B;
2361 }
2362 STAGE(matrix_3x4, const float* m) {
2363     auto R = mad(r,m[0], mad(g,m[3], mad(b,m[6], m[ 9]))),
2364          G = mad(r,m[1], mad(g,m[4], mad(b,m[7], m[10]))),
2365          B = mad(r,m[2], mad(g,m[5], mad(b,m[8], m[11])));
2366     r = R;
2367     g = G;
2368     b = B;
2369 }
2370 STAGE(matrix_4x5, const float* m) {
2371     auto R = mad(r,m[ 0], mad(g,m[ 1], mad(b,m[ 2], mad(a,m[ 3], m[ 4])))),
2372          G = mad(r,m[ 5], mad(g,m[ 6], mad(b,m[ 7], mad(a,m[ 8], m[ 9])))),
2373          B = mad(r,m[10], mad(g,m[11], mad(b,m[12], mad(a,m[13], m[14])))),
2374          A = mad(r,m[15], mad(g,m[16], mad(b,m[17], mad(a,m[18], m[19]))));
2375     r = R;
2376     g = G;
2377     b = B;
2378     a = A;
2379 }
2380 STAGE(matrix_4x3, const float* m) {
2381     auto X = r,
2382          Y = g;
2383 
2384     r = mad(X, m[0], mad(Y, m[4], m[ 8]));
2385     g = mad(X, m[1], mad(Y, m[5], m[ 9]));
2386     b = mad(X, m[2], mad(Y, m[6], m[10]));
2387     a = mad(X, m[3], mad(Y, m[7], m[11]));
2388 }
2389 STAGE(matrix_perspective, const float* m) {
2390     // N.B. Unlike the other matrix_ stages, this matrix is row-major.
2391     auto R = mad(r,m[0], mad(g,m[1], m[2])),
2392          G = mad(r,m[3], mad(g,m[4], m[5])),
2393          Z = mad(r,m[6], mad(g,m[7], m[8]));
2394     r = R * rcp(Z);
2395     g = G * rcp(Z);
2396 }
2397 
2398 SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
2399                         F* r, F* g, F* b, F* a) {
2400     F fr, br, fg, bg, fb, bb, fa, ba;
2401 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
2402     if (c->stopCount <=8) {
2403         fr = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), idx);
2404         br = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), idx);
2405         fg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), idx);
2406         bg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), idx);
2407         fb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), idx);
2408         bb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), idx);
2409         fa = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), idx);
2410         ba = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), idx);
2411     } else
2412 #endif
2413     {
2414         fr = gather(c->fs[0], idx);
2415         br = gather(c->bs[0], idx);
2416         fg = gather(c->fs[1], idx);
2417         bg = gather(c->bs[1], idx);
2418         fb = gather(c->fs[2], idx);
2419         bb = gather(c->bs[2], idx);
2420         fa = gather(c->fs[3], idx);
2421         ba = gather(c->bs[3], idx);
2422     }
2423 
2424     *r = mad(t, fr, br);
2425     *g = mad(t, fg, bg);
2426     *b = mad(t, fb, bb);
2427     *a = mad(t, fa, ba);
2428 }
2429 
2430 STAGE(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
2431     auto t = r;
2432     auto idx = trunc_(t * (c->stopCount-1));
2433     gradient_lookup(c, idx, t, &r, &g, &b, &a);
2434 }
2435 
2436 STAGE(gradient, const SkRasterPipeline_GradientCtx* c) {
2437     auto t = r;
2438     U32 idx = 0;
2439 
2440     // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
2441     for (size_t i = 1; i < c->stopCount; i++) {
2442         idx += if_then_else(t >= c->ts[i], U32(1), U32(0));
2443     }
2444 
2445     gradient_lookup(c, idx, t, &r, &g, &b, &a);
2446 }
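
// A scalar sketch of the stop search above (illustrative name, not used by the
// pipeline): idx counts how many stop positions t has passed, so 0 selects the
// color before the first stop and i selects the span between stops i and i+1.
SI int gradient_stop_index(const float* ts, int stopCount, float t) {
    int idx = 0;
    for (int i = 1; i < stopCount; i++) {
        idx += (t >= ts[i]) ? 1 : 0;
    }
    return idx;
}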
2447 
2448 STAGE(evenly_spaced_2_stop_gradient, const void* ctx) {
2449     struct Ctx { float f[4], b[4]; };
2450     auto c = (const Ctx*)ctx;
2451 
2452     auto t = r;
2453     r = mad(t, c->f[0], c->b[0]);
2454     g = mad(t, c->f[1], c->b[1]);
2455     b = mad(t, c->f[2], c->b[2]);
2456     a = mad(t, c->f[3], c->b[3]);
2457 }
2458 
2459 STAGE(xy_to_unit_angle, Ctx::None) {
2460     F X = r,
2461       Y = g;
2462     F xabs = abs_(X),
2463       yabs = abs_(Y);
2464 
2465     F slope = min(xabs, yabs)/max(xabs, yabs);
2466     F s = slope * slope;
2467 
2468     // Use a 7th degree polynomial to approximate atan.
2469     // This was generated using sollya.gforge.inria.fr.
2470     // A float optimized polynomial was generated using the following command.
2471     // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
2472     F phi = slope
2473              * (0.15912117063999176025390625f     + s
2474              * (-5.185396969318389892578125e-2f   + s
2475              * (2.476101927459239959716796875e-2f + s
2476              * (-7.0547382347285747528076171875e-3f))));
2477 
2478     phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
2479     phi = if_then_else(X < 0.0f   , 1.0f/2.0f - phi, phi);
2480     phi = if_then_else(Y < 0.0f   , 1.0f - phi     , phi);
2481     phi = if_then_else(phi != phi , 0              , phi);  // Check for NaN.
2482     r = phi;
2483 }
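
// Sanity check: for (x,y) = (1,1) we get slope = s = 1, and the polynomial sums
// to ~0.12497, very nearly atan(1)/(2*pi) = 1/8, so a 45 degree direction maps
// to the unit angle 0.125 as expected.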
2484 
2485 STAGE(xy_to_radius, Ctx::None) {
2486     F X2 = r * r,
2487       Y2 = g * g;
2488     r = sqrt_(X2 + Y2);
2489 }
2490 
2491 // Please see https://skia.org/dev/design/conical for how our 2pt conical shader works.
2492 
2493 STAGE(negate_x, Ctx::None) { r = -r; }
2494 
2495 STAGE(xy_to_2pt_conical_strip, const SkRasterPipeline_2PtConicalCtx* ctx) {
2496     F x = r, y = g, &t = r;
2497     t = x + sqrt_(ctx->fP0 - y*y); // ctx->fP0 = r0 * r0
2498 }
2499 
2500 STAGE(xy_to_2pt_conical_focal_on_circle, Ctx::None) {
2501     F x = r, y = g, &t = r;
2502     t = x + y*y / x; // (x^2 + y^2) / x
2503 }
2504 
2505 STAGE(xy_to_2pt_conical_well_behaved, const SkRasterPipeline_2PtConicalCtx* ctx) {
2506     F x = r, y = g, &t = r;
2507     t = sqrt_(x*x + y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
2508 }
2509 
2510 STAGE(xy_to_2pt_conical_greater, const SkRasterPipeline_2PtConicalCtx* ctx) {
2511     F x = r, y = g, &t = r;
2512     t = sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
2513 }
2514 
2515 STAGE(xy_to_2pt_conical_smaller, const SkRasterPipeline_2PtConicalCtx* ctx) {
2516     F x = r, y = g, &t = r;
2517     t = -sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
2518 }
2519 
2520 STAGE(alter_2pt_conical_compensate_focal, const SkRasterPipeline_2PtConicalCtx* ctx) {
2521     F& t = r;
2522     t = t + ctx->fP1; // ctx->fP1 = f
2523 }
2524 
2525 STAGE(alter_2pt_conical_unswap, Ctx::None) {
2526     F& t = r;
2527     t = 1 - t;
2528 }
2529 
2530 STAGE(mask_2pt_conical_nan, SkRasterPipeline_2PtConicalCtx* c) {
2531     F& t = r;
2532     auto is_degenerate = (t != t); // NaN
2533     t = if_then_else(is_degenerate, F(0), t);
2534     sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
2535 }
2536 
2537 STAGE(mask_2pt_conical_degenerates, SkRasterPipeline_2PtConicalCtx* c) {
2538     F& t = r;
2539     auto is_degenerate = (t <= 0) | (t != t);
2540     t = if_then_else(is_degenerate, F(0), t);
2541     sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
2542 }
2543 
2544 STAGE(apply_vector_mask, const uint32_t* ctx) {
2545     const U32 mask = sk_unaligned_load<U32>(ctx);
2546     r = sk_bit_cast<F>(sk_bit_cast<U32>(r) & mask);
2547     g = sk_bit_cast<F>(sk_bit_cast<U32>(g) & mask);
2548     b = sk_bit_cast<F>(sk_bit_cast<U32>(b) & mask);
2549     a = sk_bit_cast<F>(sk_bit_cast<U32>(a) & mask);
2550 }
2551 
2552 STAGE(save_xy, SkRasterPipeline_SamplerCtx* c) {
2553     // Whether bilinear or bicubic, all sample points are at the same fractional offset (fx,fy).
2554     // They're either the 4 corners of a logical 1x1 pixel or the 16 corners of a 3x3 grid
2555     // surrounding (x,y) at (0.5,0.5) off-center.
2556     F fx = fract(r + 0.5f),
2557       fy = fract(g + 0.5f);
2558 
2559     // Samplers will need to load x and fx, or y and fy.
2560     sk_unaligned_store(c->x,  r);
2561     sk_unaligned_store(c->y,  g);
2562     sk_unaligned_store(c->fx, fx);
2563     sk_unaligned_store(c->fy, fy);
2564 }
2565 
2566 STAGE(accumulate, const SkRasterPipeline_SamplerCtx* c) {
2567     // Bilinear and bicubic filters are both separable, so we produce independent contributions
2568     // from x and y, multiplying them together here to get each pixel's total scale factor.
2569     auto scale = sk_unaligned_load<F>(c->scalex)
2570                * sk_unaligned_load<F>(c->scaley);
2571     dr = mad(scale, r, dr);
2572     dg = mad(scale, g, dg);
2573     db = mad(scale, b, db);
2574     da = mad(scale, a, da);
2575 }
2576 
2577 // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
2578 // are combined in direct proportion to their area overlapping that logical query pixel.
2579 // At positive offsets, the x-axis contribution to that rectangle is fx, or (1-fx) at negative x.
2580 // The y-axis is symmetric.
2581 
2582 template <int kScale>
2583 SI void bilinear_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
2584     *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
2585     F fx = sk_unaligned_load<F>(ctx->fx);
2586 
2587     F scalex;
2588     if (kScale == -1) { scalex = 1.0f - fx; }
2589     if (kScale == +1) { scalex =        fx; }
2590     sk_unaligned_store(ctx->scalex, scalex);
2591 }
2592 template <int kScale>
2593 SI void bilinear_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
2594     *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
2595     F fy = sk_unaligned_load<F>(ctx->fy);
2596 
2597     F scaley;
2598     if (kScale == -1) { scaley = 1.0f - fy; }
2599     if (kScale == +1) { scaley =        fy; }
2600     sk_unaligned_store(ctx->scaley, scaley);
2601 }
2602 
2603 STAGE(bilinear_nx, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<-1>(ctx, &r); }
2604 STAGE(bilinear_px, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<+1>(ctx, &r); }
2605 STAGE(bilinear_ny, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<-1>(ctx, &g); }
2606 STAGE(bilinear_py, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<+1>(ctx, &g); }
2607 
2608 
2609 // In bicubic interpolation, the 16 pixels at +/- 0.5 and +/- 1.5 offsets from the sample
2610 // pixel center are combined with a non-uniform cubic filter, with higher values near the center.
2611 //
2612 // We break this function into two parts, one for near 0.5 offsets and one for far 1.5 offsets.
2613 // See GrCubicEffect for details of this particular filter.
2614 
2615 SI F bicubic_near(F t) {
2616     // 1/18 + 9/18t + 27/18t^2 - 21/18t^3 == t ( t ( -21/18t + 27/18) + 9/18) + 1/18
2617     return mad(t, mad(t, mad((-21/18.0f), t, (27/18.0f)), (9/18.0f)), (1/18.0f));
2618 }
2619 SI F bicubic_far(F t) {
2620     // 0/18 + 0/18*t - 6/18t^2 + 7/18t^3 == t^2 (7/18t - 6/18)
2621     return (t*t)*mad((7/18.0f), t, (-6/18.0f));
2622 }
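
// Note that along one axis the four weights bicubic_far(1-t), bicubic_near(1-t),
// bicubic_near(t), and bicubic_far(t) sum to exactly 1 for any t (at t = 0 they
// are 1/18, 16/18, 1/18, 0), so the filter preserves overall brightness.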
2623 
2624 template <int kScale>
2625 SI void bicubic_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
2626     *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
2627     F fx = sk_unaligned_load<F>(ctx->fx);
2628 
2629     F scalex;
2630     if (kScale == -3) { scalex = bicubic_far (1.0f - fx); }
2631     if (kScale == -1) { scalex = bicubic_near(1.0f - fx); }
2632     if (kScale == +1) { scalex = bicubic_near(       fx); }
2633     if (kScale == +3) { scalex = bicubic_far (       fx); }
2634     sk_unaligned_store(ctx->scalex, scalex);
2635 }
2636 template <int kScale>
2637 SI void bicubic_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
2638     *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
2639     F fy = sk_unaligned_load<F>(ctx->fy);
2640 
2641     F scaley;
2642     if (kScale == -3) { scaley = bicubic_far (1.0f - fy); }
2643     if (kScale == -1) { scaley = bicubic_near(1.0f - fy); }
2644     if (kScale == +1) { scaley = bicubic_near(       fy); }
2645     if (kScale == +3) { scaley = bicubic_far (       fy); }
2646     sk_unaligned_store(ctx->scaley, scaley);
2647 }
2648 
2649 STAGE(bicubic_n3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-3>(ctx, &r); }
2650 STAGE(bicubic_n1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-1>(ctx, &r); }
2651 STAGE(bicubic_p1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+1>(ctx, &r); }
2652 STAGE(bicubic_p3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+3>(ctx, &r); }
2653 
2654 STAGE(bicubic_n3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-3>(ctx, &g); }
2655 STAGE(bicubic_n1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-1>(ctx, &g); }
2656 STAGE(bicubic_p1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+1>(ctx, &g); }
2657 STAGE(bicubic_p3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+3>(ctx, &g); }
2658 
2659 STAGE(callback, SkRasterPipeline_CallbackCtx* c) {
2660     store4(c->rgba,0, r,g,b,a);
2661     c->fn(c, tail ? tail : N);
2662     load4(c->read_from,0, &r,&g,&b,&a);
2663 }
2664 
2665 STAGE(gauss_a_to_rgba, Ctx::None) {
2666     // x = 1 - x;
2667     // exp(-x * x * 4) - 0.018f;
2668     // ... now approximate with quartic
2669     //
2670     const float c4 = -2.26661229133605957031f;
2671     const float c3 = 2.89795351028442382812f;
2672     const float c2 = 0.21345567703247070312f;
2673     const float c1 = 0.15489584207534790039f;
2674     const float c0 = 0.00030726194381713867f;
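    // Evaluate c0 + c1*a + c2*a^2 + c3*a^3 + c4*a^4 via Horner's rule.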
2675     a = mad(a, mad(a, mad(a, mad(a, c4, c3), c2), c1), c0);
2676     r = a;
2677     g = a;
2678     b = a;
2679 }
2680 
2681 SI F tile(F v, SkTileMode mode, float limit, float invLimit) {
2682     // The ix_and_ptr() calls in sample() will clamp tile()'s output, so no need to clamp here.
2683     switch (mode) {
2684         case SkTileMode::kDecal:
2685         case SkTileMode::kClamp:  return v;
2686         case SkTileMode::kRepeat: return v - floor_(v*invLimit)*limit;
2687         case SkTileMode::kMirror:
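            // (v-limit) - 2*limit*floor((v-limit)/(2*limit)) lands in [0, 2*limit);
            // abs_(that - limit) then folds it back into [0, limit], giving the mirrored coordinate.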
2688             return abs_( (v-limit) - (limit+limit)*floor_((v-limit)*(invLimit*0.5f)) - limit );
2689     }
2690     SkUNREACHABLE;
2691 }
2692 
2693 SI void sample(const SkRasterPipeline_SamplerCtx2* ctx, F x, F y,
2694                F* r, F* g, F* b, F* a) {
2695     x = tile(x, ctx->tileX, ctx->width , ctx->invWidth );
2696     y = tile(y, ctx->tileY, ctx->height, ctx->invHeight);
2697 
2698     switch (ctx->ct) {
2699         default: *r = *g = *b = *a = 0;
2700                  break;
2701 
2702         case kRGBA_8888_SkColorType:
2703         case kBGRA_8888_SkColorType: {
2704             const uint32_t* ptr;
2705             U32 ix = ix_and_ptr(&ptr, ctx, x,y);
2706             from_8888(gather(ptr, ix), r,g,b,a);
2707             if (ctx->ct == kBGRA_8888_SkColorType) {
2708                 std::swap(*r,*b);
2709             }
2710         } break;
2711     }
2712 }
2713 
2714 template <int D>
2715 SI void sampler(const SkRasterPipeline_SamplerCtx2* ctx,
2716                 F cx, F cy, const F (&wx)[D], const F (&wy)[D],
2717                 F* r, F* g, F* b, F* a) {
2718 
2719     float start = -0.5f*(D-1);
2720 
2721     *r = *g = *b = *a = 0;
2722     F y = cy + start;
2723     for (int j = 0; j < D; j++, y += 1.0f) {
2724         F x = cx + start;
2725         for (int i = 0; i < D; i++, x += 1.0f) {
2726             F R,G,B,A;
2727             sample(ctx, x,y, &R,&G,&B,&A);
2728 
2729             F w = wx[i] * wy[j];
2730             *r = mad(w,R,*r);
2731             *g = mad(w,G,*g);
2732             *b = mad(w,B,*b);
2733             *a = mad(w,A,*a);
2734         }
2735     }
2736 }
2737 
2738 STAGE(bilinear, const SkRasterPipeline_SamplerCtx2* ctx) {
2739     F x = r, fx = fract(x + 0.5f),
2740       y = g, fy = fract(y + 0.5f);
2741     const F wx[] = {1.0f - fx, fx};
2742     const F wy[] = {1.0f - fy, fy};
2743 
2744     sampler(ctx, x,y, wx,wy, &r,&g,&b,&a);
2745 }
2746 STAGE(bicubic, SkRasterPipeline_SamplerCtx2* ctx) {
2747     F x = r, fx = fract(x + 0.5f),
2748       y = g, fy = fract(y + 0.5f);
2749     const F wx[] = { bicubic_far(1-fx), bicubic_near(1-fx), bicubic_near(fx), bicubic_far(fx) };
2750     const F wy[] = { bicubic_far(1-fy), bicubic_near(1-fy), bicubic_near(fy), bicubic_far(fy) };
2751 
2752     sampler(ctx, x,y, wx,wy, &r,&g,&b,&a);
2753 }
2754 
2755 // A specialized fused image shader for clamp-x, clamp-y, non-sRGB sampling.
2756 STAGE(bilerp_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
2757     // (cx,cy) are the center of our sample.
2758     F cx = r,
2759       cy = g;
2760 
2761     // All sample points are at the same fractional offset (fx,fy).
2762     // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
2763     F fx = fract(cx + 0.5f),
2764       fy = fract(cy + 0.5f);
2765 
2766     // We'll accumulate the color of all four samples into {r,g,b,a} directly.
2767     r = g = b = a = 0;
2768 
2769     for (float dy = -0.5f; dy <= +0.5f; dy += 1.0f)
2770     for (float dx = -0.5f; dx <= +0.5f; dx += 1.0f) {
2771         // (x,y) are the coordinates of this sample point.
2772         F x = cx + dx,
2773           y = cy + dy;
2774 
2775         // ix_and_ptr() will clamp to the image's bounds for us.
2776         const uint32_t* ptr;
2777         U32 ix = ix_and_ptr(&ptr, ctx, x,y);
2778 
2779         F sr,sg,sb,sa;
2780         from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
2781 
2782         // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
2783         // are combined in direct proportion to their area overlapping that logical query pixel.
2784         // At positive offsets, the x-axis contribution to that rectangle is fx,
2785         // or (1-fx) at negative x.  Same deal for y.
2786         F sx = (dx > 0) ? fx : 1.0f - fx,
2787           sy = (dy > 0) ? fy : 1.0f - fy,
2788           area = sx * sy;
2789 
2790         r += sr * area;
2791         g += sg * area;
2792         b += sb * area;
2793         a += sa * area;
2794     }
2795 }
2796 
2797 // A specialized fused image shader for clamp-x, clamp-y, non-sRGB sampling.
2798 STAGE(bicubic_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
2799     // (cx,cy) are the center of our sample.
2800     F cx = r,
2801       cy = g;
2802 
2803     // All sample points are at the same fractional offset (fx,fy).
2804     // They're a 4x4 grid of points at +/- 0.5 and +/- 1.5 offsets around (cx,cy).
2805     F fx = fract(cx + 0.5f),
2806       fy = fract(cy + 0.5f);
2807 
2808     // We'll accumulate the color of all sixteen samples into {r,g,b,a} directly.
2809     r = g = b = a = 0;
2810 
2811     const F scaley[4] = {
2812         bicubic_far (1.0f - fy), bicubic_near(1.0f - fy),
2813         bicubic_near(       fy), bicubic_far (       fy),
2814     };
2815     const F scalex[4] = {
2816         bicubic_far (1.0f - fx), bicubic_near(1.0f - fx),
2817         bicubic_near(       fx), bicubic_far (       fx),
2818     };
2819 
2820     F sample_y = cy - 1.5f;
2821     for (int yy = 0; yy <= 3; ++yy) {
2822         F sample_x = cx - 1.5f;
2823         for (int xx = 0; xx <= 3; ++xx) {
2824             F scale = scalex[xx] * scaley[yy];
2825 
2826             // ix_and_ptr() will clamp to the image's bounds for us.
2827             const uint32_t* ptr;
2828             U32 ix = ix_and_ptr(&ptr, ctx, sample_x, sample_y);
2829 
2830             F sr,sg,sb,sa;
2831             from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
2832 
2833             r = mad(scale, sr, r);
2834             g = mad(scale, sg, g);
2835             b = mad(scale, sb, b);
2836             a = mad(scale, sa, a);
2837 
2838             sample_x += 1;
2839         }
2840         sample_y += 1;
2841     }
2842 }
2843 
2844 // ~~~~~~ GrSwizzle stage ~~~~~~ //
2845 
2846 STAGE(swizzle, void* ctx) {
2847     auto ir = r, ig = g, ib = b, ia = a;
2848     F* o[] = {&r, &g, &b, &a};
2849     char swiz[4];
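    // The swizzle is encoded in the context pointer's own bytes: four characters where
    // 'r','g','b','a' pick a source channel and '0'/'1' force a constant.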
2850     memcpy(swiz, &ctx, sizeof(swiz));
2851 
2852     for (int i = 0; i < 4; ++i) {
2853         switch (swiz[i]) {
2854             case 'r': *o[i] = ir;   break;
2855             case 'g': *o[i] = ig;   break;
2856             case 'b': *o[i] = ib;   break;
2857             case 'a': *o[i] = ia;   break;
2858             case '0': *o[i] = F(0); break;
2859             case '1': *o[i] = F(1); break;
2860             default:                break;
2861         }
2862     }
2863 }
2864 
2865 namespace lowp {
2866 #if defined(JUMPER_IS_SCALAR) || defined(SK_DISABLE_LOWP_RASTER_PIPELINE)
2867     // If we're not compiled by Clang, or otherwise switched into scalar mode (old Clang, manually),
2868     // we don't generate lowp stages.  All these nullptrs will tell SkJumper.cpp to always use the
2869     // highp float pipeline.
2870     #define M(st) static void (*st)(void) = nullptr;
2871         SK_RASTER_PIPELINE_STAGES(M)
2872     #undef M
2873     static void (*just_return)(void) = nullptr;
2874 
2875     static void start_pipeline(size_t,size_t,size_t,size_t, void**) {}
2876 
2877 #else  // We are compiling vector code with Clang... let's make some lowp stages!
2878 
2879 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
2880     using U8  = uint8_t  __attribute__((ext_vector_type(16)));
2881     using U16 = uint16_t __attribute__((ext_vector_type(16)));
2882     using I16 =  int16_t __attribute__((ext_vector_type(16)));
2883     using I32 =  int32_t __attribute__((ext_vector_type(16)));
2884     using U32 = uint32_t __attribute__((ext_vector_type(16)));
2885     using F   = float    __attribute__((ext_vector_type(16)));
2886 #else
2887     using U8  = uint8_t  __attribute__((ext_vector_type(8)));
2888     using U16 = uint16_t __attribute__((ext_vector_type(8)));
2889     using I16 =  int16_t __attribute__((ext_vector_type(8)));
2890     using I32 =  int32_t __attribute__((ext_vector_type(8)));
2891     using U32 = uint32_t __attribute__((ext_vector_type(8)));
2892     using F   = float    __attribute__((ext_vector_type(8)));
2893 #endif
2894 
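// N is the lowp lane count: 16 lanes for the AVX2/AVX-512 types above, 8 lanes otherwise.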
2895 static const size_t N = sizeof(U16) / sizeof(uint16_t);
2896 
2897 // Once again, some platforms benefit from a restricted Stage calling convention,
2898 // but others can pass tons and tons of registers and we're happy to exploit that.
2899 // It's exactly the same decision and implementation strategy as the F stages above.
2900 #if JUMPER_NARROW_STAGES
2901     struct Params {
2902         size_t dx, dy, tail;
2903         U16 dr,dg,db,da;
2904     };
2905     using Stage = void(ABI*)(Params*, void** program, U16 r, U16 g, U16 b, U16 a);
2906 #else
2907     // We pass program as the second argument so that load_and_inc() will find it in %rsi on x86-64.
2908     using Stage = void (ABI*)(size_t tail, void** program, size_t dx, size_t dy,
2909                               U16  r, U16  g, U16  b, U16  a,
2910                               U16 dr, U16 dg, U16 db, U16 da);
2911 #endif
2912 
2913 static void start_pipeline(const size_t x0,     const size_t y0,
2914                            const size_t xlimit, const size_t ylimit, void** program) {
2915     auto start = (Stage)load_and_inc(program);
2916     for (size_t dy = y0; dy < ylimit; dy++) {
2917     #if JUMPER_NARROW_STAGES
2918         Params params = { x0,dy,0, 0,0,0,0 };
2919         for (; params.dx + N <= xlimit; params.dx += N) {
2920             start(&params,program, 0,0,0,0);
2921         }
2922         if (size_t tail = xlimit - params.dx) {
2923             params.tail = tail;
2924             start(&params,program, 0,0,0,0);
2925         }
2926     #else
2927         size_t dx = x0;
2928         for (; dx + N <= xlimit; dx += N) {
2929             start(   0,program,dx,dy, 0,0,0,0, 0,0,0,0);
2930         }
2931         if (size_t tail = xlimit - dx) {
2932             start(tail,program,dx,dy, 0,0,0,0, 0,0,0,0);
2933         }
2934     #endif
2935     }
2936 }
2937 
2938 #if JUMPER_NARROW_STAGES
2939     static void ABI just_return(Params*, void**, U16,U16,U16,U16) {}
2940 #else
2941     static void ABI just_return(size_t,void**,size_t,size_t, U16,U16,U16,U16, U16,U16,U16,U16) {}
2942 #endif
2943 
2944 // All stages use the same function call ABI to chain into each other, but there are three types:
2945 //   GG: geometry in, geometry out  -- think, a matrix
2946 //   GP: geometry in, pixels out.   -- think, a memory gather
2947 //   PP: pixels in, pixels out.     -- think, a blend mode
2948 //
2949 // (Some stages ignore their inputs or produce no logical output.  That's perfectly fine.)
2950 //
2951 // These three STAGE_ macros let you define each type of stage,
2952 // and will have (x,y) geometry and/or (r,g,b,a, dr,dg,db,da) pixel arguments as appropriate.
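// For example, seed_shader below is a GG stage (it maps (dx,dy) to per-lane (x,y)),
// gather_8888 is a GP stage, and premul is a PP stage.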
2953 
2954 #if JUMPER_NARROW_STAGES
2955     #define STAGE_GG(name, ...)                                                                \
2956         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y);          \
2957         static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) {     \
2958             auto x = join<F>(r,g),                                                             \
2959                  y = join<F>(b,a);                                                             \
2960             name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y);                   \
2961             split(x, &r,&g);                                                                   \
2962             split(y, &b,&a);                                                                   \
2963             auto next = (Stage)load_and_inc(program);                                          \
2964             next(params,program, r,g,b,a);                                                     \
2965         }                                                                                      \
2966         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y)
2967 
2968     #define STAGE_GP(name, ...)                                                            \
2969         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y,         \
2970                          U16&  r, U16&  g, U16&  b, U16&  a,                               \
2971                          U16& dr, U16& dg, U16& db, U16& da);                              \
2972         static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) { \
2973             auto x = join<F>(r,g),                                                         \
2974                  y = join<F>(b,a);                                                         \
2975             name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y, r,g,b,a,       \
2976                      params->dr,params->dg,params->db,params->da);                         \
2977             auto next = (Stage)load_and_inc(program);                                      \
2978             next(params,program, r,g,b,a);                                                 \
2979         }                                                                                  \
2980         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y,         \
2981                          U16&  r, U16&  g, U16&  b, U16&  a,                               \
2982                          U16& dr, U16& dg, U16& db, U16& da)
2983 
2984     #define STAGE_PP(name, ...)                                                            \
2985         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,                   \
2986                          U16&  r, U16&  g, U16&  b, U16&  a,                               \
2987                          U16& dr, U16& dg, U16& db, U16& da);                              \
2988         static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) { \
2989             name##_k(Ctx{program}, params->dx,params->dy,params->tail, r,g,b,a,            \
2990                      params->dr,params->dg,params->db,params->da);                         \
2991             auto next = (Stage)load_and_inc(program);                                      \
2992             next(params,program, r,g,b,a);                                                 \
2993         }                                                                                  \
2994         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,                   \
2995                          U16&  r, U16&  g, U16&  b, U16&  a,                               \
2996                          U16& dr, U16& dg, U16& db, U16& da)
2997 #else
2998     #define STAGE_GG(name, ...)                                                            \
2999         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y);      \
3000         static void ABI name(size_t tail, void** program, size_t dx, size_t dy,            \
3001                              U16  r, U16  g, U16  b, U16  a,                               \
3002                              U16 dr, U16 dg, U16 db, U16 da) {                             \
3003             auto x = join<F>(r,g),                                                         \
3004                  y = join<F>(b,a);                                                         \
3005             name##_k(Ctx{program}, dx,dy,tail, x,y);                                       \
3006             split(x, &r,&g);                                                               \
3007             split(y, &b,&a);                                                               \
3008             auto next = (Stage)load_and_inc(program);                                      \
3009             next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da);                                \
3010         }                                                                                  \
3011         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y)
3012 
3013     #define STAGE_GP(name, ...)                                                            \
3014         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y,         \
3015                          U16&  r, U16&  g, U16&  b, U16&  a,                               \
3016                          U16& dr, U16& dg, U16& db, U16& da);                              \
3017         static void ABI name(size_t tail, void** program, size_t dx, size_t dy,            \
3018                              U16  r, U16  g, U16  b, U16  a,                               \
3019                              U16 dr, U16 dg, U16 db, U16 da) {                             \
3020             auto x = join<F>(r,g),                                                         \
3021                  y = join<F>(b,a);                                                         \
3022             name##_k(Ctx{program}, dx,dy,tail, x,y, r,g,b,a, dr,dg,db,da);                 \
3023             auto next = (Stage)load_and_inc(program);                                      \
3024             next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da);                                \
3025         }                                                                                  \
3026         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y,         \
3027                          U16&  r, U16&  g, U16&  b, U16&  a,                               \
3028                          U16& dr, U16& dg, U16& db, U16& da)
3029 
3030     #define STAGE_PP(name, ...)                                                            \
3031         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,                   \
3032                          U16&  r, U16&  g, U16&  b, U16&  a,                               \
3033                          U16& dr, U16& dg, U16& db, U16& da);                              \
3034         static void ABI name(size_t tail, void** program, size_t dx, size_t dy,            \
3035                              U16  r, U16  g, U16  b, U16  a,                               \
3036                              U16 dr, U16 dg, U16 db, U16 da) {                             \
3037             name##_k(Ctx{program}, dx,dy,tail, r,g,b,a, dr,dg,db,da);                      \
3038             auto next = (Stage)load_and_inc(program);                                      \
3039             next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da);                                \
3040         }                                                                                  \
3041         SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,                   \
3042                          U16&  r, U16&  g, U16&  b, U16&  a,                               \
3043                          U16& dr, U16& dg, U16& db, U16& da)
3044 #endif
3045 
3046 // ~~~~~~ Commonly used helper functions ~~~~~~ //
3047 
3048 SI U16 div255(U16 v) {
3049 #if 0
3050     return (v+127)/255;  // The ideal rounding divide by 255.
3051 #elif 1 && defined(JUMPER_IS_NEON)
3052     // With NEON we can compute (v+127)/255 as (v + ((v+128)>>8) + 128)>>8
3053     // just as fast as we can do the approximation below, so might as well be correct!
3054     // First we compute v + ((v+128)>>8), then one more round of (...+128)>>8 to finish up.
3055     return vrshrq_n_u16(vrsraq_n_u16(v, v, 8), 8);
3056 #else
3057     return (v+255)/256;  // A good approximation of (v+127)/255.
3058 #endif
3059 }
3060 
3061 SI U16 inv(U16 v) { return 255-v; }
3062 
3063 SI U16 if_then_else(I16 c, U16 t, U16 e) { return (t & c) | (e & ~c); }
3064 SI U32 if_then_else(I32 c, U32 t, U32 e) { return (t & c) | (e & ~c); }
3065 
3066 SI U16 max(U16 x, U16 y) { return if_then_else(x < y, y, x); }
3067 SI U16 min(U16 x, U16 y) { return if_then_else(x < y, x, y); }
3068 
3069 SI U16 from_float(float f) { return f * 255.0f + 0.5f; }
3070 
3071 SI U16 lerp(U16 from, U16 to, U16 t) { return div255( from*inv(t) + to*t ); }
3072 
3073 template <typename D, typename S>
3074 SI D cast(S src) {
3075     return __builtin_convertvector(src, D);
3076 }
3077 
3078 template <typename D, typename S>
3079 SI void split(S v, D* lo, D* hi) {
3080     static_assert(2*sizeof(D) == sizeof(S), "");
3081     memcpy(lo, (const char*)&v + 0*sizeof(D), sizeof(D));
3082     memcpy(hi, (const char*)&v + 1*sizeof(D), sizeof(D));
3083 }
3084 template <typename D, typename S>
3085 SI D join(S lo, S hi) {
3086     static_assert(sizeof(D) == 2*sizeof(S), "");
3087     D v;
3088     memcpy((char*)&v + 0*sizeof(S), &lo, sizeof(S));
3089     memcpy((char*)&v + 1*sizeof(S), &hi, sizeof(S));
3090     return v;
3091 }
3092 
3093 SI F if_then_else(I32 c, F t, F e) {
3094     return sk_bit_cast<F>( (sk_bit_cast<I32>(t) & c) | (sk_bit_cast<I32>(e) & ~c) );
3095 }
3096 SI F max(F x, F y) { return if_then_else(x < y, y, x); }
3097 SI F min(F x, F y) { return if_then_else(x < y, x, y); }
3098 
3099 SI F mad(F f, F m, F a) { return f*m+a; }
3100 SI U32 trunc_(F x) { return (U32)cast<I32>(x); }
3101 
3102 SI F rcp(F x) {
3103 #if defined(SK_RASTER_PIPELINE_LEGACY_RCP_RSQRT)
3104     #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
3105         __m256 lo,hi;
3106         split(x, &lo,&hi);
3107         return join<F>(_mm256_rcp_ps(lo), _mm256_rcp_ps(hi));
3108     #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
3109         __m128 lo,hi;
3110         split(x, &lo,&hi);
3111         return join<F>(_mm_rcp_ps(lo), _mm_rcp_ps(hi));
3112     #elif defined(JUMPER_IS_NEON)
3113         auto rcp = [](float32x4_t v) {
3114             auto est = vrecpeq_f32(v);
3115             return vrecpsq_f32(v,est)*est;
3116         };
3117         float32x4_t lo,hi;
3118         split(x, &lo,&hi);
3119         return join<F>(rcp(lo), rcp(hi));
3120     #else
3121         return 1.0f / x;
3122     #endif
3123 #else
3124     // Please don't use _mm[256_]rcp_ps, vrecp[es]q_f32, etc. here.
3125     // They deliver inconsistent results, not only across architectures (x86 vs ARM vs ARM64)
3126     // but even within one (SSE/AVX vs AVX-512, Intel vs AMD).
3127     // This annoys people who want pixel-exact golden tests.  (skia:11861)
3128     return 1.0f / x;
3129 #endif
3130 }
3131 SI F sqrt_(F x) {
3132 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
3133     __m256 lo,hi;
3134     split(x, &lo,&hi);
3135     return join<F>(_mm256_sqrt_ps(lo), _mm256_sqrt_ps(hi));
3136 #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
3137     __m128 lo,hi;
3138     split(x, &lo,&hi);
3139     return join<F>(_mm_sqrt_ps(lo), _mm_sqrt_ps(hi));
3140 #elif defined(SK_CPU_ARM64)
3141     float32x4_t lo,hi;
3142     split(x, &lo,&hi);
3143     return join<F>(vsqrtq_f32(lo), vsqrtq_f32(hi));
3144 #elif defined(JUMPER_IS_NEON)
3145     auto sqrt = [](float32x4_t v) {
3146         auto est = vrsqrteq_f32(v);  // Estimate and two refinement steps for est = rsqrt(v).
3147         est *= vrsqrtsq_f32(v,est*est);
3148         est *= vrsqrtsq_f32(v,est*est);
3149         return v*est;                // sqrt(v) == v*rsqrt(v).
3150     };
3151     float32x4_t lo,hi;
3152     split(x, &lo,&hi);
3153     return join<F>(sqrt(lo), sqrt(hi));
3154 #else
3155     return F{
3156         sqrtf(x[0]), sqrtf(x[1]), sqrtf(x[2]), sqrtf(x[3]),
3157         sqrtf(x[4]), sqrtf(x[5]), sqrtf(x[6]), sqrtf(x[7]),
3158     };
3159 #endif
3160 }
3161 
3162 SI F floor_(F x) {
3163 #if defined(SK_CPU_ARM64)
3164     float32x4_t lo,hi;
3165     split(x, &lo,&hi);
3166     return join<F>(vrndmq_f32(lo), vrndmq_f32(hi));
3167 #elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
3168     __m256 lo,hi;
3169     split(x, &lo,&hi);
3170     return join<F>(_mm256_floor_ps(lo), _mm256_floor_ps(hi));
3171 #elif defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
3172     __m128 lo,hi;
3173     split(x, &lo,&hi);
3174     return join<F>(_mm_floor_ps(lo), _mm_floor_ps(hi));
3175 #else
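    // Casting to I32 and back truncates toward zero; if that rounded up (negative
    // non-integers), subtract 1 to land on the floor.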
3176     F roundtrip = cast<F>(cast<I32>(x));
3177     return roundtrip - if_then_else(roundtrip > x, F(1), F(0));
3178 #endif
3179 }
3180 SI F fract(F x) { return x - floor_(x); }
3181 SI F abs_(F x) { return sk_bit_cast<F>( sk_bit_cast<I32>(x) & 0x7fffffff ); }
3182 
3183 // ~~~~~~ Basic / misc. stages ~~~~~~ //
3184 
3185 STAGE_GG(seed_shader, Ctx::None) {
3186     static const float iota[] = {
3187         0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
3188         8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
3189     };
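    // Each lane covers its own pixel center: x = dx + lane + 0.5, y = dy + 0.5.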
3190     x = cast<F>(I32(dx)) + sk_unaligned_load<F>(iota);
3191     y = cast<F>(I32(dy)) + 0.5f;
3192 }
3193 
3194 STAGE_GG(matrix_translate, const float* m) {
3195     x += m[0];
3196     y += m[1];
3197 }
3198 STAGE_GG(matrix_scale_translate, const float* m) {
3199     x = mad(x,m[0], m[2]);
3200     y = mad(y,m[1], m[3]);
3201 }
3202 STAGE_GG(matrix_2x3, const float* m) {
3203     auto X = mad(x,m[0], mad(y,m[2], m[4])),
3204          Y = mad(x,m[1], mad(y,m[3], m[5]));
3205     x = X;
3206     y = Y;
3207 }
3208 STAGE_GG(matrix_perspective, const float* m) {
3209     // N.B. Unlike the other matrix_ stages, this matrix is row-major.
3210     auto X = mad(x,m[0], mad(y,m[1], m[2])),
3211          Y = mad(x,m[3], mad(y,m[4], m[5])),
3212          Z = mad(x,m[6], mad(y,m[7], m[8]));
3213     x = X * rcp(Z);
3214     y = Y * rcp(Z);
3215 }
3216 
3217 STAGE_PP(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
3218     r = c->rgba[0];
3219     g = c->rgba[1];
3220     b = c->rgba[2];
3221     a = c->rgba[3];
3222 }
3223 STAGE_PP(uniform_color_dst, const SkRasterPipeline_UniformColorCtx* c) {
3224     dr = c->rgba[0];
3225     dg = c->rgba[1];
3226     db = c->rgba[2];
3227     da = c->rgba[3];
3228 }
3229 STAGE_PP(black_color, Ctx::None) { r = g = b =   0; a = 255; }
3230 STAGE_PP(white_color, Ctx::None) { r = g = b = 255; a = 255; }
3231 
3232 STAGE_PP(set_rgb, const float rgb[3]) {
3233     r = from_float(rgb[0]);
3234     g = from_float(rgb[1]);
3235     b = from_float(rgb[2]);
3236 }
3237 
3238 STAGE_PP(clamp_0, Ctx::None) { /*definitely a noop*/ }
3239 STAGE_PP(clamp_1, Ctx::None) { /*_should_ be a noop*/ }
3240 
3241 STAGE_PP(clamp_a, Ctx::None) {
3242     r = min(r, a);
3243     g = min(g, a);
3244     b = min(b, a);
3245 }
3246 
3247 STAGE_PP(clamp_gamut, Ctx::None) {
3248     // It shouldn't be possible to get out-of-gamut
3249     // colors when working in lowp.
3250 }
3251 
3252 STAGE_PP(premul, Ctx::None) {
3253     r = div255(r * a);
3254     g = div255(g * a);
3255     b = div255(b * a);
3256 }
3257 STAGE_PP(premul_dst, Ctx::None) {
3258     dr = div255(dr * da);
3259     dg = div255(dg * da);
3260     db = div255(db * da);
3261 }
3262 
3263 STAGE_PP(force_opaque    , Ctx::None) {  a = 255; }
3264 STAGE_PP(force_opaque_dst, Ctx::None) { da = 255; }
3265 
3266 STAGE_PP(swap_rb, Ctx::None) {
3267     auto tmp = r;
3268     r = b;
3269     b = tmp;
3270 }
3271 STAGE_PP(swap_rb_dst, Ctx::None) {
3272     auto tmp = dr;
3273     dr = db;
3274     db = tmp;
3275 }
3276 
3277 STAGE_PP(move_src_dst, Ctx::None) {
3278     dr = r;
3279     dg = g;
3280     db = b;
3281     da = a;
3282 }
3283 
3284 STAGE_PP(move_dst_src, Ctx::None) {
3285     r = dr;
3286     g = dg;
3287     b = db;
3288     a = da;
3289 }
3290 
3291 // ~~~~~~ Blend modes ~~~~~~ //
3292 
3293 // The same logic applied to all 4 channels.
3294 #define BLEND_MODE(name)                                 \
3295     SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
3296     STAGE_PP(name, Ctx::None) {                          \
3297         r = name##_channel(r,dr,a,da);                   \
3298         g = name##_channel(g,dg,a,da);                   \
3299         b = name##_channel(b,db,a,da);                   \
3300         a = name##_channel(a,da,a,da);                   \
3301     }                                                    \
3302     SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
3303 
3304     BLEND_MODE(clear)    { return 0; }
3305     BLEND_MODE(srcatop)  { return div255( s*da + d*inv(sa) ); }
3306     BLEND_MODE(dstatop)  { return div255( d*sa + s*inv(da) ); }
3307     BLEND_MODE(srcin)    { return div255( s*da ); }
3308     BLEND_MODE(dstin)    { return div255( d*sa ); }
3309     BLEND_MODE(srcout)   { return div255( s*inv(da) ); }
3310     BLEND_MODE(dstout)   { return div255( d*inv(sa) ); }
3311     BLEND_MODE(srcover)  { return s + div255( d*inv(sa) ); }
3312     BLEND_MODE(dstover)  { return d + div255( s*inv(da) ); }
3313     BLEND_MODE(modulate) { return div255( s*d ); }
3314     BLEND_MODE(multiply) { return div255( s*inv(da) + d*inv(sa) + s*d ); }
3315     BLEND_MODE(plus_)    { return min(s+d, 255); }
3316     BLEND_MODE(screen)   { return s + d - div255( s*d ); }
3317     BLEND_MODE(xor_)     { return div255( s*inv(da) + d*inv(sa) ); }
3318 #undef BLEND_MODE
3319 
3320 // The same logic applied to color, and srcover for alpha.
3321 #define BLEND_MODE(name)                                 \
3322     SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
3323     STAGE_PP(name, Ctx::None) {                          \
3324         r = name##_channel(r,dr,a,da);                   \
3325         g = name##_channel(g,dg,a,da);                   \
3326         b = name##_channel(b,db,a,da);                   \
3327         a = a + div255( da*inv(a) );                     \
3328     }                                                    \
3329     SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
3330 
3331     BLEND_MODE(darken)     { return s + d -   div255( max(s*da, d*sa) ); }
3332     BLEND_MODE(lighten)    { return s + d -   div255( min(s*da, d*sa) ); }
3333     BLEND_MODE(difference) { return s + d - 2*div255( min(s*da, d*sa) ); }
3334     BLEND_MODE(exclusion)  { return s + d - 2*div255( s*d ); }
3335 
3336     BLEND_MODE(hardlight) {
3337         return div255( s*inv(da) + d*inv(sa) +
3338                        if_then_else(2*s <= sa, 2*s*d, sa*da - 2*(sa-s)*(da-d)) );
3339     }
3340     BLEND_MODE(overlay) {
3341         return div255( s*inv(da) + d*inv(sa) +
3342                        if_then_else(2*d <= da, 2*s*d, sa*da - 2*(sa-s)*(da-d)) );
3343     }
3344 #undef BLEND_MODE
3345 
3346 // ~~~~~~ Helpers for interacting with memory ~~~~~~ //
3347 
3348 template <typename T>
3349 SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
3350     return (T*)ctx->pixels + dy*ctx->stride + dx;
3351 }
3352 
3353 template <typename T>
3354 SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
3355     // Exclusive -> inclusive.
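    // Subtracting 1 from the float's bit pattern yields the largest representable value
    // strictly below width/height, so the clamps below keep x,y inside the image.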
3356     const F w = sk_bit_cast<float>( sk_bit_cast<uint32_t>(ctx->width ) - 1),
3357             h = sk_bit_cast<float>( sk_bit_cast<uint32_t>(ctx->height) - 1);
3358 
3359     x = min(max(0, x), w);
3360     y = min(max(0, y), h);
3361 
3362     *ptr = (const T*)ctx->pixels;
3363     return trunc_(y)*ctx->stride + trunc_(x);
3364 }
3365 
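// load() and store() handle the pipeline's tail: tail == 0 means a full batch of N pixels;
// otherwise only the low `tail` lanes are read or written.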
3366 template <typename V, typename T>
3367 SI V load(const T* ptr, size_t tail) {
3368     V v = 0;
3369     switch (tail & (N-1)) {
3370         case  0: memcpy(&v, ptr, sizeof(v)); break;
3371     #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
3372         case 15: v[14] = ptr[14]; [[fallthrough]];
3373         case 14: v[13] = ptr[13]; [[fallthrough]];
3374         case 13: v[12] = ptr[12]; [[fallthrough]];
3375         case 12: memcpy(&v, ptr, 12*sizeof(T)); break;
3376         case 11: v[10] = ptr[10]; [[fallthrough]];
3377         case 10: v[ 9] = ptr[ 9]; [[fallthrough]];
3378         case  9: v[ 8] = ptr[ 8]; [[fallthrough]];
3379         case  8: memcpy(&v, ptr,  8*sizeof(T)); break;
3380     #endif
3381         case  7: v[ 6] = ptr[ 6]; [[fallthrough]];
3382         case  6: v[ 5] = ptr[ 5]; [[fallthrough]];
3383         case  5: v[ 4] = ptr[ 4]; [[fallthrough]];
3384         case  4: memcpy(&v, ptr,  4*sizeof(T)); break;
3385         case  3: v[ 2] = ptr[ 2]; [[fallthrough]];
3386         case  2: memcpy(&v, ptr,  2*sizeof(T)); break;
3387         case  1: v[ 0] = ptr[ 0];
3388     }
3389     return v;
3390 }
3391 template <typename V, typename T>
3392 SI void store(T* ptr, size_t tail, V v) {
3393     switch (tail & (N-1)) {
3394         case  0: memcpy(ptr, &v, sizeof(v)); break;
3395     #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
3396         case 15: ptr[14] = v[14]; [[fallthrough]];
3397         case 14: ptr[13] = v[13]; [[fallthrough]];
3398         case 13: ptr[12] = v[12]; [[fallthrough]];
3399         case 12: memcpy(ptr, &v, 12*sizeof(T)); break;
3400         case 11: ptr[10] = v[10]; [[fallthrough]];
3401         case 10: ptr[ 9] = v[ 9]; [[fallthrough]];
3402         case  9: ptr[ 8] = v[ 8]; [[fallthrough]];
3403         case  8: memcpy(ptr, &v,  8*sizeof(T)); break;
3404     #endif
3405         case  7: ptr[ 6] = v[ 6]; [[fallthrough]];
3406         case  6: ptr[ 5] = v[ 5]; [[fallthrough]];
3407         case  5: ptr[ 4] = v[ 4]; [[fallthrough]];
3408         case  4: memcpy(ptr, &v,  4*sizeof(T)); break;
3409         case  3: ptr[ 2] = v[ 2]; [[fallthrough]];
3410         case  2: memcpy(ptr, &v,  2*sizeof(T)); break;
3411         case  1: ptr[ 0] = v[ 0];
3412     }
3413 }
3414 
3415 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
3416     template <typename V, typename T>
3417     SI V gather(const T* ptr, U32 ix) {
3418         return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
3419                   ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]],
3420                   ptr[ix[ 8]], ptr[ix[ 9]], ptr[ix[10]], ptr[ix[11]],
3421                   ptr[ix[12]], ptr[ix[13]], ptr[ix[14]], ptr[ix[15]], };
3422     }
3423 
3424     template<>
3425     F gather(const float* ptr, U32 ix) {
3426         __m256i lo, hi;
3427         split(ix, &lo, &hi);
3428 
3429         return join<F>(_mm256_i32gather_ps(ptr, lo, 4),
3430                        _mm256_i32gather_ps(ptr, hi, 4));
3431     }
3432 
3433     template<>
3434     U32 gather(const uint32_t* ptr, U32 ix) {
3435         __m256i lo, hi;
3436         split(ix, &lo, &hi);
3437 
3438         return join<U32>(_mm256_i32gather_epi32(ptr, lo, 4),
3439                          _mm256_i32gather_epi32(ptr, hi, 4));
3440     }
3441 #else
3442     template <typename V, typename T>
3443     SI V gather(const T* ptr, U32 ix) {
3444         return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
3445                   ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]], };
3446     }
3447 #endif
3448 
3449 
3450 // ~~~~~~ 32-bit memory loads and stores ~~~~~~ //
3451 
3452 SI void from_8888(U32 rgba, U16* r, U16* g, U16* b, U16* a) {
3453 #if 1 && (defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX))
3454     // Swap the middle 128-bit lanes to make _mm256_packus_epi32() in cast_U16() work out nicely.
3455     __m256i _01,_23;
3456     split(rgba, &_01, &_23);
3457     __m256i _02 = _mm256_permute2x128_si256(_01,_23, 0x20),
3458             _13 = _mm256_permute2x128_si256(_01,_23, 0x31);
3459     rgba = join<U32>(_02, _13);
3460 
3461     auto cast_U16 = [](U32 v) -> U16 {
3462         __m256i _02,_13;
3463         split(v, &_02,&_13);
3464         return _mm256_packus_epi32(_02,_13);
3465     };
3466 #else
3467     auto cast_U16 = [](U32 v) -> U16 {
3468         return cast<U16>(v);
3469     };
3470 #endif
3471     *r = cast_U16(rgba & 65535) & 255;
3472     *g = cast_U16(rgba & 65535) >>  8;
3473     *b = cast_U16(rgba >>   16) & 255;
3474     *a = cast_U16(rgba >>   16) >>  8;
3475 }
3476 
3477 SI void load_8888_(const uint32_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
3478 #if 1 && defined(JUMPER_IS_NEON)
3479     uint8x8x4_t rgba;
3480     switch (tail & (N-1)) {
3481         case 0: rgba = vld4_u8     ((const uint8_t*)(ptr+0)         ); break;
3482         case 7: rgba = vld4_lane_u8((const uint8_t*)(ptr+6), rgba, 6); [[fallthrough]];
3483         case 6: rgba = vld4_lane_u8((const uint8_t*)(ptr+5), rgba, 5); [[fallthrough]];
3484         case 5: rgba = vld4_lane_u8((const uint8_t*)(ptr+4), rgba, 4); [[fallthrough]];
3485         case 4: rgba = vld4_lane_u8((const uint8_t*)(ptr+3), rgba, 3); [[fallthrough]];
3486         case 3: rgba = vld4_lane_u8((const uint8_t*)(ptr+2), rgba, 2); [[fallthrough]];
3487         case 2: rgba = vld4_lane_u8((const uint8_t*)(ptr+1), rgba, 1); [[fallthrough]];
3488         case 1: rgba = vld4_lane_u8((const uint8_t*)(ptr+0), rgba, 0);
3489     }
3490     *r = cast<U16>(rgba.val[0]);
3491     *g = cast<U16>(rgba.val[1]);
3492     *b = cast<U16>(rgba.val[2]);
3493     *a = cast<U16>(rgba.val[3]);
3494 #else
3495     from_8888(load<U32>(ptr, tail), r,g,b,a);
3496 #endif
3497 }
3498 SI void store_8888_(uint32_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
3499 #if 1 && defined(JUMPER_IS_NEON)
3500     uint8x8x4_t rgba = {{
3501         cast<U8>(r),
3502         cast<U8>(g),
3503         cast<U8>(b),
3504         cast<U8>(a),
3505     }};
3506     switch (tail & (N-1)) {
3507         case 0: vst4_u8     ((uint8_t*)(ptr+0), rgba   ); break;
3508         case 7: vst4_lane_u8((uint8_t*)(ptr+6), rgba, 6); [[fallthrough]];
3509         case 6: vst4_lane_u8((uint8_t*)(ptr+5), rgba, 5); [[fallthrough]];
3510         case 5: vst4_lane_u8((uint8_t*)(ptr+4), rgba, 4); [[fallthrough]];
3511         case 4: vst4_lane_u8((uint8_t*)(ptr+3), rgba, 3); [[fallthrough]];
3512         case 3: vst4_lane_u8((uint8_t*)(ptr+2), rgba, 2); [[fallthrough]];
3513         case 2: vst4_lane_u8((uint8_t*)(ptr+1), rgba, 1); [[fallthrough]];
3514         case 1: vst4_lane_u8((uint8_t*)(ptr+0), rgba, 0);
3515     }
3516 #else
3517     store(ptr, tail, cast<U32>(r | (g<<8)) <<  0
3518                    | cast<U32>(b | (a<<8)) << 16);
3519 #endif
3520 }
3521 
3522 STAGE_PP(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
3523     load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
3524 }
3525 STAGE_PP(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
3526     load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
3527 }
3528 STAGE_PP(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
3529     store_8888_(ptr_at_xy<uint32_t>(ctx, dx,dy), tail, r,g,b,a);
3530 }
3531 STAGE_GP(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
3532     const uint32_t* ptr;
3533     U32 ix = ix_and_ptr(&ptr, ctx, x,y);
3534     from_8888(gather<U32>(ptr, ix), &r, &g, &b, &a);
3535 }
3536 
3537 // ~~~~~~ 16-bit memory loads and stores ~~~~~~ //
3538 
3539 SI void from_565(U16 rgb, U16* r, U16* g, U16* b) {
3540     // Format for 565 buffers: 15|rrrrr gggggg bbbbb|0
3541     U16 R = (rgb >> 11) & 31,
3542         G = (rgb >>  5) & 63,
3543         B = (rgb >>  0) & 31;
3544 
3545     // These bit replications are the same as multiplying by 255/31 or 255/63 to scale to 8-bit.
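    // e.g. R = 31 becomes (31<<3)|(31>>2) = 248|7 = 255, and R = 0 stays 0.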
3546     *r = (R << 3) | (R >> 2);
3547     *g = (G << 2) | (G >> 4);
3548     *b = (B << 3) | (B >> 2);
3549 }
3550 SI void load_565_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
3551     from_565(load<U16>(ptr, tail), r,g,b);
3552 }
3553 SI void store_565_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b) {
3554     // Round from [0,255] to [0,31] or [0,63], as if x * (31/255.0f) + 0.5f.
3555     // (Don't feel like you need to find some fundamental truth in these...
3556     // they were brute-force searched.)
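    // e.g. r = 255 -> (255*9 + 36)/74 = 31, g = 255 -> (255*21 + 42)/85 = 63, and 0 maps to 0.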
3557     U16 R = (r *  9 + 36) / 74,   //  9/74 ≈ 31/255, plus 36/74, about half.
3558         G = (g * 21 + 42) / 85,   // 21/85 = 63/255 exactly.
3559         B = (b *  9 + 36) / 74;
3560     // Pack them back into 15|rrrrr gggggg bbbbb|0.
3561     store(ptr, tail, R << 11
3562                    | G <<  5
3563                    | B <<  0);
3564 }
3565 
3566 STAGE_PP(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
3567     load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b);
3568     a = 255;
3569 }
3570 STAGE_PP(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
3571     load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db);
3572     da = 255;
3573 }
3574 STAGE_PP(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
3575     store_565_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b);
3576 }
3577 STAGE_GP(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
3578     const uint16_t* ptr;
3579     U32 ix = ix_and_ptr(&ptr, ctx, x,y);
3580     from_565(gather<U16>(ptr, ix), &r, &g, &b);
3581     a = 255;
3582 }
3583 
3584 SI void from_4444(U16 rgba, U16* r, U16* g, U16* b, U16* a) {
3585     // Format for 4444 buffers: 15|rrrr gggg bbbb aaaa|0.
3586     U16 R = (rgba >> 12) & 15,
3587         G = (rgba >>  8) & 15,
3588         B = (rgba >>  4) & 15,
3589         A = (rgba >>  0) & 15;
3590 
3591     // Scale [0,15] to [0,255].
3592     *r = (R << 4) | R;
3593     *g = (G << 4) | G;
3594     *b = (B << 4) | B;
3595     *a = (A << 4) | A;
3596 }
3597 SI void load_4444_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
3598     from_4444(load<U16>(ptr, tail), r,g,b,a);
3599 }
3600 SI void store_4444_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
3601     // Round from [0,255] to [0,15], producing the same value as (x*(15/255.0f) + 0.5f).
3602     U16 R = (r + 8) / 17,
3603         G = (g + 8) / 17,
3604         B = (b + 8) / 17,
3605         A = (a + 8) / 17;
3606     // Pack them back into 15|rrrr gggg bbbb aaaa|0.
3607     store(ptr, tail, R << 12
3608                    | G <<  8
3609                    | B <<  4
3610                    | A <<  0);
3611 }
3612 
3613 STAGE_PP(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
3614     load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
3615 }
3616 STAGE_PP(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
3617     load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
3618 }
3619 STAGE_PP(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
3620     store_4444_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b,a);
3621 }
3622 STAGE_GP(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
3623     const uint16_t* ptr;
3624     U32 ix = ix_and_ptr(&ptr, ctx, x,y);
3625     from_4444(gather<U16>(ptr, ix), &r,&g,&b,&a);
3626 }
3627 
3628 SI void from_88(U16 rg, U16* r, U16* g) {
3629     *r = (rg & 0xFF);
3630     *g = (rg >> 8);
3631 }
3632 
3633 SI void load_88_(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
3634 #if 1 && defined(JUMPER_IS_NEON)
3635     uint8x8x2_t rg;
3636     switch (tail & (N-1)) {
3637         case 0: rg = vld2_u8     ((const uint8_t*)(ptr+0)         ); break;
3638         case 7: rg = vld2_lane_u8((const uint8_t*)(ptr+6), rg, 6); [[fallthrough]];
3639         case 6: rg = vld2_lane_u8((const uint8_t*)(ptr+5), rg, 5); [[fallthrough]];
3640         case 5: rg = vld2_lane_u8((const uint8_t*)(ptr+4), rg, 4); [[fallthrough]];
3641         case 4: rg = vld2_lane_u8((const uint8_t*)(ptr+3), rg, 3); [[fallthrough]];
3642         case 3: rg = vld2_lane_u8((const uint8_t*)(ptr+2), rg, 2); [[fallthrough]];
3643         case 2: rg = vld2_lane_u8((const uint8_t*)(ptr+1), rg, 1); [[fallthrough]];
3644         case 1: rg = vld2_lane_u8((const uint8_t*)(ptr+0), rg, 0);
3645     }
3646     *r = cast<U16>(rg.val[0]);
3647     *g = cast<U16>(rg.val[1]);
3648 #else
3649     from_88(load<U16>(ptr, tail), r,g);
3650 #endif
3651 }
3652 
3653 SI void store_88_(uint16_t* ptr, size_t tail, U16 r, U16 g) {
3654 #if 1 && defined(JUMPER_IS_NEON)
3655     uint8x8x2_t rg = {{
3656         cast<U8>(r),
3657         cast<U8>(g),
3658     }};
3659     switch (tail & (N-1)) {
3660         case 0: vst2_u8     ((uint8_t*)(ptr+0), rg   ); break;
3661         case 7: vst2_lane_u8((uint8_t*)(ptr+6), rg, 6); [[fallthrough]];
3662         case 6: vst2_lane_u8((uint8_t*)(ptr+5), rg, 5); [[fallthrough]];
3663         case 5: vst2_lane_u8((uint8_t*)(ptr+4), rg, 4); [[fallthrough]];
3664         case 4: vst2_lane_u8((uint8_t*)(ptr+3), rg, 3); [[fallthrough]];
3665         case 3: vst2_lane_u8((uint8_t*)(ptr+2), rg, 2); [[fallthrough]];
3666         case 2: vst2_lane_u8((uint8_t*)(ptr+1), rg, 1); [[fallthrough]];
3667         case 1: vst2_lane_u8((uint8_t*)(ptr+0), rg, 0);
3668     }
3669 #else
3670     store(ptr, tail, cast<U16>(r | (g<<8)) <<  0);
3671 #endif
3672 }
3673 
3674 STAGE_PP(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
3675     load_88_(ptr_at_xy<const uint16_t>(ctx, dx, dy), tail, &r, &g);
3676     b = 0;
3677     a = 255;
3678 }
3679 STAGE_PP(load_rg88_dst, const SkRasterPipeline_MemoryCtx* ctx) {
3680     load_88_(ptr_at_xy<const uint16_t>(ctx, dx, dy), tail, &dr, &dg);
3681     db = 0;
3682     da = 255;
3683 }
3684 STAGE_PP(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
3685     store_88_(ptr_at_xy<uint16_t>(ctx, dx, dy), tail, r, g);
3686 }
3687 STAGE_GP(gather_rg88, const SkRasterPipeline_GatherCtx* ctx) {
3688     const uint16_t* ptr;
3689     U32 ix = ix_and_ptr(&ptr, ctx, x, y);
3690     from_88(gather<U16>(ptr, ix), &r, &g);
3691     b = 0;
3692     a = 255;
3693 }
3694 
3695 // ~~~~~~ 8-bit memory loads and stores ~~~~~~ //
3696 
3697 SI U16 load_8(const uint8_t* ptr, size_t tail) {
3698     return cast<U16>(load<U8>(ptr, tail));
3699 }
3700 SI void store_8(uint8_t* ptr, size_t tail, U16 v) {
3701     store(ptr, tail, cast<U8>(v));
3702 }
3703 
3704 STAGE_PP(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
3705     r = g = b = 0;
3706     a = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
3707 }
3708 STAGE_PP(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
3709     dr = dg = db = 0;
3710     da = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
3711 }
3712 STAGE_PP(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
3713     store_8(ptr_at_xy<uint8_t>(ctx, dx,dy), tail, a);
3714 }
3715 STAGE_GP(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
3716     const uint8_t* ptr;
3717     U32 ix = ix_and_ptr(&ptr, ctx, x,y);
3718     r = g = b = 0;
3719     a = cast<U16>(gather<U8>(ptr, ix));
3720 }
3721 
3722 STAGE_PP(alpha_to_gray, Ctx::None) {
3723     r = g = b = a;
3724     a = 255;
3725 }
3726 STAGE_PP(alpha_to_gray_dst, Ctx::None) {
3727     dr = dg = db = da;
3728     da = 255;
3729 }
3730 STAGE_PP(bt709_luminance_or_luma_to_alpha, Ctx::None) {
3731     a = (r*54 + g*183 + b*19)/256;  // 0.2126, 0.7152, 0.0722 with 256 denominator.
3732     r = g = b = 0;
3733 }
3734 STAGE_PP(bt709_luminance_or_luma_to_rgb, Ctx::None) {
3735     r = g = b =(r*54 + g*183 + b*19)/256;  // 0.2126, 0.7152, 0.0722 with 256 denominator.
3736 }
3737 
3738 // ~~~~~~ Coverage scales / lerps ~~~~~~ //
3739 
3740 STAGE_PP(load_src, const uint16_t* ptr) {
3741     r = sk_unaligned_load<U16>(ptr + 0*N);
3742     g = sk_unaligned_load<U16>(ptr + 1*N);
3743     b = sk_unaligned_load<U16>(ptr + 2*N);
3744     a = sk_unaligned_load<U16>(ptr + 3*N);
3745 }
3746 STAGE_PP(store_src, uint16_t* ptr) {
3747     sk_unaligned_store(ptr + 0*N, r);
3748     sk_unaligned_store(ptr + 1*N, g);
3749     sk_unaligned_store(ptr + 2*N, b);
3750     sk_unaligned_store(ptr + 3*N, a);
3751 }
3752 STAGE_PP(store_src_a, uint16_t* ptr) {
3753     sk_unaligned_store(ptr, a);
3754 }
3755 STAGE_PP(load_dst, const uint16_t* ptr) {
3756     dr = sk_unaligned_load<U16>(ptr + 0*N);
3757     dg = sk_unaligned_load<U16>(ptr + 1*N);
3758     db = sk_unaligned_load<U16>(ptr + 2*N);
3759     da = sk_unaligned_load<U16>(ptr + 3*N);
3760 }
3761 STAGE_PP(store_dst, uint16_t* ptr) {
3762     sk_unaligned_store(ptr + 0*N, dr);
3763     sk_unaligned_store(ptr + 1*N, dg);
3764     sk_unaligned_store(ptr + 2*N, db);
3765     sk_unaligned_store(ptr + 3*N, da);
3766 }
3767 
3768 // ~~~~~~ Coverage scales / lerps ~~~~~~ //
3769 
3770 STAGE_PP(scale_1_float, const float* f) {
3771     U16 c = from_float(*f);
3772     r = div255( r * c );
3773     g = div255( g * c );
3774     b = div255( b * c );
3775     a = div255( a * c );
3776 }
3777 STAGE_PP(lerp_1_float, const float* f) {
3778     U16 c = from_float(*f);
3779     r = lerp(dr, r, c);
3780     g = lerp(dg, g, c);
3781     b = lerp(db, b, c);
3782     a = lerp(da, a, c);
3783 }
3784 STAGE_PP(scale_native, const uint16_t scales[]) {
3785     auto c = sk_unaligned_load<U16>(scales);
3786     r = div255( r * c );
3787     g = div255( g * c );
3788     b = div255( b * c );
3789     a = div255( a * c );
3790 }
3791 
3792 STAGE_PP(lerp_native, const uint16_t scales[]) {
3793     auto c = sk_unaligned_load<U16>(scales);
3794     r = lerp(dr, r, c);
3795     g = lerp(dg, g, c);
3796     b = lerp(db, b, c);
3797     a = lerp(da, a, c);
3798 }
3799 
3800 STAGE_PP(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
3801     U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
3802     r = div255( r * c );
3803     g = div255( g * c );
3804     b = div255( b * c );
3805     a = div255( a * c );
3806 }
3807 STAGE_PP(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
3808     U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
3809     r = lerp(dr, r, c);
3810     g = lerp(dg, g, c);
3811     b = lerp(db, b, c);
3812     a = lerp(da, a, c);
3813 }
3814 
3815 // Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
3816 SI U16 alpha_coverage_from_rgb_coverage(U16 a, U16 da, U16 cr, U16 cg, U16 cb) {
3817     return if_then_else(a < da, min(cr, min(cg,cb))
3818                               , max(cr, max(cg,cb)));
3819 }
3820 STAGE_PP(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
3821     U16 cr,cg,cb;
3822     load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
3823     U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
3824 
3825     r = div255( r * cr );
3826     g = div255( g * cg );
3827     b = div255( b * cb );
3828     a = div255( a * ca );
3829 }
3830 STAGE_PP(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
3831     U16 cr,cg,cb;
3832     load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
3833     U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
3834 
3835     r = lerp(dr, r, cr);
3836     g = lerp(dg, g, cg);
3837     b = lerp(db, b, cb);
3838     a = lerp(da, a, ca);
3839 }
3840 
3841 STAGE_PP(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
3842     U16 mul = load_8(ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy), tail),
3843         add = load_8(ptr_at_xy<const uint8_t>(&ctx->add, dx,dy), tail);
3844 
3845     r = min(div255(r*mul) + add, a);
3846     g = min(div255(g*mul) + add, a);
3847     b = min(div255(b*mul) + add, a);
3848 }
3849 
3850 
3851 // ~~~~~~ Gradient stages ~~~~~~ //
3852 
3853 // Clamp x to [0,1], both sides inclusive (think, gradients).
3854 // Even repeat and mirror funnel through a clamp to handle bad inputs like +Inf, NaN.
3855 SI F clamp_01(F v) { return min(max(0, v), 1); }
3856 
3857 STAGE_GG(clamp_x_1 , Ctx::None) { x = clamp_01(x); }
3858 STAGE_GG(repeat_x_1, Ctx::None) { x = clamp_01(x - floor_(x)); }
3859 STAGE_GG(mirror_x_1, Ctx::None) {
3860     auto two = [](F x){ return x+x; };
3861     x = clamp_01(abs_( (x-1.0f) - two(floor_((x-1.0f)*0.5f)) - 1.0f ));
3862 }
3863 
3864 SI I16 cond_to_mask_16(I32 cond) { return cast<I16>(cond); }
3865 
3866 STAGE_GG(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
3867     auto w = ctx->limit_x;
3868     sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w)));
3869 }
3870 STAGE_GG(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
3871     auto h = ctx->limit_y;
3872     sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= y) & (y < h)));
3873 }
3874 STAGE_GG(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
3875     auto w = ctx->limit_x;
3876     auto h = ctx->limit_y;
3877     sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w) & (0 <= y) & (y < h)));
3878 }
3879 STAGE_PP(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
3880     auto mask = sk_unaligned_load<U16>(ctx->mask);
3881     r = r & mask;
3882     g = g & mask;
3883     b = b & mask;
3884     a = a & mask;
3885 }
3886 
3887 SI void round_F_to_U16(F    R, F    G, F    B, F    A, bool interpolatedInPremul,
3888                        U16* r, U16* g, U16* b, U16* a) {
3889     auto round = [](F x) { return cast<U16>(x * 255.0f + 0.5f); };
3890 
3891     F limit = interpolatedInPremul ? A
3892                                    : 1;
3893     *r = round(min(max(0,R), limit));
3894     *g = round(min(max(0,G), limit));
3895     *b = round(min(max(0,B), limit));
3896     *a = round(A);  // we assume alpha is already in [0,1].
3897 }
3898 
SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
                        U16* r, U16* g, U16* b, U16* a) {

    F fr, fg, fb, fa, br, bg, bb, ba;
#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
    if (c->stopCount <= 8) {
        __m256i lo, hi;
        split(idx, &lo, &hi);

        fr = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), hi));
        br = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), hi));
        fg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), hi));
        bg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), hi));
        fb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), hi));
        bb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), hi));
        fa = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), hi));
        ba = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), hi));
    } else
#endif
    {
        fr = gather<F>(c->fs[0], idx);
        fg = gather<F>(c->fs[1], idx);
        fb = gather<F>(c->fs[2], idx);
        fa = gather<F>(c->fs[3], idx);
        br = gather<F>(c->bs[0], idx);
        bg = gather<F>(c->bs[1], idx);
        bb = gather<F>(c->bs[2], idx);
        ba = gather<F>(c->bs[3], idx);
    }
    round_F_to_U16(mad(t, fr, br),
                   mad(t, fg, bg),
                   mad(t, fb, bb),
                   mad(t, fa, ba),
                   c->interpolatedInPremul,
                   r,g,b,a);
}

STAGE_GP(gradient, const SkRasterPipeline_GradientCtx* c) {
    auto t = x;
    U32 idx = 0;

    // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
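    // In effect this is a branch-free linear search: each lane's idx counts how many of
    // ts[1..stopCount-1] are <= t, and that count picks the segment's f/b pair in
    // gradient_lookup below.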
    for (size_t i = 1; i < c->stopCount; i++) {
        idx += if_then_else(t >= c->ts[i], U32(1), U32(0));
    }

    gradient_lookup(c, idx, t, &r, &g, &b, &a);
}

STAGE_GP(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
    auto t = x;
    auto idx = trunc_(t * (c->stopCount-1));
    gradient_lookup(c, idx, t, &r, &g, &b, &a);
}

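// With exactly two stops there is nothing to look up: every lane uses the same single
// factor/bias pair per channel, so the stage is just four mads plus the rounding above.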
STAGE_GP(evenly_spaced_2_stop_gradient, const SkRasterPipeline_EvenlySpaced2StopGradientCtx* c) {
    auto t = x;
    round_F_to_U16(mad(t, c->f[0], c->b[0]),
                   mad(t, c->f[1], c->b[1]),
                   mad(t, c->f[2], c->b[2]),
                   mad(t, c->f[3], c->b[3]),
                   c->interpolatedInPremul,
                   &r,&g,&b,&a);
}

STAGE_GG(xy_to_unit_angle, Ctx::None) {
    F xabs = abs_(x),
      yabs = abs_(y);

    F slope = min(xabs, yabs)/max(xabs, yabs);
    F s = slope * slope;

    // Use a 7th degree polynomial to approximate atan.
    // This was generated using sollya.gforge.inria.fr.
    // A float-optimized polynomial was generated using the following command:
    //   P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
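    // So phi ~= atan(slope) / (2*pi), valid for slope in [0,1] (one octant); the
    // if_then_else fix-ups below reflect that octant result around 1/4, 1/2, and 1 to cover
    // the full unit circle, and the final compare maps any NaN from 0/0 above to 0.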
    F phi = slope
             * (0.15912117063999176025390625f     + s
             * (-5.185396969318389892578125e-2f   + s
             * (2.476101927459239959716796875e-2f + s
             * (-7.0547382347285747528076171875e-3f))));

    phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
    phi = if_then_else(x < 0.0f   , 1.0f/2.0f - phi, phi);
    phi = if_then_else(y < 0.0f   , 1.0f - phi     , phi);
    phi = if_then_else(phi != phi , 0              , phi);  // Check for NaN.
    x = phi;
}
STAGE_GG(xy_to_radius, Ctx::None) {
    x = sqrt_(x*x + y*y);
}

// ~~~~~~ Compound stages ~~~~~~ //

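// srcover_rgba_8888 fuses the common load-dst, srcover, store-dst tail of a blit into one
// stage: each channel becomes s + d*(255-a)/255, computed in 8-bit fixed point via div255.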
STAGE_PP(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
    auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);

    load_8888_(ptr, tail, &dr,&dg,&db,&da);
    r = r + div255( dr*inv(a) );
    g = g + div255( dg*inv(a) );
    b = b + div255( db*inv(a) );
    a = a + div255( da*inv(a) );
    store_8888_(ptr, tail, r,g,b,a);
}

// ~~~~~~ GrSwizzle stage ~~~~~~ //

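// The swizzle "context" is not a pointer to anything: the four swizzle characters are packed
// directly into the low bytes of the void* itself, which the memcpy below unpacks.  '0' and
// '1' select constant channels, with '1' meaning 255 because lowp colors are 8-bit fixed point.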
STAGE_PP(swizzle, void* ctx) {
    auto ir = r, ig = g, ib = b, ia = a;
    U16* o[] = {&r, &g, &b, &a};
    char swiz[4];
    memcpy(swiz, &ctx, sizeof(swiz));

    for (int i = 0; i < 4; ++i) {
        switch (swiz[i]) {
            case 'r': *o[i] = ir;       break;
            case 'g': *o[i] = ig;       break;
            case 'b': *o[i] = ib;       break;
            case 'a': *o[i] = ia;       break;
            case '0': *o[i] = U16(0);   break;
            case '1': *o[i] = U16(255); break;
            default:                    break;
        }
    }
}

// Now we'll add null stand-ins for stages we haven't implemented in lowp.
// If a pipeline uses any of these stages, it gets booted out of lowp and falls back to highp.
#define NOT_IMPLEMENTED(st) static void (*st)(void) = nullptr;
    NOT_IMPLEMENTED(callback)
    NOT_IMPLEMENTED(interpreter)
    NOT_IMPLEMENTED(unbounded_set_rgb)
    NOT_IMPLEMENTED(unbounded_uniform_color)
    NOT_IMPLEMENTED(unpremul)
    NOT_IMPLEMENTED(dither)
    NOT_IMPLEMENTED(load_16161616)
    NOT_IMPLEMENTED(load_16161616_dst)
    NOT_IMPLEMENTED(store_16161616)
    NOT_IMPLEMENTED(gather_16161616)
    NOT_IMPLEMENTED(load_a16)
    NOT_IMPLEMENTED(load_a16_dst)
    NOT_IMPLEMENTED(store_a16)
    NOT_IMPLEMENTED(gather_a16)
    NOT_IMPLEMENTED(load_rg1616)
    NOT_IMPLEMENTED(load_rg1616_dst)
    NOT_IMPLEMENTED(store_rg1616)
    NOT_IMPLEMENTED(gather_rg1616)
    NOT_IMPLEMENTED(load_f16)
    NOT_IMPLEMENTED(load_f16_dst)
    NOT_IMPLEMENTED(store_f16)
    NOT_IMPLEMENTED(gather_f16)
    NOT_IMPLEMENTED(load_af16)
    NOT_IMPLEMENTED(load_af16_dst)
    NOT_IMPLEMENTED(store_af16)
    NOT_IMPLEMENTED(gather_af16)
    NOT_IMPLEMENTED(load_rgf16)
    NOT_IMPLEMENTED(load_rgf16_dst)
    NOT_IMPLEMENTED(store_rgf16)
    NOT_IMPLEMENTED(gather_rgf16)
    NOT_IMPLEMENTED(load_f32)
    NOT_IMPLEMENTED(load_f32_dst)
    NOT_IMPLEMENTED(store_f32)
    NOT_IMPLEMENTED(gather_f32)
    NOT_IMPLEMENTED(load_rgf32)
    NOT_IMPLEMENTED(store_rgf32)
    NOT_IMPLEMENTED(load_1010102)
    NOT_IMPLEMENTED(load_1010102_dst)
    NOT_IMPLEMENTED(store_1010102)
    NOT_IMPLEMENTED(gather_1010102)
    NOT_IMPLEMENTED(store_u16_be)
    NOT_IMPLEMENTED(byte_tables)
    NOT_IMPLEMENTED(colorburn)
    NOT_IMPLEMENTED(colordodge)
    NOT_IMPLEMENTED(softlight)
    NOT_IMPLEMENTED(hue)
    NOT_IMPLEMENTED(saturation)
    NOT_IMPLEMENTED(color)
    NOT_IMPLEMENTED(luminosity)
    NOT_IMPLEMENTED(matrix_3x3)
    NOT_IMPLEMENTED(matrix_3x4)
    NOT_IMPLEMENTED(matrix_4x5)
    NOT_IMPLEMENTED(matrix_4x3)
    NOT_IMPLEMENTED(parametric)
    NOT_IMPLEMENTED(gamma_)
    NOT_IMPLEMENTED(PQish)
    NOT_IMPLEMENTED(HLGish)
    NOT_IMPLEMENTED(HLGinvish)
    NOT_IMPLEMENTED(rgb_to_hsl)
    NOT_IMPLEMENTED(hsl_to_rgb)
    NOT_IMPLEMENTED(gauss_a_to_rgba)
    NOT_IMPLEMENTED(mirror_x)
    NOT_IMPLEMENTED(repeat_x)
    NOT_IMPLEMENTED(mirror_y)
    NOT_IMPLEMENTED(repeat_y)
    NOT_IMPLEMENTED(negate_x)
    NOT_IMPLEMENTED(bilinear)
    NOT_IMPLEMENTED(bilerp_clamp_8888)
    NOT_IMPLEMENTED(bicubic)
    NOT_IMPLEMENTED(bicubic_clamp_8888)
    NOT_IMPLEMENTED(bilinear_nx)
    NOT_IMPLEMENTED(bilinear_ny)
    NOT_IMPLEMENTED(bilinear_px)
    NOT_IMPLEMENTED(bilinear_py)
    NOT_IMPLEMENTED(bicubic_n3x)
    NOT_IMPLEMENTED(bicubic_n1x)
    NOT_IMPLEMENTED(bicubic_p1x)
    NOT_IMPLEMENTED(bicubic_p3x)
    NOT_IMPLEMENTED(bicubic_n3y)
    NOT_IMPLEMENTED(bicubic_n1y)
    NOT_IMPLEMENTED(bicubic_p1y)
    NOT_IMPLEMENTED(bicubic_p3y)
    NOT_IMPLEMENTED(save_xy)
    NOT_IMPLEMENTED(accumulate)
    NOT_IMPLEMENTED(xy_to_2pt_conical_well_behaved)
    NOT_IMPLEMENTED(xy_to_2pt_conical_strip)
    NOT_IMPLEMENTED(xy_to_2pt_conical_focal_on_circle)
    NOT_IMPLEMENTED(xy_to_2pt_conical_smaller)
    NOT_IMPLEMENTED(xy_to_2pt_conical_greater)
    NOT_IMPLEMENTED(alter_2pt_conical_compensate_focal)
    NOT_IMPLEMENTED(alter_2pt_conical_unswap)
    NOT_IMPLEMENTED(mask_2pt_conical_nan)
    NOT_IMPLEMENTED(mask_2pt_conical_degenerates)
    NOT_IMPLEMENTED(apply_vector_mask)
#undef NOT_IMPLEMENTED

#endif//defined(JUMPER_IS_SCALAR) controlling whether we build lowp stages
}  // namespace lowp

}  // namespace SK_OPTS_NS

#undef SI

#endif//SkRasterPipeline_opts_DEFINED