/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkRasterPipeline_DEFINED
#define SkRasterPipeline_DEFINED

#include "SkImageInfo.h"
#include "SkNx.h"
#include "SkTArray.h"
#include "SkTypes.h"
#include <vector>

/**
 * SkRasterPipeline provides a cheap way to chain together a pixel processing pipeline.
 *
 * It's particularly designed for situations where the potential pipeline is extremely
 * combinatoric: {N dst formats} x {M source formats} x {K mask formats} x {C transfer modes} ...
 * No one wants to write specialized routines for all those combinations, and if we did, we'd
 * end up bloating our code size dramatically.  SkRasterPipeline stages can be chained together
 * at runtime, so we can scale this problem linearly rather than combinatorially.
 *
 * Each stage is represented by a function conforming to a common interface, SkRasterPipeline::Fn,
 * and by an arbitrary context pointer.  Fn's arguments, and its sometimes custom calling
 * convention, are designed to maximize the amount of data we can pass along the pipeline cheaply.
 * On many machines all arguments stay in registers the entire time.
 *
 * The meanings of the arguments to Fn are sometimes fixed:
 *    - The Stage* always represents the current stage, mainly providing access to ctx().
 *    - The first size_t is always the destination x coordinate.
 *      (If you need y, put it in your context.)
 *    - The second size_t is always tail: 0 when working on a full 4-pixel slab,
 *      or 1..3 when using only the bottom 1..3 lanes of each register.
 *    - By the time the shader's done, the first four vectors should hold source red,
 *      green, blue, and alpha, up to 4 pixels' worth each.
 *
 * Sometimes arguments are flexible:
 *    - In the shader, the first four vectors can be used for anything, e.g. sample coordinates.
 *    - The last four vectors are scratch registers that can be used to communicate between
 *      stages; transfer modes use these to hold the original destination pixel components.
 *
 * On some platforms the last four vectors are slower to work with than the other arguments.
 *
 * When done mutating its arguments and/or context, a stage can either:
 *    1) call st->next() with its mutated arguments, chaining to the next stage of the pipeline; or
 *    2) return, indicating the pipeline is complete for these pixels.
 *
 * Some stages that typically return are those that write a color to a destination pointer,
 * but any stage can short-circuit the rest of the pipeline by returning instead of calling next().
 */

// TODO: There may be a better place to stuff tail, e.g. in the bottom alignment bits of
// the Stage*.  This mostly matters on 64-bit Windows where every register is precious.
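
// For illustration only: a minimal sketch of what a stage body might look like under the
// contract described above.  The 4-wide vector type (Sk4f from SkNx.h) and the exact
// signatures of Stage::ctx() and Stage::next() are assumptions here, not declarations from
// this header; the runtime Stage referred to above is not the public SkRasterPipeline::Stage
// struct declared below.
//
//    static void premul(Stage* st, size_t x, size_t tail,
//                       Sk4f  r, Sk4f  g, Sk4f  b, Sk4f  a,
//                       Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
//        r *= a;  g *= a;  b *= a;                  // Mutate the source channels in place...
//        st->next(x,tail, r,g,b,a, dr,dg,db,da);    // ...then chain to the next stage
//    }                                              // (or return to end the pipeline early).
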
#define SK_RASTER_PIPELINE_STAGES(M)                              \
    M(trace) M(registers)                                         \
    M(move_src_dst) M(move_dst_src) M(swap)                       \
    M(clamp_0) M(clamp_1) M(clamp_a)                              \
    M(unpremul) M(premul)                                         \
    M(set_rgb) M(swap_rb)                                         \
    M(from_srgb) M(to_srgb)                                       \
    M(from_2dot2) M(to_2dot2)                                     \
    M(constant_color) M(seed_shader)                              \
    M(load_a8)   M(store_a8)                                      \
    M(load_g8)                                                    \
    M(load_565)  M(store_565)                                     \
    M(load_4444) M(store_4444)                                    \
    M(load_f16)  M(store_f16)                                     \
    M(load_f32)  M(store_f32)                                     \
    M(load_8888) M(store_8888)                                    \
    M(load_u16_be) M(load_rgb_u16_be) M(store_u16_be)             \
    M(load_tables_u16_be) M(load_tables_rgb_u16_be)               \
    M(load_tables)                                                \
    M(scale_u8) M(scale_1_float)                                  \
    M(lerp_u8) M(lerp_565) M(lerp_1_float)                        \
    M(dstatop) M(dstin) M(dstout) M(dstover)                      \
    M(srcatop) M(srcin) M(srcout) M(srcover)                      \
    M(clear) M(modulate) M(multiply) M(plus_) M(screen) M(xor_)   \
    M(colorburn) M(colordodge) M(darken) M(difference)            \
    M(exclusion) M(hardlight) M(lighten) M(overlay) M(softlight)  \
    M(luminance_to_alpha)                                         \
    M(matrix_2x3) M(matrix_3x4) M(matrix_4x5)                     \
    M(matrix_perspective)                                         \
    M(parametric_r) M(parametric_g) M(parametric_b)               \
    M(parametric_a)                                               \
    M(table_r) M(table_g) M(table_b) M(table_a)                   \
    M(color_lookup_table) M(lab_to_xyz)                           \
    M(clamp_x) M(mirror_x) M(repeat_x)                            \
    M(clamp_y) M(mirror_y) M(repeat_y)                            \
    M(gather_a8) M(gather_g8) M(gather_i8)                        \
    M(gather_565) M(gather_4444) M(gather_8888) M(gather_f16)     \
    M(bilinear_nx) M(bilinear_px) M(bilinear_ny) M(bilinear_py)   \
    M(bicubic_n3x) M(bicubic_n1x) M(bicubic_p1x) M(bicubic_p3x)   \
    M(bicubic_n3y) M(bicubic_n1y) M(bicubic_p1y) M(bicubic_p3y)   \
    M(save_xy) M(accumulate)                                      \
    M(linear_gradient_2stops)                                     \
    M(byte_tables) M(byte_tables_rgb)                             \
    M(shader_adapter)                                             \
    M(rgb_to_hsl)                                                 \
    M(hsl_to_rgb)

class SkRasterPipeline {
public:
    SkRasterPipeline();

    enum StockStage {
    #define M(stage) stage,
        SK_RASTER_PIPELINE_STAGES(M)
    #undef M
    };
    void append(StockStage, void* = nullptr);
    void append(StockStage stage, const void* ctx) { this->append(stage, const_cast<void*>(ctx)); }

    // Append all stages from the given pipeline to this pipeline.
    void extend(const SkRasterPipeline&);

    // Runs the pipeline walking x through [x,x+n).
    void run(size_t x, size_t n) const;

    void dump() const;

    struct Stage {
        StockStage stage;
        void*      ctx;
    };

    // Conversion from sRGB can be subtly tricky when premultiplication is involved.
    // Use this helper to keep things sane.
    void append_from_srgb(SkAlphaType);

    bool empty() const { return fStages.empty(); }

private:
    bool run_with_jumper(size_t x, size_t n) const;

    std::vector<Stage> fStages;
};

#endif  // SkRasterPipeline_DEFINED
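
// For illustration only: a hedged sketch of how a pipeline might be assembled and run with the
// API above.  The names color_ctx and dst_ctx are hypothetical placeholders; the exact context
// layout each stock stage expects is defined by the stage implementations, not by this header.
//
//    SkRasterPipeline p;
//    p.append(SkRasterPipeline::constant_color, color_ctx);  // Shader stage: seed r,g,b,a.
//    p.append(SkRasterPipeline::swap_rb);                     // e.g. swap the red and blue channels.
//    p.append(SkRasterPipeline::store_8888, dst_ctx);         // Write the pixels back out.
//    p.run(x, n);                                             // Walk x through [x, x+n).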