/* Copyright 2009 Motorola
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBitmapProcState.h"
#include "SkShader.h"
#include "SkUtilsArm.h"
#include "SkBitmapProcState_utils.h"

#include <arm_neon.h>

extern const SkBitmapProcState::MatrixProc ClampX_ClampY_Procs_neon[];
extern const SkBitmapProcState::MatrixProc RepeatX_RepeatY_Procs_neon[];

static void decal_nofilter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count);
static void decal_filter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count);

// TILEX_PROCF(fx, max)    SkClampMax((fx) >> 16, max)
static inline int16x8_t sbpsm_clamp_tile8(int32x4_t low, int32x4_t high, unsigned max) {
    int16x8_t res;

    // get the hi 16s of all those 32s
    res = vuzpq_s16(vreinterpretq_s16_s32(low), vreinterpretq_s16_s32(high)).val[1];

    // clamp
    res = vmaxq_s16(res, vdupq_n_s16(0));
    res = vminq_s16(res, vdupq_n_s16(max));

    return res;
}

// TILEX_PROCF(fx, max)    SkClampMax((fx) >> 16, max)
static inline int32x4_t sbpsm_clamp_tile4(int32x4_t f, unsigned max) {
    int32x4_t res;

    // get the hi 16s of all those 32s
    res = vshrq_n_s32(f, 16);

    // clamp
    res = vmaxq_s32(res, vdupq_n_s32(0));
    res = vminq_s32(res, vdupq_n_s32(max));

    return res;
}

// EXTRACT_LOW_BITS(fy, max)    (((fy) >> 12) & 0xF)
static inline int32x4_t sbpsm_clamp_tile4_low_bits(int32x4_t fx) {
    int32x4_t ret;

    ret = vshrq_n_s32(fx, 12);

    /* We don't need the mask below because the caller will
     * overwrite the non-masked bits
     */
    //ret = vandq_s32(ret, vdupq_n_s32(0xF));

    return ret;
}

// TILEX_PROCF(fx, max)    (((fx) & 0xFFFF) * ((max) + 1) >> 16)
static inline int16x8_t sbpsm_repeat_tile8(int32x4_t low, int32x4_t high, unsigned max) {
    uint16x8_t res;
    uint32x4_t tmpl, tmph;

    // get the lower 16 bits
    res = vuzpq_u16(vreinterpretq_u16_s32(low), vreinterpretq_u16_s32(high)).val[0];

    // bare multiplication, not SkFixedMul
    tmpl = vmull_u16(vget_low_u16(res), vdup_n_u16(max + 1));
    tmph = vmull_u16(vget_high_u16(res), vdup_n_u16(max + 1));

    // extraction of the 16 upper bits
    res = vuzpq_u16(vreinterpretq_u16_u32(tmpl), vreinterpretq_u16_u32(tmph)).val[1];

    return vreinterpretq_s16_u16(res);
}

// TILEX_PROCF(fx, max)    (((fx) & 0xFFFF) * ((max) + 1) >> 16)
static inline int32x4_t sbpsm_repeat_tile4(int32x4_t f, unsigned max) {
    uint16x4_t res;
    uint32x4_t tmp;

    // get the lower 16 bits
    res = vmovn_u32(vreinterpretq_u32_s32(f));

    // bare multiplication, not SkFixedMul
    tmp = vmull_u16(res, vdup_n_u16(max + 1));

    // extraction of the 16 upper bits
    tmp = vshrq_n_u32(tmp, 16);

    return vreinterpretq_s32_u32(tmp);
}

// EXTRACT_LOW_BITS(fx, max)    ((((fx) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
static inline int32x4_t sbpsm_repeat_tile4_low_bits(int32x4_t fx, unsigned max) {
    uint16x4_t res;
    uint32x4_t tmp;
    int32x4_t ret;

    // get the lower 16 bits
    res = vmovn_u32(vreinterpretq_u32_s32(fx));

    // bare multiplication, not SkFixedMul
    tmp = vmull_u16(res, vdup_n_u16(max + 1));

    // shift and mask
    ret = vshrq_n_s32(vreinterpretq_s32_u32(tmp), 12);

    /* We don't need the mask below because the caller will
     * overwrite the non-masked bits
     */
    //ret = vandq_s32(ret, vdupq_n_s32(0xF));

    return ret;
}
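/* Illustration only (not compiled): each NEON helper above mirrors the scalar
 * tiling macro named in its comment. A minimal scalar sketch, assuming 16.16
 * SkFixed input as used throughout this file (the helper names here are
 * purely illustrative):
 *
 *   // clamp: integer part of fx, pinned to [0, max]
 *   unsigned clamp_tile(SkFixed fx, unsigned max) {
 *       return SkClampMax(fx >> 16, max);
 *   }
 *
 *   // repeat: fractional part of fx, rescaled to [0, max]
 *   unsigned repeat_tile(SkFixed fx, unsigned max) {
 *       return ((fx & 0xFFFF) * (max + 1)) >> 16;
 *   }
 *
 * The vuzpq de-interleave used by the *_tile8 helpers is simply a vector way
 * of grabbing the high (val[1]) or low (val[0]) 16 bits of eight 32-bit lanes
 * at once.
 */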
#define MAKENAME(suffix)                ClampX_ClampY ## suffix ## _neon
#define TILEX_PROCF(fx, max)            SkClampMax((fx) >> 16, max)
#define TILEY_PROCF(fy, max)            SkClampMax((fy) >> 16, max)
#define TILEX_PROCF_NEON8(l, h, max)    sbpsm_clamp_tile8(l, h, max)
#define TILEY_PROCF_NEON8(l, h, max)    sbpsm_clamp_tile8(l, h, max)
#define TILEX_PROCF_NEON4(fx, max)      sbpsm_clamp_tile4(fx, max)
#define TILEY_PROCF_NEON4(fy, max)      sbpsm_clamp_tile4(fy, max)
#define EXTRACT_LOW_BITS(v, max)        (((v) >> 12) & 0xF)
#define EXTRACT_LOW_BITS_NEON4(v, max)  sbpsm_clamp_tile4_low_bits(v)
#define CHECK_FOR_DECAL
#include "SkBitmapProcState_matrix_neon.h"

#define MAKENAME(suffix)                RepeatX_RepeatY ## suffix ## _neon
#define TILEX_PROCF(fx, max)            SK_USHIFT16(((fx) & 0xFFFF) * ((max) + 1))
#define TILEY_PROCF(fy, max)            SK_USHIFT16(((fy) & 0xFFFF) * ((max) + 1))
#define TILEX_PROCF_NEON8(l, h, max)    sbpsm_repeat_tile8(l, h, max)
#define TILEY_PROCF_NEON8(l, h, max)    sbpsm_repeat_tile8(l, h, max)
#define TILEX_PROCF_NEON4(fx, max)      sbpsm_repeat_tile4(fx, max)
#define TILEY_PROCF_NEON4(fy, max)      sbpsm_repeat_tile4(fy, max)
#define EXTRACT_LOW_BITS(v, max)        ((((v) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
#define EXTRACT_LOW_BITS_NEON4(v, max)  sbpsm_repeat_tile4_low_bits(v, max)
#include "SkBitmapProcState_matrix_neon.h"

void decal_nofilter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
    if (count >= 8) {
        // SkFixed is 16.16 fixed point
        SkFixed dx8 = dx * 8;
        int32x4_t vdx8 = vdupq_n_s32(dx8);

        // setup lbase and hbase
        int32x4_t lbase, hbase;
        lbase = vdupq_n_s32(fx);
        lbase = vsetq_lane_s32(fx + dx, lbase, 1);
        lbase = vsetq_lane_s32(fx + dx + dx, lbase, 2);
        lbase = vsetq_lane_s32(fx + dx + dx + dx, lbase, 3);
        hbase = lbase + vdupq_n_s32(4 * dx);

        do {
            // store the upper 16 bits
            vst1q_u32(dst, vreinterpretq_u32_s16(
                vuzpq_s16(vreinterpretq_s16_s32(lbase), vreinterpretq_s16_s32(hbase)).val[1]
            ));

            // on to the next group of 8
            lbase += vdx8;
            hbase += vdx8;
            dst += 4; // we processed 8 elements, but each result is only 16 bits, so 4 uint32_t slots
            count -= 8;
            fx += dx8;
        } while (count >= 8);
    }

    uint16_t* xx = (uint16_t*)dst;
    for (int i = count; i > 0; --i) {
        *xx++ = SkToU16(fx >> 16); fx += dx;
    }
}
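/* Sketch only (not compiled): the scalar loop the NEON block above is meant
 * to match. It is the same computation as the per-element tail loop at the
 * end of the function, writing one 16-bit x index per pixel:
 *
 *   uint16_t* xx = (uint16_t*)dst;
 *   for (int i = 0; i < count; ++i) {
 *       *xx++ = SkToU16(fx >> 16);
 *       fx += dx;
 *   }
 *
 * The vector path simply computes eight of these indices per iteration and
 * stores them as four packed uint32_t.
 */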
void decal_filter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
    if (count >= 8) {
        SkFixed dx8 = dx * 8;
        int32x4_t vdx8 = vdupq_n_s32(dx8);

        int32x4_t wide_fx, wide_fx2;
        wide_fx = vdupq_n_s32(fx);
        wide_fx = vsetq_lane_s32(fx + dx, wide_fx, 1);
        wide_fx = vsetq_lane_s32(fx + dx + dx, wide_fx, 2);
        wide_fx = vsetq_lane_s32(fx + dx + dx + dx, wide_fx, 3);

        wide_fx2 = vaddq_s32(wide_fx, vdupq_n_s32(4 * dx));

        while (count >= 8) {
            int32x4_t wide_out;
            int32x4_t wide_out2;

            wide_out = vshlq_n_s32(vshrq_n_s32(wide_fx, 12), 14);
            wide_out = wide_out | (vshrq_n_s32(wide_fx, 16) + vdupq_n_s32(1));

            wide_out2 = vshlq_n_s32(vshrq_n_s32(wide_fx2, 12), 14);
            wide_out2 = wide_out2 | (vshrq_n_s32(wide_fx2, 16) + vdupq_n_s32(1));

            vst1q_u32(dst, vreinterpretq_u32_s32(wide_out));
            vst1q_u32(dst + 4, vreinterpretq_u32_s32(wide_out2));

            dst += 8;
            fx += dx8;
            wide_fx += vdx8;
            wide_fx2 += vdx8;
            count -= 8;
        }
    }

    if (count & 1)
    {
        SkASSERT((fx >> (16 + 14)) == 0);
        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;
    }
    while ((count -= 2) >= 0)
    {
        SkASSERT((fx >> (16 + 14)) == 0);
        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;

        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;
    }
}
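/* Sketch only (not compiled): how one word packed by decal_filter_scale_neon
 * could be taken apart again, assuming the 14/4/14-bit layout implied by the
 * packing expression (fx >> 12 << 14) | ((fx >> 16) + 1) and by the SkASSERT
 * that fx fits in 30 bits (so x0 + 1 still fits in 14 bits):
 *
 *   uint32_t packed = ...;                 // one entry written by this proc
 *   unsigned x0   =  packed >> 18;         // integer x, i.e. fx >> 16
 *   unsigned frac = (packed >> 14) & 0xF;  // 4-bit subpixel fraction
 *   unsigned x1   =  packed & 0x3FFF;      // neighbouring column, x0 + 1
 *
 * A filtering sampler can then blend the texels at x0 and x1 using frac.
 */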