/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkTextureCompressor_opts_DEFINED
#define SkTextureCompressor_opts_DEFINED

#include "SkOpts.h"

namespace SK_OPTS_NS {

#if defined(SK_ARM_HAS_NEON)
    // Converts the 3-bit index stored in each byte of the register from
    // 0, 1, 2, 3, 4, 5, 6, 7
    // to
    // 3, 2, 1, 0, 4, 5, 6, 7
    //
    // A more detailed explanation can be found in SkTextureCompressor::convert_indices
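    //
    // The conversion below is branchless: for an index i we first compute
    // 3 - i (negate, then add three). For i <= 3 that is already the answer;
    // for i >= 4 the result is negative, so taking the absolute value gives
    // i - 3, and adding three back (only in the lanes flagged by the mask)
    // restores i.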
    static inline uint8x16_t convert_indices(const uint8x16_t &x) {
        static const int8x16_t kThree = {
            0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
            0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
        };

        static const int8x16_t kZero = {
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        };

        // Take top three bits
        int8x16_t sx = vreinterpretq_s8_u8(x);

        // Negate ...
        sx = vnegq_s8(sx);

        // Add three...
        sx = vaddq_s8(sx, kThree);

        // Generate negatives mask
        const int8x16_t mask = vreinterpretq_s8_u8(vcltq_s8(sx, kZero));

        // Absolute value
        sx = vabsq_s8(sx);

        // Add three to the values that were negative...
        return vreinterpretq_u8_s8(vaddq_s8(sx, vandq_s8(mask, kThree)));
    }

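    // Delta-swap helper: exchanges the bits selected by `mask` with the bits
    // `shift` positions above them in each 64-bit lane, using the classic
    //     t = mask & (x ^ (x >> shift));  x ^ t ^ (t << shift)
    // bit-permutation idiom.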
    template<unsigned shift>
    static inline uint64x2_t shift_swap(const uint64x2_t &x, const uint64x2_t &mask) {
        uint64x2_t t = vandq_u64(mask, veorq_u64(x, vshrq_n_u64(x, shift)));
        return veorq_u64(x, veorq_u64(t, vshlq_n_u64(t, shift)));
    }

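    // Packs the sixteen 3-bit indices held in each 64-bit lane (two indices
    // per byte, one 4x4 block per lane) into the low 48 bits of that lane,
    // then ORs in the block header. Both lanes are processed in parallel.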
    static inline uint64x2_t pack_indices(const uint64x2_t &x) {
        // x: 00 a e 00 b f 00 c g 00 d h 00 i m 00 j n 00 k o 00 l p
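        //
        // (Each letter above denotes one 3-bit index and each 00 a pair of
        // zero bits, so every byte carries two indices and the 8 bytes of a
        // lane carry all sixteen indices of one block.)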

        static const uint64x2_t kMask1 = { 0x3FC0003FC00000ULL, 0x3FC0003FC00000ULL };
        uint64x2_t ret = shift_swap<10>(x, kMask1);

        // x: b f 00 00 00 a e c g i m 00 00 00 d h j n 00 k o 00 l p
        static const uint64x2_t kMask2 = { (0x3FULL << 52), (0x3FULL << 52) };
        static const uint64x2_t kMask3 = { (0x3FULL << 28), (0x3FULL << 28) };
        const uint64x2_t x1 = vandq_u64(vshlq_n_u64(ret, 52), kMask2);
        const uint64x2_t x2 = vandq_u64(vshlq_n_u64(ret, 20), kMask3);
        ret = vshrq_n_u64(vorrq_u64(ret, vorrq_u64(x1, x2)), 16);

        // x: 00 00 00 00 00 00 00 00 b f l p a e c g i m k o d h j n

        static const uint64x2_t kMask4 = { 0xFC0000ULL, 0xFC0000ULL };
        ret = shift_swap<6>(ret, kMask4);

    #if defined (SK_CPU_BENDIAN)
        // x: 00 00 00 00 00 00 00 00 b f l p a e i m c g k o d h j n

        static const uint64x2_t kMask5 = { 0x3FULL, 0x3FULL };
        ret = shift_swap<36>(ret, kMask5);

        // x: 00 00 00 00 00 00 00 00 b f j n a e i m c g k o d h l p

        static const uint64x2_t kMask6 = { 0xFFF000000ULL, 0xFFF000000ULL };
        ret = shift_swap<12>(ret, kMask6);
    #else
        // x: 00 00 00 00 00 00 00 00 c g i m d h l p b f j n a e k o

        static const uint64x2_t kMask5 = { 0xFC0ULL, 0xFC0ULL };
        ret = shift_swap<36>(ret, kMask5);

        // x: 00 00 00 00 00 00 00 00 a e i m d h l p b f j n c g k o

        static const uint64x2_t kMask6 = { (0xFFFULL << 36), (0xFFFULL << 36) };
        static const uint64x2_t kMask7 = { 0xFFFFFFULL, 0xFFFFFFULL };
        static const uint64x2_t kMask8 = { 0xFFFULL, 0xFFFULL };
        const uint64x2_t y1 = vandq_u64(ret, kMask6);
        const uint64x2_t y2 = vshlq_n_u64(vandq_u64(ret, kMask7), 12);
        const uint64x2_t y3 = vandq_u64(vshrq_n_u64(ret, 24), kMask8);
        ret = vorrq_u64(y1, vorrq_u64(y2, y3));
    #endif

        // x: 00 00 00 00 00 00 00 00 a e i m b f j n c g k o d h l p

        // Set the header
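        // The sixteen indices now occupy the low 48 bits of each lane; the
        // top 16 bits receive the R11 EAC block header (an 8-bit base
        // codeword, a 4-bit multiplier, and a 4-bit modifier-table index).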
        static const uint64x2_t kHeader = { 0x8490000000000000ULL, 0x8490000000000000ULL };
        return vorrq_u64(kHeader, ret);
    }

    // Takes a row of alpha values and places the most significant three bits of each byte into
    // the least significant bits of the same byte
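    // (For example, an alpha of 0xFF yields index 7, 0x20 yields 1, and 0x00
    // yields 0.)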
    static inline uint8x16_t make_index_row(const uint8x16_t &x) {
        static const uint8x16_t kTopThreeMask = {
            0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
            0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
        };
        return vshrq_n_u8(vandq_u8(x, kTopThreeMask), 5);
    }

    // Returns true if all of the bits in x are 0.
    static inline bool is_zero(uint8x16_t x) {
    // Early experiments suggest that this is much slower than just examining
    // the lanes, but it might warrant a little more investigation.
    #if 0
        // This code path tests the system register for overflow. We trigger
        // overflow by adding x to a register with all of its bits set. The
        // first instruction sets the bits.
        int reg;
        asm ("VTST.8   %%q0, %q1, %q1\n"
             "VQADD.u8 %q1, %%q0\n"
             "VMRS     %0, FPSCR\n"
             : "=r"(reg) : "w"(vreinterpretq_f32_u8(x)) : "q0", "q1");

        // Bit 21 corresponds to the overflow flag.
        return reg & (0x1 << 21);
    #else
        const uint64x2_t cvt = vreinterpretq_u64_u8(x);
        const uint64_t l1 = vgetq_lane_u64(cvt, 0);
        return (l1 == 0) && (l1 == vgetq_lane_u64(cvt, 1));
    #endif
    }

    #if defined (SK_CPU_BENDIAN)
    static inline uint64x2_t fix_endianness(uint64x2_t x) {
        return x;
    }
    #else
    static inline uint64x2_t fix_endianness(uint64x2_t x) {
        return vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(x)));
    }
    #endif

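    // Compresses four horizontally adjacent 4x4 blocks of 8-bit alpha values
    // at once: src points at four rows of 16 alpha bytes (with stride
    // rowBytes), and four 64-bit R11 EAC blocks are written to dst.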
    static void compress_r11eac_blocks(uint64_t* dst, const uint8_t* src, size_t rowBytes) {
        // Try to avoid switching between vector and non-vector ops...
        const uint8_t *const src1 = src;
        const uint8_t *const src2 = src + rowBytes;
        const uint8_t *const src3 = src + 2*rowBytes;
        const uint8_t *const src4 = src + 3*rowBytes;
        uint64_t *const dst1 = dst;
        uint64_t *const dst2 = dst + 2;

        const uint8x16_t alphaRow1 = vld1q_u8(src1);
        const uint8x16_t alphaRow2 = vld1q_u8(src2);
        const uint8x16_t alphaRow3 = vld1q_u8(src3);
        const uint8x16_t alphaRow4 = vld1q_u8(src4);

        const uint8x16_t cmp12 = vceqq_u8(alphaRow1, alphaRow2);
        const uint8x16_t cmp34 = vceqq_u8(alphaRow3, alphaRow4);
        const uint8x16_t cmp13 = vceqq_u8(alphaRow1, alphaRow3);

        const uint8x16_t cmp = vandq_u8(vandq_u8(cmp12, cmp34), cmp13);
        const uint8x16_t ncmp = vmvnq_u8(cmp);
        const uint8x16_t nAlphaRow1 = vmvnq_u8(alphaRow1);
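        // Fast paths: ncmp is zero when every column holds the same alpha in
        // all four rows. In that case, if the first row is all zeroes the
        // whole tile is transparent and the canonical transparent block is
        // stored; if the first row is all 0xFF the tile is fully opaque and
        // cmp (already 0xFF in every byte) can be stored directly.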
        if (is_zero(ncmp)) {
            if (is_zero(alphaRow1)) {
                static const uint64x2_t kTransparent = { 0x0020000000002000ULL,
                                                         0x0020000000002000ULL };
                vst1q_u64(dst1, kTransparent);
                vst1q_u64(dst2, kTransparent);
                return;
            } else if (is_zero(nAlphaRow1)) {
                vst1q_u64(dst1, vreinterpretq_u64_u8(cmp));
                vst1q_u64(dst2, vreinterpretq_u64_u8(cmp));
                return;
            }
        }

        const uint8x16_t indexRow1 = convert_indices(make_index_row(alphaRow1));
        const uint8x16_t indexRow2 = convert_indices(make_index_row(alphaRow2));
        const uint8x16_t indexRow3 = convert_indices(make_index_row(alphaRow3));
        const uint8x16_t indexRow4 = convert_indices(make_index_row(alphaRow4));

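        // Pack two 3-bit indices into each byte: for every column, rows 1 and
        // 2 share a byte, as do rows 3 and 4.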
        const uint64x2_t indexRow12 = vreinterpretq_u64_u8(
            vorrq_u8(vshlq_n_u8(indexRow1, 3), indexRow2));
        const uint64x2_t indexRow34 = vreinterpretq_u64_u8(
            vorrq_u8(vshlq_n_u8(indexRow3, 3), indexRow4));

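        // Transpose 32-bit lanes so that each 64-bit lane gathers all four
        // rows of a single 4x4 block: val[0] ends up holding blocks 0 and 2,
        // val[1] blocks 1 and 3; vrev64q_u32 then swaps the two 32-bit halves
        // of each lane into the order pack_indices expects.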
        const uint32x4x2_t blockIndices = vtrnq_u32(vreinterpretq_u32_u64(indexRow12),
                                                    vreinterpretq_u32_u64(indexRow34));
        const uint64x2_t blockIndicesLeft = vreinterpretq_u64_u32(vrev64q_u32(blockIndices.val[0]));
        const uint64x2_t blockIndicesRight = vreinterpretq_u64_u32(vrev64q_u32(blockIndices.val[1]));

        const uint64x2_t indicesLeft = fix_endianness(pack_indices(blockIndicesLeft));
        const uint64x2_t indicesRight = fix_endianness(pack_indices(blockIndicesRight));

        const uint64x2_t d1 = vcombine_u64(vget_low_u64(indicesLeft), vget_low_u64(indicesRight));
        const uint64x2_t d2 = vcombine_u64(vget_high_u64(indicesLeft), vget_high_u64(indicesRight));
        vst1q_u64(dst1, d1);
        vst1q_u64(dst2, d2);
    }

    static bool compress_a8_r11eac(uint8_t* dst, const uint8_t* src,
                                   int width, int height, size_t rowBytes) {

        // Since we're going to operate on four blocks at a time, the src width
        // must be a multiple of 16. However, the height only needs to be a
        // multiple of 4.
        if (0 == width || 0 == height || (width % 16) != 0 || (height % 4) != 0) {
            return false;
        }

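        // Each 4x4 tile of src compresses to a single 64-bit block, so dst
        // must have room for (width / 4) * (height / 4) uint64_t values,
        // written in raster order.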
        const int blocksX = width >> 2;
        const int blocksY = height >> 2;

        SkASSERT((blocksX % 4) == 0);

        uint64_t* encPtr = reinterpret_cast<uint64_t*>(dst);
        for (int y = 0; y < blocksY; ++y) {
            for (int x = 0; x < blocksX; x += 4) {
                // Compress it
                compress_r11eac_blocks(encPtr, src + 4*x, rowBytes);
                encPtr += 4;
            }
            src += 4 * rowBytes;
        }
        return true;
    }

    static SkOpts::TextureCompressor texture_compressor(SkColorType ct,
                                                        SkTextureCompressor::Format fmt) {
        if (ct == kAlpha_8_SkColorType && fmt == SkTextureCompressor::kR11_EAC_Format) {
            return compress_a8_r11eac;
        }
        return nullptr;
    }
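
    // A minimal sketch of how a caller might consume this entry point; the
    // variable names below are illustrative and not part of SkOpts:
    //
    //     auto proc = SK_OPTS_NS::texture_compressor(kAlpha_8_SkColorType,
    //                                                SkTextureCompressor::kR11_EAC_Format);
    //     if (proc) {
    //         proc(dst, src, width, height, rowBytes);
    //     }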
    static bool fill_block_dimensions(SkTextureCompressor::Format fmt, int* x, int* y) {
        if (fmt == SkTextureCompressor::kR11_EAC_Format) {
            *x = 16;
            *y = 4;
            return true;
        }
        return false;
    }

#else
    static SkOpts::TextureCompressor texture_compressor(SkColorType, SkTextureCompressor::Format) {
        return nullptr;
    }
    static bool fill_block_dimensions(SkTextureCompressor::Format, int*, int*) {
        return false;
    }

#endif

}  // namespace SK_OPTS_NS

#endif//SkTextureCompressor_opts_DEFINED