/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_sse_DEFINED
#define SkNx_sse_DEFINED

#include <immintrin.h>

// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.
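// (floor() and thenElse() below are examples of this pattern: the SSE4.1 path is selected
// inside an always-inline member function.)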

namespace {

template <>
class SkNx<2, float> {
public:
    AI SkNx(const __m128& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(_mm_set1_ps(val)) {}
    AI static SkNx Load(const void* ptr) {
        return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
    }
    AI SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}

    AI void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    AI SkNx operator <  (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    AI SkNx operator >  (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    AI SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
    AI SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
    AI SkNx invert() const { return _mm_rcp_ps(fVec); }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&1];
    }

    AI bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
    AI bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }

    __m128 fVec;
};
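
// Hypothetical usage sketch (names here are illustrative, not part of this header):
//   float xy[2] = {1.0f, 2.0f};
//   (Sk2f::Load(xy) * Sk2f(0.5f)).store(xy);   // xy becomes {0.5f, 1.0f}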

template <>
class SkNx<4, float> {
public:
    AI SkNx(const __m128& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
    AI SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}

    AI static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }
    AI void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        __m128 v0 = _mm_loadu_ps(((float*)ptr) +  0),
               v1 = _mm_loadu_ps(((float*)ptr) +  4),
               v2 = _mm_loadu_ps(((float*)ptr) +  8),
               v3 = _mm_loadu_ps(((float*)ptr) + 12);
        _MM_TRANSPOSE4_PS(v0, v1, v2, v3);
        *r = v0;
        *g = v1;
        *b = v2;
        *a = v3;
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        __m128 v0 = r.fVec,
               v1 = g.fVec,
               v2 = b.fVec,
               v3 = a.fVec;
        _MM_TRANSPOSE4_PS(v0, v1, v2, v3);
        _mm_storeu_ps(((float*) dst) +  0, v0);
        _mm_storeu_ps(((float*) dst) +  4, v1);
        _mm_storeu_ps(((float*) dst) +  8, v2);
        _mm_storeu_ps(((float*) dst) + 12, v3);
    }
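    // Note: _MM_TRANSPOSE4_PS is a 4x4 in-register transpose, so Load4/Store4 above convert
    // between interleaved RGBA float pixels in memory and planar r,g,b,a vectors.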

    AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    AI SkNx operator <  (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    AI SkNx operator >  (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    AI SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
    AI SkNx floor() const {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_floor_ps(fVec);
    #else
        // Emulate _mm_floor_ps() with SSE2:
        //   - roundtrip through integers via truncation
        //   - subtract 1 if that's too big (possible for negative values).
        // This restricts the domain of our inputs to a maximum somewhere around 2^31.
        // Seems plenty big.
        __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
        __m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
        return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
    #endif
    }
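    // Worked example of the SSE2 fallback above (a sketch of the arithmetic, not extra code):
    //   floor(-1.25f): roundtrip = (float)(int)-1.25f = -1.0f   (truncation rounds toward zero)
    //                  too_big   = (-1.0f > -1.25f)   = all-1s mask
    //                  result    = -1.0f - 1.0f       = -2.0f   (the correct floor)
    // For non-negative inputs truncation already equals floor, so nothing is subtracted.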

    AI SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
    AI SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
    AI SkNx invert() const { return _mm_rcp_ps(fVec); }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    AI bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
    AI bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_ps(e.fVec, t.fVec, fVec);
    #else
        return _mm_or_ps(_mm_and_ps   (fVec, t.fVec),
                         _mm_andnot_ps(fVec, e.fVec));
    #endif
    }
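    // Usage sketch (illustrative): thenElse is called on the comparison mask, so a branchless
    // clamp of negatives to zero reads
    //   (x < Sk4f(0.0f)).thenElse(Sk4f(0.0f), x);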

    __m128 fVec;
};

template <>
class SkNx<4, int32_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(int32_t val) : fVec(_mm_set1_epi32(val)) {}
    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const {
        __m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
                mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
                                  _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
    }
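    // The multiply above fills in for _mm_mullo_epi32, which isn't available before SSE4.1:
    // _mm_mul_epu32 multiplies lanes 0 and 2 into 64-bit results, the 4-byte shift lines up
    // lanes 1 and 3 for a second _mm_mul_epu32, and the shuffles gather the low 32 bits of
    // each product back into lane order. Those low 32 bits are the same for signed and
    // unsigned operands, so the result is correct for int32_t.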

    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
    AI SkNx operator <  (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
    AI SkNx operator >  (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }

    AI int32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; int32_t is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
    #else
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    #endif
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint32_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint32_t val) : fVec(_mm_set1_epi32(val)) {}
    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    // Not quite sure how to best do operator * in SSE2.  We probably don't use it.
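    // (If it were needed, the _mm_mul_epu32 + shuffle sequence in SkNx<4, int32_t> above
    // would work unchanged here, since the low 32 bits of a 32x32 product don't depend on
    // whether the operands are treated as signed or unsigned.)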

    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
    // operator < and > take a little extra fiddling to make work for unsigned ints.
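    // (The usual fiddle would be to flip the sign bit of both operands with an XOR of
    // 0x80000000 and then use the signed _mm_cmplt_epi32/_mm_cmpgt_epi32, the same trick
    // the uint8_t operator < below uses bytewise.)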

    AI uint32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint32_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
    #else
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    #endif
    }

    __m128i fVec;
};


template <>
class SkNx<4, uint16_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d)
        : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}

    AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
    AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        __m128i lo = _mm_loadu_si128(((__m128i*)ptr) + 0),
                hi = _mm_loadu_si128(((__m128i*)ptr) + 1);
        __m128i even = _mm_unpacklo_epi16(lo, hi),   // r0 r2 g0 g2 b0 b2 a0 a2
                 odd = _mm_unpackhi_epi16(lo, hi);   // r1 r3 ...
        __m128i rg = _mm_unpacklo_epi16(even, odd),  // r0 r1 r2 r3 g0 g1 g2 g3
                ba = _mm_unpackhi_epi16(even, odd);  // b0 b1 ...  a0 a1 ...
        *r = rg;
        *g = _mm_srli_si128(rg, 8);
        *b = ba;
        *a = _mm_srli_si128(ba, 8);
    }
    AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
        // The idea here is to get 4 vectors that are R G B _ _ _ _ _.
        // The second load is at a funny location to make sure we don't read past
        // the bounds of memory.  This is fine, we just need to shift it a little bit.
        const uint8_t* ptr8 = (const uint8_t*) ptr;
        __m128i rgb0 = _mm_loadu_si128((const __m128i*) (ptr8 + 0));
        __m128i rgb1 = _mm_srli_si128(rgb0, 3*2);
        __m128i rgb2 = _mm_srli_si128(_mm_loadu_si128((const __m128i*) (ptr8 + 4*2)), 2*2);
        __m128i rgb3 = _mm_srli_si128(rgb2, 3*2);

        __m128i rrggbb01 = _mm_unpacklo_epi16(rgb0, rgb1);
        __m128i rrggbb23 = _mm_unpacklo_epi16(rgb2, rgb3);
        *r = _mm_unpacklo_epi32(rrggbb01, rrggbb23);
        *g = _mm_srli_si128(r->fVec, 4*2);
        *b = _mm_unpackhi_epi32(rrggbb01, rrggbb23);
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        __m128i rg = _mm_unpacklo_epi16(r.fVec, g.fVec);
        __m128i ba = _mm_unpacklo_epi16(b.fVec, a.fVec);
        __m128i lo = _mm_unpacklo_epi32(rg, ba);
        __m128i hi = _mm_unpackhi_epi32(rg, ba);
        _mm_storeu_si128(((__m128i*) dst) + 0, lo);
        _mm_storeu_si128(((__m128i*) dst) + 1, hi);
    }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    __m128i fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
            uint16_t e, uint16_t f, uint16_t g, uint16_t h)
        : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}

    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        // TODO: AVX2 version
        __m128i _01 = _mm_loadu_si128(((__m128i*)ptr) + 0),
                _23 = _mm_loadu_si128(((__m128i*)ptr) + 1),
                _45 = _mm_loadu_si128(((__m128i*)ptr) + 2),
                _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);

        __m128i _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
                _13 = _mm_unpackhi_epi16(_01, _23),  // r1 r3 g1 g3 b1 b3 a1 a3
                _46 = _mm_unpacklo_epi16(_45, _67),
                _57 = _mm_unpackhi_epi16(_45, _67);

        __m128i rg0123 = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
                ba0123 = _mm_unpackhi_epi16(_02, _13),  // b0 b1 b2 b3 a0 a1 a2 a3
                rg4567 = _mm_unpacklo_epi16(_46, _57),
                ba4567 = _mm_unpackhi_epi16(_46, _57);

        *r = _mm_unpacklo_epi64(rg0123, rg4567);
        *g = _mm_unpackhi_epi64(rg0123, rg4567);
        *b = _mm_unpacklo_epi64(ba0123, ba4567);
        *a = _mm_unpackhi_epi64(ba0123, ba4567);
    }
    AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
        // TODO: AVX2 version
        const uint8_t* ptr8 = (const uint8_t*) ptr;
        __m128i rgb0 = _mm_loadu_si128((const __m128i*) (ptr8 +  0*2));
        __m128i rgb1 = _mm_srli_si128(rgb0, 3*2);
        __m128i rgb2 = _mm_loadu_si128((const __m128i*) (ptr8 +  6*2));
        __m128i rgb3 = _mm_srli_si128(rgb2, 3*2);
        __m128i rgb4 = _mm_loadu_si128((const __m128i*) (ptr8 + 12*2));
        __m128i rgb5 = _mm_srli_si128(rgb4, 3*2);
        __m128i rgb6 = _mm_srli_si128(_mm_loadu_si128((const __m128i*) (ptr8 + 16*2)), 2*2);
        __m128i rgb7 = _mm_srli_si128(rgb6, 3*2);

        __m128i rgb01 = _mm_unpacklo_epi16(rgb0, rgb1);
        __m128i rgb23 = _mm_unpacklo_epi16(rgb2, rgb3);
        __m128i rgb45 = _mm_unpacklo_epi16(rgb4, rgb5);
        __m128i rgb67 = _mm_unpacklo_epi16(rgb6, rgb7);

        __m128i rg03 = _mm_unpacklo_epi32(rgb01, rgb23);
        __m128i bx03 = _mm_unpackhi_epi32(rgb01, rgb23);
        __m128i rg47 = _mm_unpacklo_epi32(rgb45, rgb67);
        __m128i bx47 = _mm_unpackhi_epi32(rgb45, rgb67);

        *r = _mm_unpacklo_epi64(rg03, rg47);
        *g = _mm_unpackhi_epi64(rg03, rg47);
        *b = _mm_unpacklo_epi64(bx03, bx47);
    }
    AI static void Store4(void* ptr, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        // TODO: AVX2 version
        __m128i rg0123 = _mm_unpacklo_epi16(r.fVec, g.fVec),  // r0 g0 r1 g1 r2 g2 r3 g3
                rg4567 = _mm_unpackhi_epi16(r.fVec, g.fVec),  // r4 g4 r5 g5 r6 g6 r7 g7
                ba0123 = _mm_unpacklo_epi16(b.fVec, a.fVec),
                ba4567 = _mm_unpackhi_epi16(b.fVec, a.fVec);

        _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg0123, ba0123));
        _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg0123, ba0123));
        _mm_storeu_si128((__m128i*)ptr + 2, _mm_unpacklo_epi32(rg4567, ba4567));
        _mm_storeu_si128((__m128i*)ptr + 3, _mm_unpackhi_epi32(rg4567, ba4567));
    }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) {
        // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
        // signed version, _mm_min_epi16, then shift back.
        const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
        const __m128i top_8x = _mm_set1_epi16(top);
        return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x),
                                                  _mm_sub_epi8(b.fVec, top_8x)));
    }
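    // Note: the byte-wise _mm_add_epi8/_mm_sub_epi8 above are interchangeable with their
    // epi16 counterparts here because the bias 0x8000 has a zero low byte in every 16-bit
    // lane, so no carry or borrow can cross a byte boundary.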

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    AI SkNx() {}
    AI SkNx(const __m128i& vec) : fVec(vec) {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
        : fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}


    AI static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
    AI void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    __m128i fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h,
            uint8_t i, uint8_t j, uint8_t k, uint8_t l,
            uint8_t m, uint8_t n, uint8_t o, uint8_t p)
        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}

    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
    AI SkNx operator < (const SkNx& o) const {
        // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
        auto flip = _mm_set1_epi8(char(0x80));
        return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
    }
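    // Worked example: 1 < 255. After the flip they are 0x81 (-127) and 0x7f (+127) as
    // signed bytes, and -127 < 127 yields the expected all-ones lane.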

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    __m128i fVec;
};

#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2

template <>
class SkNx<8, uint8_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t v) : fVec(_mm_set1_epi8(v)) {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h)
        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, 0,0,0,0, 0,0,0,0)) {}


    AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
    AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&7];
    }

    __m128i fVec;
};

template <>
class SkNx<8, int32_t> {
public:
    AI SkNx(const __m256i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(int32_t v) : fVec(_mm256_set1_epi32(v)) {}
    AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d,
            int32_t e, int32_t f, int32_t g, int32_t h)
        : fVec(_mm256_setr_epi32(a,b,c,d, e,f,g,h)) {}

    AI static SkNx Load(const void* ptr) { return _mm256_loadu_si256((const __m256i*)ptr); }
    AI void store(void* ptr) const { _mm256_storeu_si256((__m256i*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm256_add_epi32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm256_sub_epi32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm256_mullo_epi32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return _mm256_and_si256(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm256_or_si256(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return _mm256_xor_si256(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm256_slli_epi32(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm256_srai_epi32(fVec, bits); }

    AI int32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m256i v; int32_t is[8]; } pun = {fVec};
        return pun.is[k&7];
    }

    __m256i fVec;
};

template <>
class SkNx<8, uint32_t> {
public:
    AI SkNx(const __m256i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint32_t v) : fVec(_mm256_set1_epi32(v)) {}
    AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
            uint32_t e, uint32_t f, uint32_t g, uint32_t h)
        : fVec(_mm256_setr_epi32(a,b,c,d, e,f,g,h)) {}

    AI static SkNx Load(const void* ptr) { return _mm256_loadu_si256((const __m256i*)ptr); }
    AI void store(void* ptr) const { _mm256_storeu_si256((__m256i*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm256_add_epi32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm256_sub_epi32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm256_mullo_epi32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return _mm256_and_si256(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm256_or_si256(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return _mm256_xor_si256(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm256_slli_epi32(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm256_srli_epi32(fVec, bits); }

    AI uint32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m256i v; uint32_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    __m256i fVec;
};

// _mm256_unpack{lo,hi}_pd() auto-casting to and from __m256d.
AI static __m256 unpacklo_pd(__m256 x, __m256 y) {
    return _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(x), _mm256_castps_pd(y)));
}
AI static __m256 unpackhi_pd(__m256 x, __m256 y) {
    return _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(x), _mm256_castps_pd(y)));
}

template <>
class SkNx<8, float> {
public:
    AI SkNx(const __m256& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(_mm256_set1_ps(val)) {}
    AI SkNx(float a, float b, float c, float d,
            float e, float f, float g, float h) : fVec(_mm256_setr_ps(a,b,c,d,e,f,g,h)) {}

    AI static SkNx Load(const void* ptr) { return _mm256_loadu_ps((const float*)ptr); }
    AI void store(void* ptr) const { _mm256_storeu_ps((float*)ptr, fVec); }

    AI static void Store4(void* ptr,
                          const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        __m256 rg0145 = _mm256_unpacklo_ps(r.fVec, g.fVec),  // r0 g0 r1 g1 | r4 g4 r5 g5
               rg2367 = _mm256_unpackhi_ps(r.fVec, g.fVec),  // r2 ...      | r6 ...
               ba0145 = _mm256_unpacklo_ps(b.fVec, a.fVec),  // b0 a0 b1 a1 | b4 a4 b5 a5
               ba2367 = _mm256_unpackhi_ps(b.fVec, a.fVec);  // b2 ...      | b6 ...

        __m256 _04 = unpacklo_pd(rg0145, ba0145),  // r0 g0 b0 a0 | r4 g4 b4 a4
               _15 = unpackhi_pd(rg0145, ba0145),  // r1 ...      | r5 ...
               _26 = unpacklo_pd(rg2367, ba2367),  // r2 ...      | r6 ...
               _37 = unpackhi_pd(rg2367, ba2367);  // r3 ...      | r7 ...

        __m256 _01 = _mm256_permute2f128_ps(_04, _15, 32),  // 32 == 0010 0000 == lo, lo
               _23 = _mm256_permute2f128_ps(_26, _37, 32),
               _45 = _mm256_permute2f128_ps(_04, _15, 49),  // 49 == 0011 0001 == hi, hi
               _67 = _mm256_permute2f128_ps(_26, _37, 49);

        _mm256_storeu_ps((float*)ptr + 0*8, _01);
        _mm256_storeu_ps((float*)ptr + 1*8, _23);
        _mm256_storeu_ps((float*)ptr + 2*8, _45);
        _mm256_storeu_ps((float*)ptr + 3*8, _67);
    }
    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        Sk4f rl, gl, bl, al,
             rh, gh, bh, ah;
        Sk4f::Load4((const float*)ptr +  0, &rl, &gl, &bl, &al);
        Sk4f::Load4((const float*)ptr + 16, &rh, &gh, &bh, &ah);
        *r = _mm256_setr_m128(rl.fVec, rh.fVec);
        *g = _mm256_setr_m128(gl.fVec, gh.fVec);
        *b = _mm256_setr_m128(bl.fVec, bh.fVec);
        *a = _mm256_setr_m128(al.fVec, ah.fVec);
    }

    AI SkNx operator+(const SkNx& o) const { return _mm256_add_ps(fVec, o.fVec); }
    AI SkNx operator-(const SkNx& o) const { return _mm256_sub_ps(fVec, o.fVec); }
    AI SkNx operator*(const SkNx& o) const { return _mm256_mul_ps(fVec, o.fVec); }
    AI SkNx operator/(const SkNx& o) const { return _mm256_div_ps(fVec, o.fVec); }

    AI SkNx operator==(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_EQ_OQ); }
    AI SkNx operator!=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_NEQ_OQ); }
    AI SkNx operator <(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LT_OQ); }
    AI SkNx operator >(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GT_OQ); }
    AI SkNx operator<=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LE_OQ); }
    AI SkNx operator>=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GE_OQ); }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm256_min_ps(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm256_max_ps(l.fVec, r.fVec); }

    AI SkNx sqrt() const { return _mm256_sqrt_ps (fVec); }
    AI SkNx rsqrt() const { return _mm256_rsqrt_ps(fVec); }
    AI SkNx invert() const { return _mm256_rcp_ps  (fVec); }

    AI SkNx abs() const { return _mm256_andnot_ps(_mm256_set1_ps(-0.0f), fVec); }
    AI SkNx floor() const { return _mm256_floor_ps(fVec); }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m256 v; float fs[8]; } pun = {fVec};
        return pun.fs[k&7];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm256_blendv_ps(e.fVec, t.fVec, fVec);
    }

    __m256 fVec;
};

AI static void SkNx_split(const Sk8f& v, Sk4f* lo, Sk4f* hi) {
    *lo = _mm256_extractf128_ps(v.fVec, 0);
    *hi = _mm256_extractf128_ps(v.fVec, 1);
}

AI static Sk8f SkNx_join(const Sk4f& lo, const Sk4f& hi) {
    return _mm256_insertf128_ps(_mm256_castps128_ps256(lo.fVec), hi.fVec, 1);
}

AI static Sk8f SkNx_fma(const Sk8f& a, const Sk8f& b, const Sk8f& c) {
    return _mm256_fmadd_ps(a.fVec, b.fVec, c.fVec);
}

template<> AI /*static*/ Sk8i SkNx_cast<int>(const Sk8b& src) {
    return _mm256_cvtepu8_epi32(src.fVec);
}

template<> AI /*static*/ Sk8f SkNx_cast<float>(const Sk8b& src) {
    return _mm256_cvtepi32_ps(SkNx_cast<int>(src).fVec);
}

template<> AI /*static*/ Sk8i SkNx_cast<int>(const Sk8h& src) {
    return _mm256_cvtepu16_epi32(src.fVec);
}

template<> AI /*static*/ Sk8f SkNx_cast<float>(const Sk8h& src) {
    return _mm256_cvtepi32_ps(SkNx_cast<int>(src).fVec);
}

template<> AI /*static*/ Sk8f SkNx_cast<float>(const Sk8i& src) {
    return _mm256_cvtepi32_ps(src.fVec);
}

template<> AI /*static*/ Sk8i SkNx_cast<int>(const Sk8f& src) {
    return _mm256_cvttps_epi32(src.fVec);
}

template<> AI /*static*/ Sk8h SkNx_cast<uint16_t>(const Sk8i& src) {
    __m128i lo = _mm256_extractf128_si256(src.fVec, 0),
            hi = _mm256_extractf128_si256(src.fVec, 1);
    return _mm_packus_epi32(lo, hi);
}

template<> AI /*static*/ Sk8h SkNx_cast<uint16_t>(const Sk8f& src) {
    return SkNx_cast<uint16_t>(SkNx_cast<int>(src));
}

template<> AI /*static*/ Sk8b SkNx_cast<uint8_t>(const Sk8i& src) {
    auto _16 = SkNx_cast<uint16_t>(src);
    return _mm_packus_epi16(_16.fVec, _16.fVec);
}

#endif

template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
    return _mm_cvtepi32_ps(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
    return SkNx_cast<float>(Sk4i::Load(&src));
}

template <> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
    return _mm_cvttps_epi32(src.fVec);
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
#if 0 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
    // TODO: This seems to be causing code generation problems.   Investigate?
    return _mm_packus_epi32(src.fVec, src.fVec);
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
    const int _ = ~0;
    return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
#else
    // With SSE2, we have to sign extend our input, making _mm_packs_epi32 do the pack we want.
    __m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16);
    return _mm_packs_epi32(x,x);
#endif
}
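// A note on the SSE2 branch above: the shift pair sign-extends the low 16 bits of each lane,
// so e.g. 0x0000abcd becomes 0xffffabcd (-21555). That value is within int16 range, so
// _mm_packs_epi32 passes its low 16 bits (0xabcd) through unsaturated, whereas packing the
// raw 43981 would have clamped it to 0x7fff.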

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
#else
    auto _16 = _mm_packus_epi16(_32, _32);
    return _mm_packus_epi16(_16, _16);
#endif
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
    return _mm_unpacklo_epi16(_16, _mm_setzero_si128());
#endif
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
    return _mm_cvtepi32_ps(SkNx_cast<int32_t>(src).fVec);
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
    return _mm_cvtepi32_ps(_32);
}

template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);

    return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
                                             _mm_cvttps_epi32(b.fVec)),
                            _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
                                             _mm_cvttps_epi32(d.fVec)));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return _mm_packus_epi16(src.fVec, src.fVec);
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
    return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
    return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
    return src.fVec;
}

AI static Sk4i Sk4f_round(const Sk4f& x) {
    return _mm_cvtps_epi32(x.fVec);
}
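// Note: _mm_cvtps_epi32 uses the current MXCSR rounding mode (round-to-nearest-even by
// default), so Sk4f_round differs from the truncating _mm_cvttps_epi32 used by
// SkNx_cast<int32_t> above.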

} // namespace

#endif//SkNx_sse_DEFINED
