/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBlitRow_opts_SSE4.h"

// Some compilers can't compile SSSE3 or SSE4 intrinsics. We give them stub methods.
// The stubs should never be called, so we make them crash just to confirm that.
#if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_SSE41
void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT, const SkPMColor* SK_RESTRICT, int, U8CPU) {
    sk_throw();
}

#else

#include <smmintrin.h>      // SSE4.1 intrinsics
#include "SkColorPriv.h"
#include "SkColor_opts_SSE2.h"
#include "SkMSAN.h"

void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT dst,
                                const SkPMColor* SK_RESTRICT src,
                                int count,
                                U8CPU alpha) {
    sk_msan_assert_initialized(src, src+count);

    SkASSERT(alpha == 255);
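    // Strategy: handle the row in groups of 16 pixels, using SSE4.1 PTEST to
    // pick one of three paths per group: skip it entirely (all transparent),
    // store src without reading dst (all opaque), or do the full blend.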
    // As long as we can, we'll work on 16 pixels at once.
    int count16 = count / 16;
    __m128i* dst4 = (__m128i*)dst;
    const __m128i* src4 = (const __m128i*)src;

    for (int i = 0; i < count16 * 4; i += 4) {
        // Load 16 source pixels.
        __m128i s0 = _mm_loadu_si128(src4+i+0),
                s1 = _mm_loadu_si128(src4+i+1),
                s2 = _mm_loadu_si128(src4+i+2),
                s3 = _mm_loadu_si128(src4+i+3);

        const __m128i alphaMask = _mm_set1_epi32(0xFF << SK_A32_SHIFT);
        const __m128i ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0)));
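        // _mm_testz_si128(a, b) returns 1 iff (a & b) == 0, so testing the OR
        // of all 16 pixels against alphaMask asks whether every source alpha
        // in this group is zero.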
        if (_mm_testz_si128(ORed, alphaMask)) {
            // All 16 source pixels are fully transparent. There's nothing to do!
            continue;
        }
        const __m128i ANDed = _mm_and_si128(s3, _mm_and_si128(s2, _mm_and_si128(s1, s0)));
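        // _mm_testc_si128(a, b) returns 1 iff (~a & b) == 0, i.e. a has every
        // bit of b set; here that means all 16 source alpha bytes are 0xFF.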
        if (_mm_testc_si128(ANDed, alphaMask)) {
            // All 16 source pixels are fully opaque. There's no need to read dst or blend it.
            _mm_storeu_si128(dst4+i+0, s0);
            _mm_storeu_si128(dst4+i+1, s1);
            _mm_storeu_si128(dst4+i+2, s2);
            _mm_storeu_si128(dst4+i+3, s3);
            continue;
        }
        // The general slow case: do the blend for all 16 pixels.
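        // SkPMSrcOver_SSE2() computes src + ((dst * (256 - srcA)) >> 8) per
        // channel, the SSE2 counterpart of the portable SkPMSrcOver().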
        _mm_storeu_si128(dst4+i+0, SkPMSrcOver_SSE2(s0, _mm_loadu_si128(dst4+i+0)));
        _mm_storeu_si128(dst4+i+1, SkPMSrcOver_SSE2(s1, _mm_loadu_si128(dst4+i+1)));
        _mm_storeu_si128(dst4+i+2, SkPMSrcOver_SSE2(s2, _mm_loadu_si128(dst4+i+2)));
        _mm_storeu_si128(dst4+i+3, SkPMSrcOver_SSE2(s3, _mm_loadu_si128(dst4+i+3)));
    }

    // Wrap up the last <= 15 pixels.
    for (int i = count16*16; i < count; i++) {
        // This check is not really necessary, but it prevents pointless autovectorization.
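        // (Src-over with a fully transparent source leaves dst unchanged, so
        // skipping those pixels is safe.)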
        if (src[i] & 0xFF000000) {
            dst[i] = SkPMSrcOver(src[i], dst[i]);
        }
    }
}
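
// A hypothetical caller, for illustration only (Skia normally reaches this
// routine through its SkBlitRow platform-proc dispatch rather than directly):
//
//     SkPMColor dst[64], src[64];
//     /* ...fill both with premultiplied pixels... */
//     S32A_Opaque_BlitRow32_SSE4(dst, src, 64, 0xFF);  // alpha must be 255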

#endif