/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include "vpx_ports/arm.h"

#ifdef VPX_INCOMPATIBLE_GCC
#include "./vp8_rtcd.h"
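// VPX_INCOMPATIBLE_GCC is set by vpx_ports/arm.h for compilers known to
// mishandle these NEON intrinsics; in that case fall back to the C version.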
void vp8_short_walsh4x4_neon(
        int16_t *input,
        int16_t *output,
        int pitch) {
  vp8_short_walsh4x4_c(input, output, pitch);
}
#else
void vp8_short_walsh4x4_neon(
        int16_t *input,
        int16_t *output,
        int pitch) {
    uint16x4_t d16u16;
    int16x8_t q0s16, q1s16;
    int16x4_t dEmptys16, d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
    int32x4_t qEmptys32, q0s32, q1s32, q2s32, q3s32, q8s32;
    int32x4_t q9s32, q10s32, q11s32, q15s32;
    uint32x4_t q8u32, q9u32, q10u32, q11u32;
    int16x4x2_t v2tmp0, v2tmp1;
    int32x2x2_t v2tmp2, v2tmp3;

    dEmptys16 = vdup_n_s16(0);
    qEmptys32 = vdupq_n_s32(0);
    q15s32 = vdupq_n_s32(3);

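    // Load the four rows of the 4x4 block; pitch is given in bytes, so the
    // int16_t pointer advances by pitch/2 elements per row.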
    d0s16 = vld1_s16(input);
    input += pitch/2;
    d1s16 = vld1_s16(input);
    input += pitch/2;
    d2s16 = vld1_s16(input);
    input += pitch/2;
    d3s16 = vld1_s16(input);

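    // Transpose the block so each d register holds one input column, with
    // lanes indexed by row; the row transform below then handles all four
    // rows at once.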
    v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16),
                      vreinterpret_s32_s16(d2s16));
    v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16),
                      vreinterpret_s32_s16(d3s16));
    v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]),   // d0
                      vreinterpret_s16_s32(v2tmp3.val[0]));  // d1
    v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]),   // d2
                      vreinterpret_s16_s32(v2tmp3.val[1]));  // d3

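    // First (row) pass, computed for all four rows at once:
    // in[0]+in[2], in[1]+in[3], in[1]-in[3] and in[0]-in[2].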
    d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[0]);
    d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[1]);
    d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[1]);
    d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[0]);

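    // Scale by 4.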
    d4s16 = vshl_n_s16(d4s16, 2);
    d5s16 = vshl_n_s16(d5s16, 2);
    d6s16 = vshl_n_s16(d6s16, 2);
    d7s16 = vshl_n_s16(d7s16, 2);

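    // 0/-1 mask of the rows where d4 is nonzero.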
    d16u16 = vceq_s16(d4s16, dEmptys16);
    d16u16 = vmvn_u16(d16u16);

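    // Row butterflies: the first and last coefficients of each row come from
    // d4 +/- d5, the middle two from d7 +/- d6.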
    d0s16 = vadd_s16(d4s16, d5s16);
    d3s16 = vsub_s16(d4s16, d5s16);
    d1s16 = vadd_s16(d7s16, d6s16);
    d2s16 = vsub_s16(d7s16, d6s16);

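    // Subtracting the 0/-1 mask adds 1 to the first coefficient of every row
    // whose d4 term is nonzero (the "+ (a1 != 0)" step of the C version).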
    d0s16 = vsub_s16(d0s16, vreinterpret_s16_u16(d16u16));

    // Second (column) pass: transpose the intermediate results and repeat
    // the butterflies down the columns, widening to 32 bits.
    v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d1s16),
                      vreinterpret_s32_s16(d3s16));
    v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d0s16),
                      vreinterpret_s32_s16(d2s16));
    v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp3.val[1]),   // d2
                      vreinterpret_s16_s32(v2tmp2.val[1]));  // d3
    v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp3.val[0]),   // d0
                      vreinterpret_s16_s32(v2tmp2.val[0]));  // d1

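    // Cross-row sums and differences of the intermediate block, widened to
    // 32 bits to avoid overflow.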
    q8s32  = vaddl_s16(v2tmp1.val[0], v2tmp0.val[0]);
    q9s32  = vaddl_s16(v2tmp1.val[1], v2tmp0.val[1]);
    q10s32 = vsubl_s16(v2tmp1.val[1], v2tmp0.val[1]);
    q11s32 = vsubl_s16(v2tmp1.val[0], v2tmp0.val[0]);

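    // Second butterfly stage: q0..q3 hold the unrounded coefficients of
    // output rows 0..3.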
    q0s32 = vaddq_s32(q8s32, q9s32);
    q1s32 = vaddq_s32(q11s32, q10s32);
    q2s32 = vsubq_s32(q11s32, q10s32);
    q3s32 = vsubq_s32(q8s32, q9s32);

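    // Each compare gives a 0/-1 mask of the negative lanes; subtracting it
    // below implements x += (x < 0).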
    q8u32  = vcltq_s32(q0s32, qEmptys32);
    q9u32  = vcltq_s32(q1s32, qEmptys32);
    q10u32 = vcltq_s32(q2s32, qEmptys32);
    q11u32 = vcltq_s32(q3s32, qEmptys32);

    q8s32  = vreinterpretq_s32_u32(q8u32);
    q9s32  = vreinterpretq_s32_u32(q9u32);
    q10s32 = vreinterpretq_s32_u32(q10u32);
    q11s32 = vreinterpretq_s32_u32(q11u32);

    q0s32 = vsubq_s32(q0s32, q8s32);
    q1s32 = vsubq_s32(q1s32, q9s32);
    q2s32 = vsubq_s32(q2s32, q10s32);
    q3s32 = vsubq_s32(q3s32, q11s32);

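    // Final rounding: (x + 3) >> 3, narrowed back to 16 bits.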
    q8s32  = vaddq_s32(q0s32, q15s32);
    q9s32  = vaddq_s32(q1s32, q15s32);
    q10s32 = vaddq_s32(q2s32, q15s32);
    q11s32 = vaddq_s32(q3s32, q15s32);

    d0s16 = vshrn_n_s32(q8s32, 3);
    d1s16 = vshrn_n_s32(q9s32, 3);
    d2s16 = vshrn_n_s32(q10s32, 3);
    d3s16 = vshrn_n_s32(q11s32, 3);

    q0s16 = vcombine_s16(d0s16, d1s16);
    q1s16 = vcombine_s16(d2s16, d3s16);

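    // Store the four output rows (16 coefficients) contiguously.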
    vst1q_s16(output, q0s16);
    vst1q_s16(output + 8, q1s16);
    return;
}
#endif  // VPX_INCOMPATIBLE_GCC