1 /*
2  *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include <arm_neon.h>
12 
/* DC-only inverse transform + reconstruction for two adjacent 4x4 blocks.
 *
 * When only the DC coefficient of a block is nonzero, the full inverse DCT
 * collapses to adding a single constant to every pixel of the 4x4
 * prediction.  This routine handles two horizontally adjacent blocks at
 * once: block 0 uses q[0], block 1 uses q[16].
 *
 * q      - quantized coefficients; q[0] and q[16] are consumed and then
 *          cleared to zero (the caller expects the coefficient buffer to be
 *          reset after reconstruction).
 * dq     - DC dequantization factor applied to both coefficients.
 * dst    - reconstruction buffer holding the prediction; updated in place.
 * stride - byte pitch between rows of dst.
 *
 * NOTE(review): the 32-bit lane loads/stores cast dst to int32_t*, which
 * assumes each 4-byte row chunk of dst may be accessed as a word —
 * standard in this codebase's NEON kernels, but worth confirming dst
 * alignment guarantees on the target platform.
 */
void idct_dequant_0_2x_neon(
        int16_t *q,
        int16_t dq,
        unsigned char *dst,
        int stride) {
    unsigned char *dst0;
    int i, a0, a1;
    int16x8x2_t q2Add;
    int32x2_t d2s32 = vdup_n_s32(0),
              d4s32 = vdup_n_s32(0);
    uint8x8_t d2u8, d4u8;
    uint16x8_t q1u16, q2u16;

    /* Dequantize each DC and apply the inverse-transform rounding
     * (+4 >> 3); this is the value added to every pixel of its block. */
    a0 = ((q[0] * dq) + 4) >> 3;
    a1 = ((q[16] * dq) + 4) >> 3;
    q[0] = q[16] = 0;  /* clear consumed coefficients */
    q2Add.val[0] = vdupq_n_s16((int16_t)a0);
    q2Add.val[1] = vdupq_n_s16((int16_t)a1);

    /* Two iterations: block 0 then block 1, 4 pixels further right. */
    for (i = 0; i < 2; i++, dst += 4) {
        /* Gather four 4-byte rows of the prediction into two D registers. */
        dst0 = dst;
        d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 0);
        dst0 += stride;
        d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 1);
        dst0 += stride;
        d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 0);
        dst0 += stride;
        d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 1);

        /* Widen the 8-bit prediction to 16 bits and add the (possibly
         * negative) DC value; the s16->u16 reinterpret is intentional —
         * the arithmetic is the same bit pattern either way. */
        q1u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
                         vreinterpret_u8_s32(d2s32));
        q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
                         vreinterpret_u8_s32(d4s32));

        /* Saturate back to unsigned 8-bit pixels (clamps to [0, 255]). */
        d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
        d4u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16));

        d2s32 = vreinterpret_s32_u8(d2u8);
        d4s32 = vreinterpret_s32_u8(d4u8);

        /* Scatter the four reconstructed rows back out. */
        dst0 = dst;
        vst1_lane_s32((int32_t *)dst0, d2s32, 0);
        dst0 += stride;
        vst1_lane_s32((int32_t *)dst0, d2s32, 1);
        dst0 += stride;
        vst1_lane_s32((int32_t *)dst0, d4s32, 0);
        dst0 += stride;
        vst1_lane_s32((int32_t *)dst0, d4s32, 1);
    }
}
64