;
;  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license and patent
;  grant that can be found in the LICENSE file in the root of the source
;  tree. All contributing project authors may be found in the AUTHORS
;  file in the root of the source tree.
;


    EXPORT  |vpx_idct8x8_1_add_neon|
    ARM
    REQUIRE8
    PRESERVE8

    AREA ||.text||, CODE, READONLY, ALIGN=2

;void vpx_idct8x8_1_add_neon(int16_t *input, uint8_t *dest,
;                            int dest_stride)
;
; r0  int16_t *input
; r1  uint8_t *dest
; r2  int dest_stride

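; In C terms, this routine computes roughly the following (a sketch using
; the helper names from the comments below, not an exact reference
; implementation):
;
;   int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
;   out = dct_const_round_shift(out * cospi_16_64);
;   int16_t a1 = ROUND_POWER_OF_TWO(out, 5);
;   for (int j = 0; j < 8; ++j, dest += dest_stride)
;     for (int i = 0; i < 8; ++i)
;       dest[i] = clip_pixel(dest[i] + a1);   /* clamp to [0, 255] */
;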
|vpx_idct8x8_1_add_neon| PROC
    ldrsh            r0, [r0]                  ; load DC coefficient input[0]

    ; generate cospi_16_64 = 11585
    mov              r12, #0x2d00
    add              r12, #0x41
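    ; (0x2d00 + 0x41 = 0x2d41 = 11585; built in two instructions because
    ; 11585 is not encodable as a single ARM immediate)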

    ; out = dct_const_round_shift(input[0] * cospi_16_64)
    mul              r0, r0, r12               ; input[0] * cospi_16_64
    add              r0, r0, #0x2000           ; +(1 << ((DCT_CONST_BITS) - 1))
    asr              r0, r0, #14               ; >> DCT_CONST_BITS

    ; out = dct_const_round_shift(out * cospi_16_64)
    mul              r0, r0, r12               ; out * cospi_16_64
    mov              r12, r1                   ; save dest
    add              r0, r0, #0x2000           ; +(1 << ((DCT_CONST_BITS) - 1))
    asr              r0, r0, #14               ; >> DCT_CONST_BITS
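    ; (cospi_16_64 / 2^14 is roughly 1/sqrt(2), so after the two rounds
    ; out is roughly input[0] / 2)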

    ; a1 = ROUND_POWER_OF_TWO(out, 5)
    add              r0, r0, #16               ; + (1 << ((5) - 1))
    asr              r0, r0, #5                ; >> 5
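    ; a1 is the single DC offset applied to all 64 destination pixels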

    vdup.s16         q0, r0                    ; duplicate a1 across all 8 lanes

    ; load destination data
    vld1.64          {d2}, [r1], r2
    vld1.64          {d3}, [r1], r2
    vld1.64          {d4}, [r1], r2
    vld1.64          {d5}, [r1], r2
    vld1.64          {d6}, [r1], r2
    vld1.64          {d7}, [r1], r2
    vld1.64          {d16}, [r1], r2
    vld1.64          {d17}, [r1]               ; last row: no post-increment

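    ; r12 still holds the original dest pointer for the stores below.
    ; vaddw.u8 widens each u8 pixel to 16 bits and adds a1; vqmovun.s16
    ; narrows back to u8 with unsigned saturation, which is clip_pixel.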
    vaddw.u8         q9, q0, d2                ; dest[x] + a1
    vaddw.u8         q10, q0, d3               ; dest[x] + a1
    vaddw.u8         q11, q0, d4               ; dest[x] + a1
    vaddw.u8         q12, q0, d5               ; dest[x] + a1
    vqmovun.s16      d2, q9                    ; clip_pixel
    vqmovun.s16      d3, q10                   ; clip_pixel
    vqmovun.s16      d30, q11                  ; clip_pixel
    vqmovun.s16      d31, q12                  ; clip_pixel
    vst1.64          {d2}, [r12], r2
    vst1.64          {d3}, [r12], r2
    vst1.64          {d30}, [r12], r2
    vst1.64          {d31}, [r12], r2

    vaddw.u8         q9, q0, d6                ; dest[x] + a1
    vaddw.u8         q10, q0, d7               ; dest[x] + a1
    vaddw.u8         q11, q0, d16              ; dest[x] + a1
    vaddw.u8         q12, q0, d17              ; dest[x] + a1
    vqmovun.s16      d2, q9                    ; clip_pixel
    vqmovun.s16      d3, q10                   ; clip_pixel
    vqmovun.s16      d30, q11                  ; clip_pixel
    vqmovun.s16      d31, q12                  ; clip_pixel
    vst1.64          {d2}, [r12], r2
    vst1.64          {d3}, [r12], r2
    vst1.64          {d30}, [r12], r2
    vst1.64          {d31}, [r12], r2

    bx               lr
    ENDP             ; |vpx_idct8x8_1_add_neon|

    END