@/*****************************************************************************
@*
@* Copyright (C) 2012 Ittiam Systems Pvt Ltd, Bangalore
@*
@* Licensed under the Apache License, Version 2.0 (the "License");
@* you may not use this file except in compliance with the License.
@* You may obtain a copy of the License at:
@*
@* http://www.apache.org/licenses/LICENSE-2.0
@*
@* Unless required by applicable law or agreed to in writing, software
@* distributed under the License is distributed on an "AS IS" BASIS,
@* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@* See the License for the specific language governing permissions and
@* limitations under the License.
@*
@*****************************************************************************/
@/**
@ *******************************************************************************
@ * @file
@ *  ihevc_itrans_recon_4x4_neon.s
@ *
@ * @brief
@ *  contains function definitions for single stage inverse transform
@ *
@ * @author
@ *  naveen sr
@ *
@ * @par list of functions:
@ *  - ihevc_itrans_recon_4x4()
@ *
@ * @remarks
@ *  none
@ *
@ *******************************************************************************
@*/
@ /**
@ *******************************************************************************
@ *
@ * @brief
@ *  this function performs inverse transform and reconstruction for a 4x4
@ * input block
@ *
@ * @par description:
@ *  performs the inverse transform, adds the prediction data and clips the
@ * output to 8 bit
@ *
@ * @param[in] pi2_src
@ *  input 4x4 coefficients
@ *
@ * @param[in] pi2_tmp
@ *  temporary 4x4 buffer for storing inverse transform 1st stage output
@ *
@ * @param[in] pu1_pred
@ *  prediction 4x4 block
@ *
@ * @param[out] pu1_dst
@ *  output 4x4 block
@ *
@ * @param[in] src_strd
@ *  input stride
@ *
@ * @param[in] pred_strd
@ *  prediction stride
@ *
@ * @param[in] dst_strd
@ *  output stride
@ *
@ * @param[in] zero_cols
@ *  zero columns in pi2_src
@ *
@ * @returns  void
@ *
@ * @remarks
@ *  none
@ *
@ *******************************************************************************
@ */
@void ihevc_itrans_recon_4x4(word16 *pi2_src,
@       word16 *pi2_tmp,
@       uword8 *pu1_pred,
@       uword8 *pu1_dst,
@       word32 src_strd,
@       word32 pred_strd,
@       word32 dst_strd,
@       word32 zero_cols)
@**************variables vs registers*************************
@   r0 => *pi2_src
@   r1 => *pi2_tmp
@   r2 => *pu1_pred
@   r3 => *pu1_dst
@   r4 => src_strd
@   r5 => pred_strd
@   r6 => dst_strd
@   r7 => zero_cols
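@
@ For reference, a minimal C-style sketch of the 4-point inverse transform
@ stage implemented below (an illustration only, assuming the standard HEVC
@ 4x4 coefficients {64, 83, 36} and <stdint.h> types; clip_s16() is a helper
@ written out here, not a library function):
@
@   static int16_t clip_s16(int32_t x)       /* clamp to signed 16-bit range */
@   {
@       return (int16_t)(x < -32768 ? -32768 : (x > 32767 ? 32767 : x));
@   }
@
@   static void inv_butterfly4(const int16_t s[4], int16_t d[4], int shift)
@   {
@       int32_t e0  = 64 * (s[0] + s[2]);     /* even part       */
@       int32_t e1  = 64 * (s[0] - s[2]);
@       int32_t o0  = 83 * s[1] + 36 * s[3];  /* odd part        */
@       int32_t o1  = 36 * s[1] - 83 * s[3];
@       int32_t add = 1 << (shift - 1);       /* rounding offset */
@       d[0] = clip_s16((e0 + o0 + add) >> shift);
@       d[1] = clip_s16((e1 + o1 + add) >> shift);
@       d[2] = clip_s16((e1 - o1 + add) >> shift);
@       d[3] = clip_s16((e0 - o0 + add) >> shift);
@   }
@
@ Stage 1 (shift = 7) applies this to each column of pi2_src, the 4x4 block is
@ transposed, stage 2 (shift = 12) applies it again, and finally pu1_pred is
@ added and the result is clamped to [0, 255] before being stored to pu1_dst.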

.equ    src_strd_offset,    104
.equ    pred_strd_offset,   108
.equ    dst_strd_offset,    112
.equ    zero_cols_offset,   116

.text
.align 4


.set shift_stage1_idct ,   7
.set shift_stage2_idct ,   12
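
@ shift_stage1_idct and shift_stage2_idct match the HEVC inverse-transform
@ scaling for 8-bit output: 7 after the first stage and (20 - bit_depth) = 12
@ after the second stage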



.globl ihevc_itrans_recon_4x4_a9q

.extern g_ai2_ihevc_trans_4_transpose

g_ai2_ihevc_trans_4_transpose_addr:
.long g_ai2_ihevc_trans_4_transpose - ulbl1 - 8

.type ihevc_itrans_recon_4x4_a9q, %function

ihevc_itrans_recon_4x4_a9q:

    stmfd       sp!, {r4-r12, r14}          @stack stores the values of the arguments
    vpush       {d8  -  d15}

    ldr         r8,g_ai2_ihevc_trans_4_transpose_addr
ulbl1:
    add         r8,r8,pc

    ldr         r4,[sp,#src_strd_offset]    @loading src_strd
    ldr         r5,[sp,#pred_strd_offset]   @loading pred_strd
    add         r4,r4,r4                    @ src_strd in terms of word16

    ldr         r6,[sp,#dst_strd_offset]    @loading dst_strd
    ldr         r7,[sp,#zero_cols_offset]   @loading zero_cols
    add         r9,r0,r4                    @ pi2_src[0] + src_strd



    vld1.16     d4,[r8]                     @loading first row of g_ai2_ihevc_trans_4_transpose
    @ d4 = {36,64,83,64}
    @index = 3  2  1  0
    add         r10,r9,r4, lsl #1           @ pi2_src[0] + 3*src_strd
    add         r4,r4,r4
    vld1.16     d1,[r9]                     @loading pi2_src 2nd row
    vld1.16     d3,[r10]                    @loading pi2_src 4th row
    vld1.16     d0,[r0],r4                  @loading pi2_src 1st row
    vld1.16     d2,[r0],r4                  @loading pi2_src 3rd row


    @ first stage computation starts
    vmull.s16   q3,d1,d4[1]                 @83 * pi2_src[1]
    vmlal.s16   q3,d3,d4[3]                 @o[0] = 83 * pi2_src[1] + 36 * pi2_src[3]
    vmull.s16   q4,d1,d4[3]                 @36 * pi2_src[1]
    vld1.32     d22[0], [r2],r5             @loading pu1_pred row 0 (interleaved)
    vmlsl.s16   q4,d3,d4[1]                 @o[1] = 36 * pi2_src[1] - 83 * pi2_src[3]

    vaddl.s16   q5,d0,d2                    @pi2_src[0] + pi2_src[2]
    vsubl.s16   q6,d0,d2                    @pi2_src[0] - pi2_src[2]
    vshl.s32    q5,q5,#6                    @e[0] = 64*(pi2_src[0] + pi2_src[2])
    vshl.s32    q6,q6,#6                    @e[1] = 64*(pi2_src[0] - pi2_src[2])

    vadd.s32    q7,q5,q3                    @(e[0] + o[0])
    vadd.s32    q8,q6,q4                    @(e[1] + o[1])
    vsub.s32    q9,q6,q4                    @(e[1] - o[1])
    vsub.s32    q10,q5,q3                   @(e[0] - o[0])

    vqrshrn.s32 d0,q7,#shift_stage1_idct    @pi2_out[0] = clip_s16((e[0] + o[0] + add)>>shift)
    vqrshrn.s32 d1,q8,#shift_stage1_idct    @pi2_out[1] = clip_s16((e[1] + o[1] + add)>>shift)
    vqrshrn.s32 d2,q9,#shift_stage1_idct    @pi2_out[2] = clip_s16((e[1] - o[1] + add)>>shift)
    vqrshrn.s32 d3,q10,#shift_stage1_idct   @pi2_out[3] = clip_s16((e[0] - o[0] + add)>>shift)
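
    @ note: vqrshrn applies the rounding term ("add" = 1 << (shift - 1)) and
    @ saturates the 32-bit sums to signed 16 bit in a single instruction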

    vtrn.16     d0,d1
    vtrn.16     d2,d3
    vtrn.32     d0,d2
    vtrn.32     d1,d3
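
    @ the two vtrn.16/vtrn.32 pairs transpose the 4x4 block of 16-bit results
    @ so that the second stage can reuse the same column-wise computation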

    @ first stage ends
    @ output in d0,d1,d2,d3
    @ second stage starts
    @ (d0-d3 now hold the transposed stage-1 output, referred to as tmp[] below)
    vmull.s16   q3,d1,d4[1]                 @83 * tmp[1]
    vld1.32     d22[1], [r2],r5             @loading pu1_pred row 1 (interleaved)
    vmlal.s16   q3,d3,d4[3]                 @o[0] = 83 * tmp[1] + 36 * tmp[3]
    vmull.s16   q4,d1,d4[3]                 @36 * tmp[1]
    vmlsl.s16   q4,d3,d4[1]                 @o[1] = 36 * tmp[1] - 83 * tmp[3]
    vld1.32     d23[0], [r2],r5             @loading pu1_pred row 2 (interleaved)

    vaddl.s16   q5,d0,d2                    @tmp[0] + tmp[2]
    vsubl.s16   q6,d0,d2                    @tmp[0] - tmp[2]
    vshl.s32    q5,q5,#6                    @e[0] = 64*(tmp[0] + tmp[2])
    vshl.s32    q6,q6,#6                    @e[1] = 64*(tmp[0] - tmp[2])

    vadd.s32    q7,q5,q3                    @(e[0] + o[0])
    vadd.s32    q8,q6,q4                    @(e[1] + o[1])
    vsub.s32    q9,q6,q4                    @(e[1] - o[1])
    vsub.s32    q10,q5,q3                   @(e[0] - o[0])

    vqrshrn.s32 d0,q7,#shift_stage2_idct    @pi2_out[0] = clip_s16((e[0] + o[0] + add)>>shift)
    vqrshrn.s32 d1,q8,#shift_stage2_idct    @pi2_out[1] = clip_s16((e[1] + o[1] + add)>>shift)
    vqrshrn.s32 d2,q9,#shift_stage2_idct    @pi2_out[2] = clip_s16((e[1] - o[1] + add)>>shift)
    vqrshrn.s32 d3,q10,#shift_stage2_idct   @pi2_out[3] = clip_s16((e[0] - o[0] + add)>>shift)
    vld1.32     d23[1], [r2],r5             @loading pu1_pred row 3 (interleaved)

    vtrn.16     d0,d1
    vtrn.16     d2,d3
    vtrn.32     d0,d2
    vtrn.32     d1,d3
    @ second stage ends
    @ output in d0,d1,d2,d3

    @ add pu1_pred (already loaded into d22/d23 above) and clamp to 8 bit

    vaddw.u8    q0,q0,d22                   @ pi2_out(16bit) + pu1_pred(8bit)
    vaddw.u8    q1,q1,d23                   @ pi2_out(16bit) + pu1_pred(8bit)
    vqmovun.s16 d0,q0                       @ clip_u8(pi2_out(16bit) + pu1_pred(8bit))
    vqmovun.s16 d1,q1                       @ clip_u8(pi2_out(16bit) + pu1_pred(8bit))

    @ storing destination
    vst1.32     {d0[0]},[r3],r6
    vst1.32     {d0[1]},[r3],r6
    vst1.32     {d1[0]},[r3],r6
    vst1.32     {d1[1]},[r3],r6

    vpop        {d8  -  d15}
    ldmfd       sp!,{r4-r12,r15}            @reload the registers from sp
