///*****************************************************************************
//*
//* Copyright (C) 2012 Ittiam Systems Pvt Ltd, Bangalore
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at:
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
//*
//*****************************************************************************/
///**
//*******************************************************************************
//* @file
//*  ihevc_sao_edge_offset_class2.s
//*
//* @brief
//*  Contains the function definition for the SAO edge offset filter for
//*  class 2 (135-degree diagonal edges). The function is coded in NEON
//*  assembly and can be compiled using ARM RVCT.
//*
//* @author
//*  Parthiban V
//*
//* @par List of Functions:
//*  ihevc_sao_edge_offset_class2_av8()
//*
//* @remarks
//*  None
//*
//*******************************************************************************
//*/
//void ihevc_sao_edge_offset_class2(UWORD8 *pu1_src,
//                              WORD32 src_strd,
//                              UWORD8 *pu1_src_left,
//                              UWORD8 *pu1_src_top,
//                              UWORD8 *pu1_src_top_left,
//                              UWORD8 *pu1_src_top_right,
//                              UWORD8 *pu1_src_bot_left,
//                              UWORD8 *pu1_avail,
//                              WORD8 *pi1_sao_offset,
//                              WORD32 wd,
//                              WORD32 ht)
//**************Variables Vs Registers*****************************************
//x0 =>    *pu1_src
//x1 =>    src_strd
//x2 =>    *pu1_src_left
//x3 =>    *pu1_src_top
//x4 =>    *pu1_src_top_left
//x5 =>    *pu1_avail
//x6 =>    *pi1_sao_offset
//x7 =>    wd
//x8 =>    ht

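//**************Reference (scalar) view of the operation***********************
//The loops below vectorize the per-pixel SAO class 2 (135-degree diagonal)
//edge offset. A minimal C sketch of that operation is given here for
//reference only (assumptions: 8-bit samples, both diagonal neighbours
//available; SIGN and CLIP3 are the usual helper macros and the boundary
//handling driven by pu1_avail/pu1_src_left/pu1_src_top is omitted):
//
//    for(row = 0; row < ht; row++, pu1_src += src_strd)
//        for(col = 0; col < wd; col++)
//        {
//            WORD32 edge_idx = 2 + SIGN(pu1_src[col] - pu1_src[col - 1 - src_strd])
//                                + SIGN(pu1_src[col] - pu1_src[col + 1 + src_strd]);
//            edge_idx = gi1_table_edge_idx[edge_idx];
//            if(0 != edge_idx)
//                pu1_src[col] = CLIP3(pu1_src[col] + pi1_sao_offset[edge_idx],
//                                     0, (1 << bit_depth) - 1);
//        }
//******************************************************************************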
.text
.p2align 2

.include "ihevc_neon_macros.s"

.globl gi1_table_edge_idx
.globl ihevc_sao_edge_offset_class2_av8

ihevc_sao_edge_offset_class2_av8:


    // STMFD sp!,{x4-x12,x14}            //stack stores the values of the arguments
    MOV         x5,x7                       //Loads pu1_avail

    LDR         x6,[sp]                     //Loads pi1_sao_offset
    LDR         w7,[sp,#8]                  //Loads wd
    LDR         w8,[sp,#16]                 //Loads ht

    MOV         x16,x7 // wd
    MOV         x17,x8 // ht


    stp         x19, x20,[sp,#-16]!
    stp         x21, x22,[sp,#-16]!
    stp         x23, x24,[sp,#-16]!

    SUB         x9,x7,#1                    //wd - 1

    LDRB        w10,[x3,x9]                 //pu1_src_top[wd - 1]

    MOV         x19,x0                      //Save pu1_src in x19
    MOV         x21,x2                      //Save pu1_src_left in x21
    MOV         x22,x3                      //Save pu1_src_top in x22
    MOV         x23,x5                      //Save pu1_avail in x23
    MOV         x24,x4                      //Save pu1_src_top_left in x24


    MOV         x9,x7                       //Move width to x9 for loop count

    SUB         sp,sp,#0xA0                 //Decrement the stack pointer to store some temp arr values

    STRB        w10,[sp]                    //u1_src_top_left_tmp = pu1_src_top[wd - 1]
    SUB         x10,x8,#1                   //ht-1
    madd        x11, x10, x1, x0            //pu1_src[(ht - 1) * src_strd + col]
    ADD         x12,sp,#0x02                //temp array

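//Copy the last row of the block into au1_src_top_tmp on the stack; it is
//written back to pu1_src_top at SRC_TOP_LOOP once the block has been filtered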
AU1_SRC_TOP_LOOP:
    LD1         {v0.8b},[x11],#8            //pu1_src[(ht - 1) * src_strd + col]
    SUBS        x9,x9,#8                    //Decrement the loop count by 8
    ST1         {v0.8b},[x12],#8            //au1_src_top_tmp[col] = pu1_src[(ht - 1) * src_strd + col]
    BNE         AU1_SRC_TOP_LOOP

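//Corner sample (0, 0): when the top-left neighbour is available (pu1_avail[4]),
//its filtered value is computed here with scalar code and held in w9
//(u1_pos_0_0_tmp) until the final write-back in RE_ASSINING_LOOP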
PU1_AVAIL_4_LOOP:
    LDRB        w10,[x5,#4]                 //pu1_avail[4]
    CMP         x10,#0
    LDRB        w9,[x0]                     //u1_pos_0_0_tmp = pu1_src[0]
    BEQ         PU1_AVAIL_7_LOOP

    LDRB        w11,[x4]                    //pu1_src_top_left[0]
    ADD         x14,x0,x1                   //pu1_src + src_strd

    SUBS        x12,x9,x11                  //pu1_src[0] - pu1_src_top_left[0]
    LDRB        w4,[x14,#1]                 //pu1_src[1 + src_strd]

    movn        x20,#0
    csel        x12, x20, x12,LT
    MOV         x20,#1
    csel        x12, x20, x12,GT            //SIGN(pu1_src[0] - pu1_src_top_left[0])

    ADRP        x14, :got:gi1_table_edge_idx //table pointer
    LDR         x14, [x14, #:got_lo12:gi1_table_edge_idx]
    SUBS        x11,x9,x4                   //pu1_src[0] - pu1_src[1 + src_strd]

    movn        x20,#0
    csel        x11, x20, x11,LT
    MOV         x20,#1
    csel        x11, x20, x11,GT            //SIGN(pu1_src[0] - pu1_src[1 + src_strd])
    ADD         x4,x12,x11                  //SIGN(pu1_src[0] - pu1_src_top_left[0]) +  SIGN(pu1_src[0] - pu1_src[1 + src_strd])
    ADD         x4,x4,#2                    //edge_idx

    LDRSB       x12,[x14,x4]                //edge_idx = gi1_table_edge_idx[edge_idx]
    CMP         x12,#0                      //0 != edge_idx
    BEQ         PU1_AVAIL_7_LOOP
    LDRSB       x10,[x6,x12]                //pi1_sao_offset[edge_idx]
    ADD         x9,x9,x10                   //pu1_src[0] + pi1_sao_offset[edge_idx]
    mov         x20,#255
    cmp         x9,x20
    csel        x9, x20, x9, ge             //u1_pos_0_0_tmp = CLIP3(pu1_src[0] + pi1_sao_offset[edge_idx], 0, (1 << bit_depth) - 1)
    mov         x20,#0
    cmp         x9,x20
    csel        x9, x20, x9, LT             //u1_pos_0_0_tmp = CLIP3(pu1_src[0] + pi1_sao_offset[edge_idx], 0, (1 << bit_depth) - 1)

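//Corner sample (wd - 1, ht - 1): when the bottom-right neighbour is available
//(pu1_avail[7]), its filtered value is computed with scalar code and held in
//w10 (u1_pos_wd_ht_tmp) until the final write-back in RE_ASSINING_LOOP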
PU1_AVAIL_7_LOOP:
    LDRB        w14,[x5,#7]                 //pu1_avail[7]
    CMP         x14,#0
    SUB         x10,x7,#1                   //wd - 1
    SUB         x11,x8,#1                   //ht - 1
    madd        x12, x11, x1, x10           //wd - 1 + (ht - 1) * src_strd
    ADD         x12,x12,x0                  //pu1_src[wd - 1 + (ht - 1) * src_strd]
    LDRB        w10,[x12]                   //u1_pos_wd_ht_tmp = pu1_src[wd - 1 + (ht - 1) * src_strd]
    BEQ         PU1_AVAIL

    SUB         x4,x12,x1                   //pu1_src[(wd - 1 + (ht - 1) * src_strd) - src_strd]
    SUB         x4,x4,#1
    LDRB        w11,[x4]                    //Load pu1_src[wd - 1 + (ht - 1) * src_strd - 1 - src_strd]
    ADD         x4,x4,#1
    ADD         x14,x12,x1                  //pu1_src[(wd - 1 + (ht - 1) * src_strd) + src_strd]

    SUBS        x11,x10,x11                 //pu1_src[wd - 1 + (ht - 1) * src_strd] - pu1_src[wd - 1 + (ht - 1) * src_strd- 1 - src_strd]
    LDRB        w4,[x14,#1]                 //Load pu1_src[wd - 1 + (ht - 1) * src_strd + 1 + src_strd]

    movn        x20,#0
    csel        x11, x20, x11,LT
    MOV         x20,#1
    csel        x11, x20, x11,GT            //SIGN(pu1_src[wd - 1 + (ht - 1) * src_strd] - pu1_src[wd - 1 + (ht - 1) * src_strd- 1 - src_strd])

    SUBS        x4,x10,x4                   //pu1_src[wd - 1 + (ht - 1) * src_strd] - pu1_src[wd - 1 + (ht - 1) * src_strd + 1 + src_strd]
    movn        x20,#0
    csel        x4, x20, x4,LT
    MOV         x20,#1
    csel        x4, x20, x4,GT              //SIGN(pu1_src[wd - 1 + (ht - 1) * src_strd] - pu1_src[wd - 1 + (ht - 1) * src_strd + 1 + src_strd])

    ADD         x11,x11,x4                  //Add the two sign values
    ADD         x11,x11,#2                  //edge_idx
    ADRP        x14, :got:gi1_table_edge_idx //table pointer
    LDR         x14, [x14, #:got_lo12:gi1_table_edge_idx]

    LDRSB       x12,[x14,x11]               //edge_idx = gi1_table_edge_idx[edge_idx]
    CMP         x12,#0
    BEQ         PU1_AVAIL
    LDRSB       x11,[x6,x12]                //pi1_sao_offset[edge_idx]
    ADD         x10,x10,x11                 //pu1_src[wd - 1 + (ht - 1) * src_strd] + pi1_sao_offset[edge_idx]
    mov         x20,#255
    cmp         x10,x20
    csel        x10, x20, x10, ge           //u1_pos_wd_ht_tmp = CLIP3(pu1_src[wd - 1 + (ht - 1) * src_strd] + pi1_sao_offset[edge_idx], 0, (1 << bit_depth) - 1)
    mov         x20,#0
    cmp         x10,x20
    csel        x10, x20, x10, LT           //u1_pos_wd_ht_tmp = CLIP3(pu1_src[wd - 1 + (ht - 1) * src_strd] + pi1_sao_offset[edge_idx], 0, (1 << bit_depth) - 1)

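//Set up the vector constants and tables used by the SIMD loops: const_2, the
//clip bounds, the SAO offset table, the edge_idx table and the column mask.
//If the bottom row is unavailable (pu1_avail[3] == 0) the last row is dropped
//from ht_tmp; if the top row is unavailable (pu1_avail[2] == 0) the first row
//is skipped and pu1_src/pu1_src_left_cpy are advanced accordingly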
PU1_AVAIL:
    MOV         x12,x8                      //Move ht
    movi        v0.16b, #2                  //const_2 = vdupq_n_s8(2)
    LDRB        w11,[x5,#3]                 //pu1_avail[3]

    MOV         x14,x2                      //Move pu1_src_left to pu1_src_left_cpy
    movi        v2.8h, #0                   //const_min_clip = vdupq_n_s16(0)
    CMP         x11,#0

    LDRB        w5,[x5,#2]                  //pu1_avail[2]
    movi        v4.8h, #255                 //const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    SUB         x20,x12,#1                  //ht_tmp--
    csel        x12, x20, x12,EQ

    CMP         x5,#0
    LD1         {v7.8b},[x6]                //offset_tbl = vld1_s8(pi1_sao_offset)
    ADRP        x11, :got:gi1_table_edge_idx //table pointer
    LDR         x11, [x11, #:got_lo12:gi1_table_edge_idx]


    ADD         x20,x0,x1                   //pu1_src += src_strd
    csel        x0, x20, x0,EQ
    LD1         {v6.8b},[x11]               //edge_idx_tbl = vld1_s8(gi1_table_edge_idx)
    SUB         x20,x12,#1                  //ht_tmp--
    csel        x12, x20, x12,EQ

    MOV         x6,x7                       //move wd to x6 loop_count
    movi        v1.16b, #0xFF               //au1_mask = vdupq_n_s8(-1)
    ADD         x20,x14,#1                  //pu1_src_left_cpy += 1
    csel        x14, x20, x14,EQ

    MOV         x15,x0
    CMP         x7,#16                      //Compare wd with 16

    BLT         WIDTH_RESIDUE               //If wd < 16, jump to WIDTH_RESIDUE, which handles the 8-pixel-wide case
    CMP         x8,#4                       //Compare ht with 4
    BLE         WD_16_HT_4_LOOP             //If ht <= 4, jump to WD_16_HT_4_LOOP

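//Main path (wd >= 16, ht > 4): filter one 16-column stripe over the full block
//height per iteration, peeling 16 columns at a time until fewer than 16 remain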
WIDTH_LOOP_16:
    MOV         x7,x16                      //Loads wd

    MOV         x5,x23                      //Loads pu1_avail
    CMP         x6,x7                       //col == wd
    LDRb        w20, [x5]                   //pu1_avail[0]
    csel        w8,w20,w8,EQ
    MOV         x20,#-1
    csel        x8, x20, x8,NE              //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)

    mov         v1.b[0], w8                 //au1_mask = vsetq_lane_s8((-1||pu1_avail[0]), au1_mask, 0)
    CMP         x6,#16                      //if(col == 16)
    BNE         SKIP_AU1_MASK_VAL
    LDRB        w8,[x5,#1]                  //pu1_avail[1]
    mov         v1.b[15], w8                //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)

SKIP_AU1_MASK_VAL:
    LDRB        w11,[x5,#2]                 //pu1_avail[2]
    CMP         x11,#0

    SUB         x20,x0,x1                   //pu1_src - src_strd
    csel        x8, x20, x8,EQ
    csel        x8, x3, x8,NE               //pu1_src_top_cpy
    SUB         x8,x8,#1                    //pu1_src_top_cpy - 1 || pu1_src - src_strd - 1

    MOV         x7,x16                      //Loads wd
    LD1         {v3.16b},[x8]               //pu1_top_row = vld1q_u8(pu1_src - src_strd - 1) || vld1q_u8(pu1_src_top_cpy - 1)
    ADD         x3,x3,#16

    ADD         x5,sp,#0x42                 //*au1_src_left_tmp
    LD1         {v5.16b},[x0]               //pu1_cur_row = vld1q_u8(pu1_src)
    MOV         x4,x17                      //Loads ht

    SUB         x7,x7,x6                    //(wd - col)
    cmhi        v17.16b,  v5.16b ,  v3.16b  //vcgtq_u8(pu1_cur_row, pu1_top_row)
    MOV         x8,x19                      //Loads *pu1_src

    ADD         x7,x7,#15                   //15 + (wd - col)
    cmhi        v16.16b,  v3.16b ,  v5.16b  //vcltq_u8(pu1_cur_row, pu1_top_row)
    ADD         x7,x8,x7                    //pu1_src[0 * src_strd + 15 + (wd - col)]

    SUB         x5,x5,#1
    SUB         v17.16b,  v16.16b ,  v17.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

AU1_SRC_LEFT_LOOP:
    LDRB        w8,[x7]                     //load the value and increment by src_strd
    ADD         x7,x7,x1
    STRB        w8,[x5,#1]!                 //store it in the temporary array on the stack
    SUBS        x4,x4,#1                    //decrement the loop count
    BNE         AU1_SRC_LEFT_LOOP

    ADD         x8,x0,x1                    //I Iteration *pu1_src + src_strd
    movi        v18.16b, #0
    MOV         x4,x23                      //I Loads pu1_avail

    MOV         x7,x12                      //row count, move ht_tmp to x7
    LD1         {v16.16b},[x8]              //I pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    LDRB        w4,[x4,#2]                  //I pu1_avail[2]

    LDRB        w5,[x8,#16]                 //I pu1_src_cpy[src_strd + 16]
    mov         v18.b[0], w5                //I pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd + 16], pu1_next_row_tmp, 0)

    EXT         v18.16b,  v16.16b ,  v18.16b,#1 //I pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 1)
    CMP         x4,#0                       //I
    BNE         SIGN_UP_CHANGE_DONE         //I

SIGN_UP_CHANGE:
    SUB         x2,x12,x7                   //I ht_tmp - row
    LDRB        w11,[x0]                    //I pu1_src_cpy[0]
    ADD         x2,x14,x2                   //I pu1_src_left_cpy[ht_tmp - row]
    SUB         x2,x2,#1
    LDRB        w5,[x2]                     //I load the value
    ADD         x2,x2,#1
    SUBS        x4,x11,x5                   //I pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]
    movn        x20,#0
    csel        x4, x20, x4,LT              //I
    MOV         x20,#1
    csel        x4, x20, x4,GT              //I SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row])
    mov         v17.b[0], w4                //I sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]), sign_up, 0)

SIGN_UP_CHANGE_DONE:
    cmhi        v3.16b,  v5.16b ,  v18.16b  //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    ADD         v24.16b,  v0.16b ,  v17.16b //I edge_idx = vaddq_s8(const_2, sign_up)

    cmhi        v18.16b,  v18.16b ,  v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    SUB         v3.16b,  v18.16b ,  v3.16b  //I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

    ADD         v24.16b,  v24.16b ,  v3.16b //I edge_idx = vaddq_s8(edge_idx, sign_down)
    TBL         v18.16b, {v6.16b},v24.16b   //I vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
//  TBL v19.8b, {v6.16b},v25.8b                //I vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))

    AND         v18.16b,  v18.16b ,  v1.16b //I edge_idx = vandq_s8(edge_idx, au1_mask)

    NEG         v17.16b, v3.16b             //I sign_up = vnegq_s8(sign_down)
    TBL         v3.16b, {v7.16b},v18.16b    //I offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    EXT         v17.16b,  v17.16b ,  v17.16b,#15 //I sign_up = vextq_s8(sign_up, sign_up, 15)

    Uxtl        v20.8h, v5.8b               //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
//  TBL v11.8b, {v7.16b},v19.8b                    //I offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    SADDW       v20.8h,  v20.8h ,  v3.8b    //I pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)

    SMAX        v20.8h,  v20.8h ,  v2.8h    //I pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    Uxtl2       v22.8h, v5.16b              //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))

    UMIN        v20.8h,  v20.8h ,  v4.8h    //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
    mov         v5.16b, v16.16b             //I pu1_cur_row = pu1_next_row

    SADDW2      v22.8h,  v22.8h ,  v3.16b   //I pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    xtn         v20.8b,  v20.8h             //I vmovn_s16(pi2_tmp_cur_row.val[0])

    SMAX        v22.8h,  v22.8h ,  v2.8h    //I pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
    SUB         x7,x7,#1                    //I Decrement the ht_tmp loop count by 1

    UMIN        v22.8h,  v22.8h ,  v4.8h    //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))

    xtn2        v20.16b,  v22.8h            //I vmovn_s16(pi2_tmp_cur_row.val[1])

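//Row loop for the 16-column stripe, unrolled by two: each pass filters the rows
//tagged II and III in the comments; the I iteration above primes pu1_cur_row
//and sign_up before the loop is entered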
PU1_SRC_LOOP:

    ST1         { v20.16b},[x0],x1          //I vst1q_u8(pu1_src_cpy, pu1_cur_row)
    ADD         x8,x0,x1                    //II iteration *pu1_src + src_strd

    LD1         {v16.16b},[x8]              //II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    ADD         x11,x8,x1                   //III iteration *pu1_src + src_strd

    LDRB        w5,[x8,#16]                 //II pu1_src_cpy[src_strd + 16]
    LD1         {v30.16b},[x11]             //III pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    LDRB        w4,[x0]                     //II pu1_src_cpy[0]

    LDRB        w8,[x11,#16]                //III pu1_src_cpy[src_strd + 16]
    mov         v28.b[0], w5                //II pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd + 16], pu1_next_row_tmp, 0)

    SUB         x5,x12,x7                   //II ht_tmp - row
    EXT         v22.16b,  v16.16b ,  v28.16b,#1 //II pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 1)
    ADD         x5,x14,x5                   //II pu1_src_left_cpy[ht_tmp - row]

    SUB         x5,x5,#1
    LDRB        w5,[x5]                     //II load the value
    mov         v18.b[0], w8                //III pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd + 16], pu1_next_row_tmp, 0)
    SUB         x7,x7,#1                    //II Decrement the ht_tmp loop count by 1

    SUBS        x4,x4,x5                    //II pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]
    EXT         v18.16b,  v30.16b ,  v18.16b,#1 //III pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 1)
    LDRB        w2,[x0,x1]                  //III pu1_src_cpy[0]

    cmhi        v24.16b,  v5.16b ,  v22.16b //II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    SUB         x5,x12,x7                   //III ht_tmp - row

    movn        x20,#0
    csel        x4, x20, x4,LT              //II
    cmhi        v22.16b,  v22.16b ,  v5.16b //II vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    ADD         x5,x14,x5                   //III pu1_src_left_cpy[ht_tmp - row]

    MOV         x20,#1
    csel        x4, x20, x4,GT              //II SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row])
    SUB         v24.16b,  v22.16b ,  v24.16b //II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    SUB         x5,x5,#1
    LDRB        w5,[x5]                     //III load the value

    SUBS        x2,x2,x5                    //III pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]
    mov         v17.b[0], w4                //II sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]), sign_up, 0)

    movn        x20,#0
    csel        x2, x20, x2,LT              //III
    cmhi        v3.16b,  v16.16b ,  v18.16b //III vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    MOV         x20,#1
    csel        x2, x20, x2,GT              //III SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row])

    ADD         v22.16b,  v0.16b ,  v17.16b //II edge_idx = vaddq_s8(const_2, sign_up)
    ADD         v22.16b,  v22.16b ,  v24.16b //II edge_idx = vaddq_s8(edge_idx, sign_down)

    cmhi        v18.16b,  v18.16b ,  v16.16b //III vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    TBL         v22.16b, {v6.16b},v22.16b   //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    NEG         v17.16b, v24.16b            //II sign_up = vnegq_s8(sign_down)

    SUB         v3.16b,  v18.16b ,  v3.16b  //III sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
//  TBL v23.8b, {v6.16b},v23.8b                //II vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
    EXT         v17.16b,  v17.16b ,  v17.16b,#15 //II sign_up = vextq_s8(sign_up, sign_up, 15)

    AND         v22.16b,  v22.16b ,  v1.16b //II edge_idx = vandq_s8(edge_idx, au1_mask)
    mov         v17.b[0], w2                //III sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]), sign_up, 0)

    ADD         v18.16b,  v0.16b ,  v17.16b //III edge_idx = vaddq_s8(const_2, sign_up)
    TBL         v24.16b, {v7.16b},v22.16b   //II offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    ADD         v18.16b,  v18.16b ,  v3.16b //III edge_idx = vaddq_s8(edge_idx, sign_down)

    Uxtl        v26.8h, v5.8b               //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    TBL         v18.16b, {v6.16b},v18.16b   //III vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    NEG         v17.16b, v3.16b             //III sign_up = vnegq_s8(sign_down)

    SADDW       v26.8h,  v26.8h ,  v24.8b   //II pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
//  TBL v19.8b, {v6.16b},v19.8b                //III vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
    EXT         v17.16b,  v17.16b ,  v17.16b,#15 //III sign_up = vextq_s8(sign_up, sign_up, 15)

    AND         v18.16b,  v18.16b ,  v1.16b //III edge_idx = vandq_s8(edge_idx, au1_mask)
    Uxtl        v20.8h, v16.8b              //III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))

    SMAX        v26.8h,  v26.8h ,  v2.8h    //II pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    TBL         v3.16b, {v7.16b},v18.16b    //III offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    SADDW       v20.8h,  v20.8h ,  v3.8b    //III pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)

    UMIN        v26.8h,  v26.8h ,  v4.8h    //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
//  TBL v25.8b, {v7.16b},v23.8b                    //II offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    SMAX        v20.8h,  v20.8h ,  v2.8h    //III pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)

    Uxtl2       v28.8h, v5.16b              //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
    UMIN        v20.8h,  v20.8h ,  v4.8h    //III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    SADDW2      v28.8h,  v28.8h ,  v24.16b  //II pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
//  TBL v11.8b, {v7.16b},v19.8b                    //III offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    SMAX        v28.8h,  v28.8h ,  v2.8h    //II pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)

    UMIN        v28.8h,  v28.8h ,  v4.8h    //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
    Uxtl2       v18.8h, v16.16b             //III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))

    mov         v5.16b, v30.16b             //III pu1_cur_row = pu1_next_row
    xtn         v26.8b,  v26.8h             //II vmovn_s16(pi2_tmp_cur_row.val[0])

    xtn2        v26.16b,  v28.8h            //II vmovn_s16(pi2_tmp_cur_row.val[1])
    SADDW2      v18.8h,  v18.8h ,  v3.16b   //III pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)

    SMAX        v18.8h,  v18.8h ,  v2.8h    //III pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
    xtn         v20.8b,  v20.8h             //III vmovn_s16(pi2_tmp_cur_row.val[0])

    SUB         x7,x7,#1                    //III Decrement the ht_tmp loop count by 1
    UMIN        v18.8h,  v18.8h ,  v4.8h    //III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
    CMP         x7,#1                       //III

    ST1         { v26.16b},[x0],x1          //II vst1q_u8(pu1_src_cpy, pu1_cur_row)
    xtn2        v20.16b,  v18.8h            //III vmovn_s16(pi2_tmp_cur_row.val[1])

    BGT         PU1_SRC_LOOP                //III If more than one row remains, jump to PU1_SRC_LOOP
    BLT         INNER_LOOP_DONE

    ST1         { v20.16b},[x0],x1          //III vst1q_u8(pu1_src_cpy, pu1_cur_row)
    ADD         x8,x0,x1                    //*pu1_src + src_strd

    LDRB        w2,[x0]                     //pu1_src_cpy[0]
    LD1         {v16.16b},[x8]              //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    LDRB        w5,[x8,#16]                 //pu1_src_cpy[src_strd + 16]

    SUB         x11,x12,x7                  //ht_tmp - row
    mov         v18.b[0], w5                //pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd + 16], pu1_next_row_tmp, 0)
    ADD         x11,x14,x11                 //pu1_src_left_cpy[ht_tmp - row]

    SUB         x11,x11,#1
    LDRB        w5,[x11]                    //load the value
    ADD         x11,x11,#1
    EXT         v18.16b,  v16.16b ,  v18.16b,#1 //pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 1)
    SUBS        x4,x2,x5                    //pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]

    cmhi        v3.16b,  v5.16b ,  v18.16b  //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    movn        x20,#0
    csel        x4, x20, x4,LT

    MOV         x20,#1
    csel        x4, x20, x4,GT              //SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row])
    cmhi        v18.16b,  v18.16b ,  v5.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)

    mov         v17.b[0], w4                //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]), sign_up, 0)
    SUB         v3.16b,  v18.16b ,  v3.16b  //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

    ADD         v18.16b,  v0.16b ,  v17.16b //edge_idx = vaddq_s8(const_2, sign_up)
    ADD         v18.16b,  v18.16b ,  v3.16b //edge_idx = vaddq_s8(edge_idx, sign_down)

    TBL         v18.16b, {v6.16b},v18.16b   //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    NEG         v17.16b, v3.16b             //sign_up = vnegq_s8(sign_down)

//  TBL v19.8b, {v6.16b},v19.8b                //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
    EXT         v17.16b,  v17.16b ,  v17.16b,#15 //sign_up = vextq_s8(sign_up, sign_up, 15)

    AND         v18.16b,  v18.16b ,  v1.16b //edge_idx = vandq_s8(edge_idx, au1_mask)

    TBL         v3.16b, {v7.16b},v18.16b    //offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))

    Uxtl        v20.8h, v5.8b               //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
//  TBL v11.8b, {v7.16b},v19.8b                    //offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    SADDW       v20.8h,  v20.8h ,  v3.8b    //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)

    SMAX        v20.8h,  v20.8h ,  v2.8h    //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    Uxtl2       v5.8h, v5.16b               //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))

    UMIN        v20.8h,  v20.8h ,  v4.8h    //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
    SADDW2      v5.8h,  v5.8h ,  v3.16b     //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)

    SMAX        v5.8h,  v5.8h ,  v2.8h      //pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
    xtn         v20.8b,  v20.8h             //vmovn_s16(pi2_tmp_cur_row.val[0])

    UMIN        v5.8h,  v5.8h ,  v4.8h      //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
    xtn2        v20.16b,  v5.8h             //vmovn_s16(pi2_tmp_cur_row.val[1])


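//Tail of the stripe: store the last filtered row, then refresh pu1_src_left
//from au1_src_left_tmp (the unfiltered right-edge column of this stripe saved
//in AU1_SRC_LEFT_LOOP) before moving on to the next stripe or the residue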
INNER_LOOP_DONE:
    ADD         x5,sp,#0x42                 //*au1_src_left_tmp
    ST1         { v20.16b},[x0],x1          //vst1q_u8(pu1_src_cpy, pu1_cur_row)
    MOV         x2,x21                      //Loads *pu1_src_left

    MOV         x8,x17                      //Loads ht
    SUB         x5,x5,#1

    SUB         x2,x2,#1
SRC_LEFT_LOOP:
    LDRB        w7,[x5,#1]!                 //au1_src_left_tmp[row]
    SUBS        x8,x8,#1
    STRB        w7,[x2,#1]!                 //pu1_src_left[row] = au1_src_left_tmp[row]
    BNE         SRC_LEFT_LOOP

    SUB         x6,x6,#16                   //Decrement the wd loop count by 16
    CMP         x6,#8                       //Check whether residue remains
    BLT         RE_ASSINING_LOOP            //Jump to re-assigning loop
    MOV         x7,x16                      //Loads wd
    MOV         x0,x15                      //Loads *pu1_src
    SUB         x7,x7,x6
    ADD         x0,x0,x7
    BGT         WIDTH_LOOP_16               //If more columns remain, jump to WIDTH_LOOP_16
    BEQ         WIDTH_RESIDUE               //If residue remains jump to residue loop


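//Path for wd >= 16 with ht <= 4: same 16-column stripe processing as above,
//but the row loop is not unrolled (one row per iteration)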
WD_16_HT_4_LOOP:
    MOV         x7,x16                      //Loads wd
    MOV         x5,x23                      //Loads pu1_avail
    CMP         x6,x7                       //col == wd
    LDRb        w20, [x5]                   //pu1_avail[0]
    csel        w8,w20,w8,EQ
    MOV         x20,#-1
    csel        x8, x20, x8,NE              //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)

    mov         v1.b[0], w8                 //au1_mask = vsetq_lane_s8((-1||pu1_avail[0]), au1_mask, 0)
    CMP         x6,#16                      //if(col == 16)
    BNE         SKIP_AU1_MASK_VAL_WD_16_HT_4
    LDRB        w8,[x5,#1]                  //pu1_avail[1]
    mov         v1.b[15], w8                //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)

SKIP_AU1_MASK_VAL_WD_16_HT_4:
    LDRB        w8,[x5,#2]                  //pu1_avail[2]
    CMP         x8,#0

    SUB         x20,x0,x1                   //pu1_src - src_strd
    csel        x8, x20, x8,EQ
    csel        x8, x3, x8,NE
    SUB         x8,x8,#1                    //pu1_src_top_cpy - 1 || pu1_src - src_strd - 1

    MOV         x7,x16                      //Loads wd
    LD1         {v3.16b},[x8]               //pu1_top_row = vld1q_u8(pu1_src - src_strd - 1) || vld1q_u8(pu1_src_top_cpy - 1)
    ADD         x3,x3,#16

    ADD         x5,sp,#0x42                 //*au1_src_left_tmp
    LD1         {v5.16b},[x0]               //pu1_cur_row = vld1q_u8(pu1_src)
    MOV         x4,x17                      //Loads ht

    SUB         x7,x7,x6                    //(wd - col)
    cmhi        v17.16b,  v5.16b ,  v3.16b  //vcgtq_u8(pu1_cur_row, pu1_top_row)
    MOV         x8,x19                      //Loads *pu1_src

    ADD         x7,x7,#15                   //15 + (wd - col)
    cmhi        v16.16b,  v3.16b ,  v5.16b  //vcltq_u8(pu1_cur_row, pu1_top_row)
    ADD         x7,x8,x7                    //pu1_src[0 * src_strd + 15 + (wd - col)]

    SUB         x5,x5,#1
    SUB         v17.16b,  v16.16b ,  v17.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

AU1_SRC_LEFT_LOOP_WD_16_HT_4:
    LDRB        w8,[x7]                     //load the value and increment by src_strd
    ADD         x7,x7,x1
    SUBS        x4,x4,#1                    //decrement the loop count
    STRB        w8,[x5,#1]!                 //store it in the temporary array on the stack
    BNE         AU1_SRC_LEFT_LOOP_WD_16_HT_4

    movi        v18.16b, #0
    MOV         x7,x12                      //row count, move ht_tmp to x7

PU1_SRC_LOOP_WD_16_HT_4:
    ADD         x8,x0,x1                    //*pu1_src + src_strd
    LD1         {v16.16b},[x8]              //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)

    LDRB        w5,[x8,#16]                 //pu1_src_cpy[src_strd + 16]
    mov         v18.b[0], w5                //pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd + 16], pu1_next_row_tmp, 0)
    EXT         v18.16b,  v16.16b ,  v18.16b,#1 //pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 1)

    CMP         x7,x12
    BLT         SIGN_UP_CHANGE_WD_16_HT_4
    MOV         x5,x23                      //Loads pu1_avail
    LDRB        w5,[x5,#2]                  //pu1_avail[2]
    CMP         x5,#0
    BNE         SIGN_UP_CHANGE_DONE_WD_16_HT_4

SIGN_UP_CHANGE_WD_16_HT_4:
    LDRB        w8,[x0]                     //pu1_src_cpy[0]
    SUB         x5,x12,x7                   //ht_tmp - row
    ADD         x5,x14,x5                   //pu1_src_left_cpy[ht_tmp - row]
    SUB         x5,x5,#1
    LDRB        w5,[x5]                     //load the value
    SUBS        x8,x8,x5                    //pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]
    movn        x20,#0
    csel        x8, x20, x8,LT
    MOV         x20,#1
    csel        x8, x20, x8,GT              //SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row])
    mov         v17.b[0], w8                //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]), sign_up, 0)

SIGN_UP_CHANGE_DONE_WD_16_HT_4:
    cmhi        v20.16b,  v5.16b ,  v18.16b //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    cmhi        v22.16b,  v18.16b ,  v5.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    SUB         v24.16b,  v22.16b ,  v20.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

    ADD         v26.16b,  v0.16b ,  v17.16b //edge_idx = vaddq_s8(const_2, sign_up)
    ADD         v26.16b,  v26.16b ,  v24.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
    TBL         v26.16b, {v6.16b},v26.16b   //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
//  TBL v27.8b, {v6.16b},v27.8b                //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))

    AND         v26.16b,  v26.16b ,  v1.16b //edge_idx = vandq_s8(edge_idx, au1_mask)

    NEG         v17.16b, v24.16b            //sign_up = vnegq_s8(sign_down)
    EXT         v17.16b,  v17.16b ,  v17.16b,#15 //sign_up = vextq_s8(sign_up, sign_up, 15)

    TBL         v24.16b, {v7.16b},v26.16b   //offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    Uxtl        v28.8h, v5.8b               //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    SADDW       v28.8h,  v28.8h ,  v24.8b   //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    SMAX        v28.8h,  v28.8h ,  v2.8h    //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    UMIN        v28.8h,  v28.8h ,  v4.8h    //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

//  TBL v25.8b, {v7.16b},v27.8b                    //offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    Uxtl2       v30.8h, v5.16b              //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
    SADDW2      v30.8h,  v30.8h ,  v24.16b  //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    SMAX        v30.8h,  v30.8h ,  v2.8h    //pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
    UMIN        v30.8h,  v30.8h ,  v4.8h    //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))

    xtn         v28.8b,  v28.8h             //vmovn_s16(pi2_tmp_cur_row.val[0])
    xtn2        v28.16b,  v30.8h            //vmovn_s16(pi2_tmp_cur_row.val[1])

    ST1         { v28.16b},[x0],x1          //vst1q_u8(pu1_src_cpy, pu1_cur_row)

    mov         v5.16b, v16.16b             //pu1_cur_row = pu1_next_row
    SUBS        x7,x7,#1                    //Decrement the ht_tmp loop count by 1
    BNE         PU1_SRC_LOOP_WD_16_HT_4     //If not equal jump to PU1_SRC_LOOP_WD_16_HT_4

    MOV         x8,x17                      //Loads ht
    ADD         x5,sp,#0x42                 //*au1_src_left_tmp
    MOV         x2,x21                      //Loads *pu1_src_left
    SUB         x5,x5,#1
    SUB         x2,x2,#1

SRC_LEFT_LOOP_WD_16_HT_4:
    LDRB        w7,[x5,#1]!                 //au1_src_left_tmp[row]
    STRB        w7,[x2,#1]!                 //pu1_src_left[row] = au1_src_left_tmp[row]
    SUBS        x8,x8,#1
    BNE         SRC_LEFT_LOOP_WD_16_HT_4

    SUBS        x6,x6,#16                   //Decrement the wd loop count by 16
    BLE         RE_ASSINING_LOOP            //Jump to re-assigning loop


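//Residue path: handles the final stripe of fewer than 16 columns (or wd < 16);
//only the low 8 result lanes are stored per row, and au1_mask lane 7 carries
//pu1_avail[1] for the right edge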
WIDTH_RESIDUE:
    MOV         x7,x16                      //Loads wd
    MOV         x5,x23                      //Loads pu1_avail
    CMP         x6,x7                       //wd_residue == wd
    LDRb        w20, [x5]                   //pu1_avail[0]
    csel        w8,w20,w8,EQ

    MOV         x20,#-1
    csel        x8, x20, x8,NE
    mov         v1.b[0], w8                 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)

    LDRB        w8,[x5,#1]                  //pu1_avail[1]
    mov         v1.b[7], w8                 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 7)

PU1_AVAIL_2_RESIDUE:
    LDRB        w11,[x5,#2]                 //pu1_avail[2]
    LD1         {v5.16b},[x0]               //pu1_cur_row = vld1q_u8(pu1_src)
    CMP         x11,#0

    SUB         x20,x0,x1                   //pu1_src - src_strd
    csel        x8, x20, x8,EQ
    csel        x8, x3, x8,NE

    SUB         x8,x8,#1

    ADD         x5,sp,#0x42                 //*au1_src_left_tmp
    LD1         {v3.16b},[x8],#16           //pu1_top_row = vld1q_u8(pu1_src_top_cpy - 1)
    MOV         x7,x16                      //Loads wd

    MOV         x4,x17                      //Loads ht
    cmhi        v17.16b,  v5.16b ,  v3.16b  //vcgtq_u8(pu1_cur_row, pu1_top_row)
    SUB         x7,x7,#1                    //(wd - 1)

    MOV         x8,x19                      //Loads *pu1_src
    cmhi        v16.16b,  v3.16b ,  v5.16b  //vcltq_u8(pu1_cur_row, pu1_top_row)
    SUB         x5,x5,#1

    ADD         x7,x8,x7                    //pu1_src[0 * src_strd + (wd - 1)]
    SUB         v17.16b,  v16.16b ,  v17.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))


AU1_SRC_LEFT_LOOP_RESIDUE:
    LDRB        w8,[x7]                     //load the value and increment by src_strd
    ADD         x7,x7,x1
    SUBS        x4,x4,#1                    //decrement the loop count
    STRB        w8,[x5,#1]!                 //store it in the temporary array on the stack
    BNE         AU1_SRC_LEFT_LOOP_RESIDUE


    MOV         x7,x12                      //row count, move ht_tmp to x7

PU1_SRC_LOOP_RESIDUE:
    movi        v18.16b, #0
    ADD         x8,x0,x1                    //*pu1_src + src_strd
    LD1         {v16.16b},[x8]              //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)

    LDRB        w8,[x8,#16]                 //pu1_src_cpy[src_strd + 16]
    mov         v18.b[0], w8                //pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd + 16], pu1_next_row_tmp, 0)
    EXT         v18.16b,  v16.16b ,  v18.16b,#1 //pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 1)

    CMP         x7,x12
    BLT         SIGN_UP_CHANGE_RESIDUE
    MOV         x5,x23                      //Loads pu1_avail
    LDRB        w5,[x5,#2]                  //pu1_avail[2]
    CMP         x5,#0
    BNE         SIGN_UP_CHANGE_DONE_RESIDUE

SIGN_UP_CHANGE_RESIDUE:
    LDRB        w8,[x0]                     //pu1_src_cpy[0]
    SUB         x5,x12,x7                   //ht_tmp - row

    ADD         x5,x14,x5
    SUB         x5,x5,#1
    LDRB        w5,[x5]                     //load the value
    SUBS        x8,x8,x5                    //pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]
    movn        x20,#0
    csel        x8, x20, x8,LT
    MOV         x20,#1
    csel        x8, x20, x8,GT              //SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row])
    mov         v17.b[0], w8                //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[ht_tmp - 1 - row]), sign_up, 0)

SIGN_UP_CHANGE_DONE_RESIDUE:
    cmhi        v20.16b,  v5.16b ,  v18.16b //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    cmhi        v22.16b,  v18.16b ,  v5.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    SUB         v24.16b,  v22.16b ,  v20.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

    ADD         v26.16b,  v0.16b ,  v17.16b //edge_idx = vaddq_s8(const_2, sign_up)
    ADD         v26.16b,  v26.16b ,  v24.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
    TBL         v26.16b, {v6.16b},v26.16b   //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
//  TBL v27.8b, {v6.16b},v27.8b                //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))

    AND         v26.16b,  v26.16b ,  v1.16b //edge_idx = vandq_s8(edge_idx, au1_mask)

    NEG         v17.16b, v24.16b            //sign_up = vnegq_s8(sign_down)
    EXT         v17.16b,  v17.16b ,  v17.16b,#15 //sign_up = vextq_s8(sign_up, sign_up, 15)

    TBL         v24.8b, {v7.16b},v26.8b     //offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    Uxtl        v28.8h, v5.8b               //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    SADDW       v28.8h,  v28.8h ,  v24.8b   //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    SMAX        v28.8h,  v28.8h ,  v2.8h    //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    UMIN        v28.8h,  v28.8h ,  v4.8h    //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    xtn         v30.8b,  v28.8h             //vmovn_s16(pi2_tmp_cur_row.val[0])

    ST1         {v30.8b},[x0],x1            //vst1q_u8(pu1_src_cpy, pu1_cur_row)
    mov         v5.16b, v16.16b             //pu1_cur_row = pu1_next_row
    SUBS        x7,x7,#1
    BNE         PU1_SRC_LOOP_RESIDUE

    MOV         x8,x17                      //Loads ht
    ADD         x5,sp,#0x42                 //*au1_src_left_tmp

    MOV         x2,x21                      //Loads *pu1_src_left
    SUB         x5,x5,#1

    SUB         x2,x2,#1

SRC_LEFT_LOOP_RESIDUE:
    LDRB        w7,[x5,#1]!                 //au1_src_left_tmp[row]
    SUBS        x8,x8,#1
    STRB        w7,[x2,#1]!                 //pu1_src_left[row] = au1_src_left_tmp[row]
    BNE         SRC_LEFT_LOOP_RESIDUE


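//Write-back: restore the scalar corner results (u1_pos_0_0_tmp and
//u1_pos_wd_ht_tmp), the saved top-left sample and the saved top row
//(SRC_TOP_LOOP) to their output buffers, then restore the stack and return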
RE_ASSINING_LOOP:
    MOV         x8,x17                      //Loads ht
    MOV         x7,x16                      //Loads wd

    MOV         x0,x19                      //Loads *pu1_src
    SUB         x8,x8,#1                    //ht - 1

    madd        x6, x8, x1, x7              //wd - 1 + (ht - 1) * src_strd
    STRB        w9,[x0]                     //pu1_src_org[0] = u1_pos_0_0_tmp

    MOV         x4,x24                      //Loads pu1_src_top_left
    ADD         x6,x0,x6                    //pu1_src[wd - 1 + (ht - 1) * src_strd]

    ADD         x12,sp,#0x02
    SUB         x6,x6,#1
    STRB        w10,[x6]                    //pu1_src_org[wd - 1 + (ht - 1) * src_strd] = u1_pos_wd_ht_tmp
    ADD         x6,x6,#1

    LDRB        w11,[sp]                    //load u1_src_top_left_tmp from stack pointer
    MOV         x3,x22                      //Loads pu1_src_top

    STRB        w11,[x4]                    //*pu1_src_top_left = u1_src_top_left_tmp

SRC_TOP_LOOP:
    LD1         {v0.8b},[x12],#8            //pu1_src_top[col] = au1_src_top_tmp[col]
    SUBS        x7,x7,#8                    //Decrement the width
    ST1         {v0.8b},[x3],#8             //pu1_src_top[col] = au1_src_top_tmp[col]
    BNE         SRC_TOP_LOOP

END_LOOPS:
    ADD         sp,sp,#0xA0
    // LDMFD sp!,{x4-x12,x15}             //Reload the registers from SP
    ldp         x23, x24,[sp],#16
    ldp         x21, x22,[sp],#16
    ldp         x19, x20,[sp],#16

    ret
