///*****************************************************************************
//*
//* Copyright (C) 2012 Ittiam Systems Pvt Ltd, Bangalore
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at:
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
//*
//*****************************************************************************/
///**
//*******************************************************************************
//* @file
//*  ihevc_sao_edge_offset_class3.s
//*
//* @brief
//*  Contains function definitions for the SAO edge offset filter, class 3
//* (135 degree diagonal). Functions are coded using NEON instructions and
//* can be compiled using ARM RVCT
//*
//* @author
//*  Parthiban V
//*
//* @par List of Functions:
//*  - ihevc_sao_edge_offset_class3_av8()
//*
//* @remarks
//*  None
//*
//*******************************************************************************
//*/
//void ihevc_sao_edge_offset_class3(UWORD8 *pu1_src,
//                              WORD32 src_strd,
//                              UWORD8 *pu1_src_left,
//                              UWORD8 *pu1_src_top,
//                              UWORD8 *pu1_src_top_left,
//                              UWORD8 *pu1_src_top_right,
//                              UWORD8 *pu1_src_bot_left,
//                              UWORD8 *pu1_avail,
//                              WORD8 *pi1_sao_offset,
//                              WORD32 wd,
//                              WORD32 ht)
//**************Variables Vs Registers*****************************************
//x0 =>    *pu1_src
//x1 =>    src_strd
//x2 =>    *pu1_src_left
//x3 =>    *pu1_src_top
//x4 =>    *pu1_src_top_left
//x5 =>    *pu1_avail
//x6 =>    *pi1_sao_offset
//x7 =>    wd
//x8 =>    ht
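//
//**************Reference C sketch*********************************************
//The NEON code below vectorises, 16 pixels at a time, the scalar class 3
//(135 degree diagonal) decision sketched here. This fragment is only an
//illustrative sketch reconstructed from the intrinsic-style comments in this
//file; the names u1_cur, u1_top_right and u1_bot_left are hypothetical.
//
//    WORD32 sign_up   = SIGN(u1_cur - u1_top_right); //neighbour at (x + 1, y - 1)
//    WORD32 sign_down = SIGN(u1_cur - u1_bot_left);  //neighbour at (x - 1, y + 1)
//    WORD32 edge_idx  = gi1_table_edge_idx[2 + sign_up + sign_down];
//    if(0 != edge_idx)
//        u1_cur = CLIP3(u1_cur + pi1_sao_offset[edge_idx],
//                       0, (1 << bit_depth) - 1);
//******************************************************************************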

.text
.p2align 2

.include "ihevc_neon_macros.s"

.globl gi1_table_edge_idx
.globl ihevc_sao_edge_offset_class3_av8

ihevc_sao_edge_offset_class3_av8:


    // STMFD sp!,{x4-x12,x14}            //stack stores the values of the arguments
    stp         x19, x20,[sp,#-16]!
    stp         x21, x22,[sp,#-16]!
    stp         x23, x24,[sp,#-16]!

    MOV         x19,x0                      //Store pu1_src in x19
    MOV         x21,x6                      //Store pu1_src_bot_left in x21
    MOV         x22,x3                      //Store pu1_src_top in x22
    MOV         x23,x7                      //Store pu1_avail in x23
    MOV         x24,x4                      //Store pu1_src_top_left in x24
    MOV         x20,x5                      //Store pu1_src_top_right in x20
    MOV         x13,x6                      //Store pu1_src_bot_left in x13

    MOV         x5,x7                       //Loads pu1_avail

    LDR         x6,[sp,#48]                 //Loads pi1_sao_offset
    LDR         w7,[sp,#56]                 //Loads wd
    LDR         w8,[sp,#64]                 //Loads ht

    MOV         x16,x7                      //wd
    MOV         x17,x8                      //ht

    SUB         x9,x7,#1                    //wd - 1

    LDRB        w10,[x3,x9]                 //pu1_src_top[wd - 1]

    MOV         x9,x7                       //Move width to x9 for loop count

    SUB         sp,sp,#0xA0                 //Decrement the stack pointer to store some temp arr values

    STRB        w10,[sp]                    //u1_src_top_left_tmp = pu1_src_top[wd - 1]
    SUB         x10,x8,#1                   //ht-1
    madd        x11, x10, x1, x0            //pu1_src[(ht - 1) * src_strd + col]
    ADD         x12,sp,#0x02                //temp array

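    //Copy the last source row into the temp array on the stack, since it must
    //become the new pu1_src_top after the rows above it are modified in place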
AU1_SRC_TOP_LOOP:
    LD1         {v0.8b},[x11],#8            //pu1_src[(ht - 1) * src_strd + col]
    SUBS        x9,x9,#8                    //Decrement the loop count by 8
    ST1         {v0.8b},[x12],#8            //au1_src_top_tmp[col] = pu1_src[(ht - 1) * src_strd + col]
    BNE         AU1_SRC_TOP_LOOP

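    //If the top-right neighbour is available (pu1_avail[5]), filter the
    //corner pixel at (wd - 1, 0) as a scalar special case before the
    //vector loops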
PU1_AVAIL_5_LOOP:
    LDRB        w9,[x5,#5]                  //pu1_avail[5]
    CMP         x9,#0
    SUB         x10,x7,#1                   //[wd - 1]
    LDRB        w9,[x0,x10]                 //u1_pos_0_0_tmp = pu1_src[wd - 1]
    BEQ         PU1_AVAIL_6_LOOP

    MOV         x11,x20                     //Load pu1_src_top_right from x20
    SUB         x10,x10,#1                  //[wd - 1 - 1]

    LDRB        w11,[x11]                   //pu1_src_top_right[0]
    SUB         x12,x9,x11                  //pu1_src[wd - 1] - pu1_src_top_right[0]

    ADD         x11,x0,x1                   //pu1_src + src_strd

    LDRB        w14,[x11,x10]               //pu1_src[wd - 1 - 1 + src_strd]
    CMP         x12,#0
    movn        x20,#0
    csel        x12, x20, x12,LT
    SUB         x11,x9,x14                  //pu1_src[wd - 1] - pu1_src[wd - 1 - 1 + src_strd]

    MOV         x20,#1
    csel        x12, x20, x12,GT            //SIGN(pu1_src[wd - 1] - pu1_src_top_right[0])
    CMP         x11,#0
    movn        x20,#0
    csel        x11, x20, x11,LT
    MOV         x20,#1
    csel        x11, x20, x11,GT            //SIGN(pu1_src[wd - 1] - pu1_src[wd - 1 - 1 + src_strd])
    ADRP        x14, :got:gi1_table_edge_idx //table pointer
    LDR         x14, [x14, #:got_lo12:gi1_table_edge_idx]
    ADD         x11,x12,x11                 //SIGN(pu1_src[wd - 1] - pu1_src_top_right[0]) +  SIGN(pu1_src[wd - 1] - pu1_src[wd - 1 - 1 + src_strd])
    ADD         x11,x11,#2                  //edge_idx

    LDRSB       x12,[x14,x11]               //edge_idx = gi1_table_edge_idx[edge_idx]
    CMP         x12,#0                      //0 != edge_idx
    BEQ         PU1_AVAIL_6_LOOP
    LDRSB       x10,[x6,x12]                //pi1_sao_offset[edge_idx]
    ADD         x9,x9,x10                   //pu1_src[wd - 1] + pi1_sao_offset[edge_idx]
    mov         x20,#255
    cmp         x9,x20
    csel        x9, x20, x9, ge             //u1_pos_0_0_tmp = CLIP3(pu1_src[wd - 1] + pi1_sao_offset[edge_idx], 0, (1 << bit_depth) - 1)
    mov         x20,#0
    cmp         x9,x20
    csel        x9, x20, x9, LT             //u1_pos_0_0_tmp = CLIP3(pu1_src[wd - 1] + pi1_sao_offset[edge_idx], 0, (1 << bit_depth) - 1)

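    //Likewise, if the bottom-left neighbour is available (pu1_avail[6]),
    //filter the corner pixel at (0, ht - 1) as a scalar special case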
PU1_AVAIL_6_LOOP:
    LDRB        w10,[x5,#6]                 //pu1_avail[6]
    SUB         x11,x8,#1                   //ht - 1

    CMP         x10,#0
    madd        x12, x11, x1, x0            //pu1_src[(ht - 1) * src_strd]

    LDRB        w10,[x12]                   //u1_pos_wd_ht_tmp = pu1_src[(ht - 1) * src_strd]
    BEQ         PU1_AVAIL_3_LOOP

    MOV         x14,x13                     //Load pu1_src_bot_left from x13
    SUB         x11,x12,x1                  //pu1_src[(ht - 1) * src_strd - src_strd]

    LDRB        w14,[x14]                   //Load pu1_src_bot_left[0]
    ADD         x11,x11,#1                  //pu1_src[(ht - 1) * src_strd + 1 - src_strd]

    LDRB        w11,[x11]                   //Load pu1_src[(ht - 1) * src_strd + 1 - src_strd]
    SUB         x14,x10,x14                 //pu1_src[(ht - 1) * src_strd] - pu1_src_bot_left[0]

    SUB         x11,x10,x11                 //pu1_src[(ht - 1) * src_strd] - pu1_src[(ht - 1) * src_strd + 1 - src_strd]
    CMP         x11,#0
    movn        x20,#0
    csel        x11, x20, x11,LT
    MOV         x20,#1
    csel        x11, x20, x11,GT            //SIGN(pu1_src[(ht - 1) * src_strd] - pu1_src[(ht - 1) * src_strd + 1 - src_strd])

    CMP         x14,#0
    movn        x20,#0
    csel        x14, x20, x14,LT
    MOV         x20,#1
    csel        x14, x20, x14,GT            //SIGN(pu1_src[(ht - 1) * src_strd] - pu1_src_bot_left[0])

    ADD         x11,x11,x14                 //Add 2 sign value

    ADRP        x14, :got:gi1_table_edge_idx //table pointer
    LDR         x14, [x14, #:got_lo12:gi1_table_edge_idx]
    ADD         x11,x11,#2                  //edge_idx

    LDRSB       x12,[x14,x11]               //edge_idx = gi1_table_edge_idx[edge_idx]
    CMP         x12,#0
    BEQ         PU1_AVAIL_3_LOOP
    LDRSB       x11,[x6,x12]                //pi1_sao_offset[edge_idx]
    ADD         x10,x10,x11                 //pu1_src[(ht - 1) * src_strd] + pi1_sao_offset[edge_idx]
    mov         x20,#255
    cmp         x10,x20
    csel        x10, x20, x10, ge           //u1_pos_wd_ht_tmp = CLIP3(pu1_src[(ht - 1) * src_strd] + pi1_sao_offset[edge_idx], 0, (1 << bit_depth) - 1)
    mov         x20,#0
    cmp         x10,x20
    csel        x10, x20, x10, LT           //u1_pos_wd_ht_tmp = CLIP3(pu1_src[(ht - 1) * src_strd] + pi1_sao_offset[edge_idx], 0, (1 << bit_depth) - 1)

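    //Set up the vector constants and lookup tables, and trim ht_tmp by one
    //row for each unavailable top/bottom neighbour (pu1_avail[2]/pu1_avail[3])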
PU1_AVAIL_3_LOOP:
    MOV         x21,x2                      //Store pu1_src_left in x21
    MOV         x12,x8                      //Move ht

    MOV         x14,x2                      //Move pu1_src_left to pu1_src_left_cpy
    movi        v0.16b, #2                  //const_2 = vdupq_n_s8(2)
    LDRB        w11,[x5,#3]                 //pu1_avail[3]

    CMP         x11,#0
    movi        v2.8h, #0                   //const_min_clip = vdupq_n_s16(0)
    SUB         x20,x12,#1                  //ht_tmp--
    csel        x12, x20, x12,EQ

    LDRB        w5,[x5,#2]                  //pu1_avail[2]
    movi        v4.8h, #255                 //const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    CMP         x5,#0

    ADD         x20,x0,x1                   //pu1_src += src_strd
    csel        x0, x20, x0,EQ
    LD1         {v7.8b},[x6]                //offset_tbl = vld1_s8(pi1_sao_offset)
    SUB         x20,x12,#1                  //ht_tmp--
    csel        x12, x20, x12,EQ

    ADRP        x6, :got:gi1_table_edge_idx //table pointer
    LDR         x6, [x6, #:got_lo12:gi1_table_edge_idx]

    movi        v1.16b, #0xFF               //au1_mask = vdupq_n_s8(-1)
    ADD         x20,x14,#1                  //pu1_src_left_cpy += 1
    csel        x14, x20, x14,EQ

    MOV         x15,x0                      //Store pu1_src in x15
    LD1         {v6.8b},[x6]                //edge_idx_tbl = vld1_s8(gi1_table_edge_idx)
    MOV         x6,x7                       //move wd to x6 loop_count

    CMP         x7,#16                      //Compare wd with 16
    BLT         WIDTH_RESIDUE               //If wd < 16 jump to WIDTH_RESIDUE, where the loop is unrolled for the 8-pixel case
    CMP         x8,#4                       //Compare ht with 4
    BLE         WD_16_HT_4_LOOP             //If ht <= 4 jump to WD_16_HT_4_LOOP

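    //Main loop for wd >= 16 and ht > 4: the picture is processed in 16-pixel
    //wide columns, software pipelined over three consecutive rows (tagged I,
    //II and III in the comments below)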
WIDTH_LOOP_16:
    MOV         x7,x16                      //Loads wd

    MOV         x5,x23                      //Loads pu1_avail
    CMP         x6,x7                       //col == wd
    LDRB        w20, [x5]                   //pu1_avail[0]
    csel        w8,w20,w8,EQ
    MOV         x20,#-1
    csel        x8, x20, x8,NE
    mov         v1.b[0], w8                 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)

    CMP         x6,#16                      //if(col == 16)
    BNE         SKIP_AU1_MASK_VAL
    LDRB        w8,[x5,#1]                  //pu1_avail[1]
    mov         v1.b[15], w8                //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)

SKIP_AU1_MASK_VAL:
    LDRB        w8,[x5,#2]                  //pu1_avail[2]
    CMP         x8,#0

    MOV         x4,x17                      //Loads ht
    SUB         x20,x0,x1                   //pu1_src - src_strd
    csel        x8, x20, x8,EQ

    csel        x8, x3, x8,NE
    ADD         x5,sp,#0x42                 //*au1_src_left_tmp

    MOV         x7,x16                      //Loads wd
    ADD         x8,x8,#1                    //pu1_src - src_strd + 1

    SUB         x7,x7,x6                    //(wd - col)
    LD1         {v3.16b},[x8]               //pu1_top_row = vld1q_u8(pu1_src - src_strd + 1)
    ADD         x3,x3,#16

    MOV         x8,x19                      //Loads *pu1_src
    LD1         {v5.16b},[x0]               //pu1_cur_row = vld1q_u8(pu1_src)
    ADD         x7,x7,#15                   //15 + (wd - col)

    ADD         x7,x8,x7                    //pu1_src[0 * src_strd + 15 + (wd - col)]
    cmhi        v17.16b,  v5.16b ,  v3.16b  //vcgtq_u8(pu1_cur_row, pu1_top_row)
    SUB         x5,x5,#1

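    //Save the rightmost column of the current 16-pixel strip into
    //au1_src_left_tmp, from which pu1_src_left is rebuilt at the end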
AU1_SRC_LEFT_LOOP:
    LDRB        w8,[x7]                     //load the value and increment by src_strd
    ADD         x7,x7,x1
    SUBS        x4,x4,#1                    //decrement the loop count
    STRB        w8,[x5,#1]!                 //store it in the stack pointer
    BNE         AU1_SRC_LEFT_LOOP

    movi        v18.16b, #0
    cmhi        v16.16b,  v3.16b ,  v5.16b  //vcltq_u8(pu1_cur_row, pu1_top_row)

    ADD         x8,x0,x1                    //I *pu1_src + src_strd
    SUB         v17.16b,  v16.16b ,  v17.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    MOV         x7,x12                      //row count, move ht_tmp to x7

    SUB         x5,x12,x7                   //I ht_tmp - row
    LD1         {v16.16b},[x8]              //I pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    ADD         x8,x14,x5                   //I pu1_src_left_cpy[ht_tmp - row]

    ADD         x8,x8,#1                    //I pu1_src_left_cpy[ht_tmp - row + 1]
    LDRB        w8,[x8]

    MOV         x5,x23                      //I Loads pu1_avail
    mov         v18.b[15], w8               //I vsetq_lane_u8
    LDRB        w5,[x5,#2]                  //I pu1_avail[2]

    EXT         v18.16b,  v18.16b ,  v16.16b,#15 //I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15)
    CMP         x5,#0                       //I
    BNE         SIGN_UP_CHANGE_DONE         //I

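    //Patch lane 15 of sign_up from scalar loads, since its diagonal
    //neighbour lies outside the 16 bytes loaded for the row above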
SIGN_UP_CHANGE:
    LDRB        w8,[x0,#15]                 //I pu1_src_cpy[15]
    SUB         x5,x0,x1                    //I pu1_src_cpy[16 - src_strd]

    LDRB        w5,[x5,#16]                 //I load the value
    SUB         x8,x8,x5                    //I pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]
    CMP         x8,#0                       //I
    movn        x20,#0
    csel        x8, x20, x8,LT              //I
    MOV         x20,#1
    csel        x8, x20, x8,GT              //I SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd])
    mov         v17.b[15], w8               //I sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]), sign_up, 15)

SIGN_UP_CHANGE_DONE:
    cmhi        v3.16b,  v5.16b ,  v18.16b  //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    cmhi        v18.16b,  v18.16b ,  v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    SUB         v3.16b,  v18.16b ,  v3.16b  //I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

    ADD         v18.16b,  v0.16b ,  v17.16b //I edge_idx = vaddq_s8(const_2, sign_up)
    ADD         v18.16b,  v18.16b ,  v3.16b //I edge_idx = vaddq_s8(edge_idx, sign_down)
    TBL         v18.16b, {v6.16b},v18.16b   //I vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    NEG         v17.16b, v3.16b             //I sign_up = vnegq_s8(sign_down)

    EXT         v17.16b,  v17.16b ,  v17.16b,#1 //I sign_up = vextq_s8(sign_up, sign_up, 1)
//  TBL v19.8b, {v6.16b},v19.8b                //I vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))

    Uxtl        v20.8h, v5.8b               //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    AND         v18.16b,  v18.16b ,  v1.16b //I edge_idx = vandq_s8(edge_idx, au1_mask)

    TBL         v3.16b, {v7.16b},v18.16b    //I offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))

    Uxtl2       v22.8h, v5.16b              //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
    SADDW       v20.8h,  v20.8h ,  v3.8b    //I pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)

    SMAX        v20.8h,  v20.8h ,  v2.8h    //I pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
//  TBL v11.8b, {v7.16b},v19.8b                    //I offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    UMIN        v20.8h,  v20.8h ,  v4.8h    //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    mov         v5.16b, v16.16b
    SADDW2      v22.8h,  v22.8h ,  v3.16b   //I pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)

    SMAX        v22.8h,  v22.8h ,  v2.8h    //I pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
    UMIN        v22.8h,  v22.8h ,  v4.8h    //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))

    SUB         x7,x7,#1                    //I Decrement the ht_tmp loop count by 1

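    //Steady-state inner loop: the store of row I is interleaved with the
    //edge-index and offset computation of rows II and III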
PU1_SRC_LOOP:
    ADD         x8,x0,x1,LSL #1             //II *pu1_src + src_strd
    xtn         v20.8b,  v20.8h             //I vmovn_s16(pi2_tmp_cur_row.val[0])
    SUB         x5,x12,x7                   //II ht_tmp - row

    ADD         x4,x0,x1                    //II pu1_src_cpy[16 - src_strd]
    xtn2        v20.16b,  v22.8h            //I vmovn_s16(pi2_tmp_cur_row.val[1])
    ADD         x2,x8,x1                    //III *pu1_src + src_strd

    LDRB        w11,[x4,#15]                //II pu1_src_cpy[15]
    LD1         {v16.16b},[x8]              //II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    SUB         x7,x7,#1                    //II Decrement the ht_tmp loop count by 1

    ADD         x8,x14,x5                   //II pu1_src_left_cpy[ht_tmp - row]
    LD1         {v30.16b},[x2]              //III pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    LDRB        w8,[x8,#1]

    LDRB        w4,[x0,#16]                 //II load the value
    mov         v18.b[15], w8               //II vsetq_lane_u8
    SUB         x11,x11,x4                  //II pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]

    CMP         x11,#0                      //II
    ST1         { v20.16b},[x0],x1          //I vst1q_u8(pu1_src_cpy, pu1_cur_row)
    SUB         x5,x12,x7                   //III ht_tmp - row

    movn        x20,#0
    csel        x11, x20, x11,LT            //II
    EXT         v18.16b,  v18.16b ,  v16.16b,#15 //II pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15)
    MOV         x20,#1
    csel        x11, x20, x11,GT            //II SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd])

    ADD         x8,x14,x5                   //III pu1_src_left_cpy[ht_tmp - row]
    mov         v17.b[15], w11              //II sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]), sign_up, 15)
    CMP         x7,#1                       //III

    BNE         NEXT_ROW_ELSE_2             //III
    MOV         x5,x23                      //III Loads pu1_avail
    LDRB        w5,[x5,#3]                  //III pu1_avail[3]
    CMP         x5,#0                       //III
    SUB         x20,x2,#2                   //III pu1_src_cpy[src_strd - 1]
    csel        x8, x20, x8,NE

NEXT_ROW_ELSE_2:
    LDRB        w8,[x8,#1]                  //III
    cmhi        v24.16b,  v5.16b ,  v18.16b //II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    ADD         x5,x0,x1

    LDRB        w2,[x5,#15]                 //III pu1_src_cpy[15]
    cmhi        v26.16b,  v18.16b ,  v5.16b //II vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    LDRB        w5,[x0,#16]                 //III load the value

    SUB         x2,x2,x5                    //III pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]
    SUB         v24.16b,  v26.16b ,  v24.16b //II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    CMP         x2,#0                       //III

    movn        x20,#0
    csel        x2, x20, x2,LT              //III
    mov         v18.b[15], w8               //III vsetq_lane_u8
    MOV         x20,#1
    csel        x2, x20, x2,GT              //III SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd])

    SUB         x7,x7,#1                    //III Decrement the ht_tmp loop count by 1
    ADD         v26.16b,  v0.16b ,  v17.16b //II edge_idx = vaddq_s8(const_2, sign_up)

    NEG         v17.16b, v24.16b            //II sign_up = vnegq_s8(sign_down)
    EXT         v18.16b,  v18.16b ,  v30.16b,#15 //III pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15)

    ADD         v26.16b,  v26.16b ,  v24.16b //II edge_idx = vaddq_s8(edge_idx, sign_down)

    EXT         v17.16b,  v17.16b ,  v17.16b,#1 //II sign_up = vextq_s8(sign_up, sign_up, 1)
    TBL         v26.16b, {v6.16b},v26.16b   //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    cmhi        v3.16b,  v16.16b ,  v18.16b //III vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)

    mov         v17.b[15], w2               //III sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]), sign_up, 15)
//  TBL v27.8b, {v6.16b},v27.8b                //II vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
    cmhi        v18.16b,  v18.16b ,  v16.16b //III vcltq_u8(pu1_cur_row, pu1_next_row_tmp)

    Uxtl        v28.8h, v5.8b               //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    AND         v26.16b,  v26.16b ,  v1.16b //II edge_idx = vandq_s8(edge_idx, au1_mask)

    SUB         v3.16b,  v18.16b ,  v3.16b  //III sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    TBL         v24.16b, {v7.16b},v26.16b   //II offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    ADD         v18.16b,  v0.16b ,  v17.16b //III edge_idx = vaddq_s8(const_2, sign_up)

    ADD         v18.16b,  v18.16b ,  v3.16b //III edge_idx = vaddq_s8(edge_idx, sign_down)
//  TBL v25.8b, {v7.16b},v27.8b                    //II offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    NEG         v17.16b, v3.16b             //III sign_up = vnegq_s8(sign_down)

    SADDW       v28.8h,  v28.8h ,  v24.8b   //II pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    TBL         v18.16b, {v6.16b},v18.16b   //III vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    SMAX        v28.8h,  v28.8h ,  v2.8h    //II pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)

    EXT         v17.16b,  v17.16b ,  v17.16b,#1 //III sign_up = vextq_s8(sign_up, sign_up, 1)
//  TBL v19.8b, {v6.16b},v19.8b                //III vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
    UMIN        v28.8h,  v28.8h ,  v4.8h    //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    Uxtl2       v26.8h, v5.16b              //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
    AND         v18.16b,  v18.16b ,  v1.16b //III edge_idx = vandq_s8(edge_idx, au1_mask)

    SADDW2      v26.8h,  v26.8h ,  v24.16b  //II pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    TBL         v3.16b, {v7.16b},v18.16b    //III offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    SMAX        v26.8h,  v26.8h ,  v2.8h    //II pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)

    Uxtl        v20.8h, v16.8b              //III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    UMIN        v26.8h,  v26.8h ,  v4.8h    //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))

    SADDW       v20.8h,  v20.8h ,  v3.8b    //III pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
//  TBL v11.8b, {v7.16b},v19.8b                    //III offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    SMAX        v20.8h,  v20.8h ,  v2.8h    //III pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)

    Uxtl2       v22.8h, v16.16b             //III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
    UMIN        v20.8h,  v20.8h ,  v4.8h    //III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    xtn         v28.8b,  v28.8h             //II vmovn_s16(pi2_tmp_cur_row.val[0])
    SADDW2      v22.8h,  v22.8h ,  v3.16b   //III pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)

    xtn2        v28.16b,  v26.8h            //II vmovn_s16(pi2_tmp_cur_row.val[1])
    SMAX        v22.8h,  v22.8h ,  v2.8h    //III pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)

    mov         v5.16b, v30.16b             //II pu1_cur_row = pu1_next_row
    UMIN        v22.8h,  v22.8h ,  v4.8h    //III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))

    CMP         x7,#1                       //III
    ST1         { v28.16b},[x0],x1          //II vst1q_u8(pu1_src_cpy, pu1_cur_row)
    BGT         PU1_SRC_LOOP                //If more than one row remains jump to PU1_SRC_LOOP
    BLT         INNER_LOOP_DONE

    ADD         x8,x0,x1,LSL #1             //*pu1_src + src_strd
    xtn         v20.8b,  v20.8h             //III vmovn_s16(pi2_tmp_cur_row.val[0])
    MOV         x5,x23                      //Loads pu1_avail

    LDRB        w5,[x5,#3]                  //pu1_avail[3]
    xtn2        v20.16b,  v22.8h            //III vmovn_s16(pi2_tmp_cur_row.val[1])
    CMP         x5,#0

    ADD         x4,x0,x1                    //pu1_src_cpy[16 - src_strd]
    LD1         {v16.16b},[x8]              //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    LDRB        w5,[x0,#16]                 //load the value

    BEQ         NEXT_ROW_ELSE_3
    SUB         x8,x8,#1
    LDRB        w8,[x8]                     //pu1_src_cpy[src_strd - 1]
    B           NEXT_ROW_POINTER_ASSIGNED_3
NEXT_ROW_ELSE_3:
    SUB         x11,x12,x7                  //ht_tmp - row
    ADD         x8,x14,x11                  //pu1_src_left_cpy[ht_tmp - row]
    ADD         x8,x8,#1                    //pu1_src_left_cpy[ht_tmp - row + 1]
    LDRB        w8,[x8]

NEXT_ROW_POINTER_ASSIGNED_3:
    LDRB        w11,[x4,#15]                //pu1_src_cpy[15]
    mov         v18.b[15], w8               //vsetq_lane_u8
    SUB         x8,x11,x5                   //pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]

    CMP         x8,#0
    EXT         v18.16b,  v18.16b ,  v16.16b,#15 //pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15)
    movn        x20,#0
    csel        x8, x20, x8,LT

    ST1         { v20.16b},[x0],x1          //III vst1q_u8(pu1_src_cpy, pu1_cur_row)
    cmhi        v24.16b,  v5.16b ,  v18.16b //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)

    MOV         x20,#1
    csel        x8, x20, x8,GT              //SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd])
    cmhi        v26.16b,  v18.16b ,  v5.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)

    mov         v17.b[15], w8               //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]), sign_up, 15)
    SUB         v24.16b,  v26.16b ,  v24.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

    Uxtl        v20.8h, v5.8b               //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    ADD         v26.16b,  v0.16b ,  v17.16b //edge_idx = vaddq_s8(const_2, sign_up)

    Uxtl2       v22.8h, v5.16b              //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
    ADD         v26.16b,  v26.16b ,  v24.16b //edge_idx = vaddq_s8(edge_idx, sign_down)

    TBL         v26.16b, {v6.16b},v26.16b   //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
//  TBL v27.8b, {v6.16b},v27.8b                //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))

    AND         v26.16b,  v26.16b ,  v1.16b //edge_idx = vandq_s8(edge_idx, au1_mask)

    TBL         v24.16b, {v7.16b},v26.16b   //offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))

    SADDW       v20.8h,  v20.8h ,  v24.8b   //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
//  TBL v25.8b, {v7.16b},v27.8b                    //offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    SMAX        v20.8h,  v20.8h ,  v2.8h    //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)

    UMIN        v20.8h,  v20.8h ,  v4.8h    //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    SADDW2      v22.8h,  v22.8h ,  v24.16b  //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    SMAX        v22.8h,  v22.8h ,  v2.8h    //pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
    UMIN        v22.8h,  v22.8h ,  v4.8h    //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))

INNER_LOOP_DONE:
    xtn         v20.8b,  v20.8h             //vmovn_s16(pi2_tmp_cur_row.val[0])
    MOV         x8,x17                      //Loads ht

    xtn2        v20.16b,  v22.8h            //vmovn_s16(pi2_tmp_cur_row.val[1])
    ADD         x5,sp,#0x42                 //*au1_src_left_tmp

    ST1         { v20.16b},[x0],x1          //vst1q_u8(pu1_src_cpy, pu1_cur_row)
    MOV         x2,x21                      //Loads *pu1_src_left
SRC_LEFT_LOOP:
    LDR         w7,[x5],#4                  //au1_src_left_tmp[row]
    SUBS        x8,x8,#4
    STR         w7,[x2],#4                  //pu1_src_left[row] = au1_src_left_tmp[row]
    BNE         SRC_LEFT_LOOP

    SUBS        x6,x6,#16                   //Decrement the wd loop count by 16
    CMP         x6,#8                       //Check whether residue remains
    BLT         RE_ASSIGNING_LOOP           //Jump to the re-assigning loop
    MOV         x7,x16                      //Loads wd
    MOV         x0,x15                      //Loads *pu1_src
    SUB         x7,x7,x6
    ADD         x0,x0,x7
    BGT         WIDTH_LOOP_16               //If more 16-pixel columns remain jump to WIDTH_LOOP_16
    BEQ         WIDTH_RESIDUE               //If an 8-pixel residue remains jump to WIDTH_RESIDUE



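    //Variant of the 16-pixel wide loop for ht <= 4, processing one row per
    //iteration without the three-row software pipeline used above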
WD_16_HT_4_LOOP:
    MOV         x5,x23                      //Loads pu1_avail
    MOV         x7,x16                      //Loads wd
    CMP         x6,x7                       //col == wd
    LDRB        w20, [x5]                   //pu1_avail[0]
    csel        w8,w20,w8,EQ
    MOV         x20,#-1
    csel        x8, x20, x8,NE
    mov         v1.b[0], w8                 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)

    CMP         x6,#16                      //if(col == 16)
    BNE         SKIP_AU1_MASK_VAL_WD_16_HT_4
    LDRB        w8,[x5,#1]                  //pu1_avail[1]
    mov         v1.b[15], w8                //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)

SKIP_AU1_MASK_VAL_WD_16_HT_4:
    LDRB        w8,[x5,#2]                  //pu1_avail[2]
    CMP         x8,#0

    SUB         x20,x0,x1                   //pu1_src - src_strd
    csel        x8, x20, x8,EQ
    csel        x8, x3, x8,NE
    ADD         x8,x8,#1                    //pu1_src - src_strd + 1
    LD1         {v3.16b},[x8]               //pu1_top_row = vld1q_u8(pu1_src - src_strd + 1)

    ADD         x3,x3,#16
    ADD         x5,sp,#0x42                 //*au1_src_left_tmp
    MOV         x4,x17                      //Loads ht
    MOV         x7,x16                      //Loads wd
    SUB         x7,x7,x6                    //(wd - col)
    ADD         x7,x7,#15                   //15 + (wd - col)
    MOV         x8,x19                      //Loads *pu1_src
    ADD         x7,x8,x7                    //pu1_src[0 * src_strd + 15 + (wd - col)]
    SUB         x5,x5,#1

AU1_SRC_LEFT_LOOP_WD_16_HT_4:
    LDRB        w8,[x7]                     //load the value and increment by src_strd
    ADD         x7,x7,x1
    STRB        w8,[x5,#1]!                 //store it in the stack pointer
    SUBS        x4,x4,#1                    //decrement the loop count
    BNE         AU1_SRC_LEFT_LOOP_WD_16_HT_4

    LD1         {v5.16b},[x0]               //pu1_cur_row = vld1q_u8(pu1_src)

    cmhi        v17.16b,  v5.16b ,  v3.16b  //vcgtq_u8(pu1_cur_row, pu1_top_row)
    cmhi        v16.16b,  v3.16b ,  v5.16b  //vcltq_u8(pu1_cur_row, pu1_top_row)
    SUB         v17.16b,  v16.16b ,  v17.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    movi        v18.16b, #0
    MOV         x7,x12                      //row count, move ht_tmp to x7

PU1_SRC_LOOP_WD_16_HT_4:
    ADD         x8,x0,x1                    //*pu1_src + src_strd
    LD1         {v16.16b},[x8]              //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    MOV         x5,x23                      //Loads pu1_avail
    LDRB        w5,[x5,#3]                  //pu1_avail[3]
    CMP         x5,#0
    BEQ         NEXT_ROW_ELSE_WD_16_HT_4
    CMP         x7,#1
    SUB         x8,x8,#1
    LDRB        w20, [x8]                   //pu1_src_cpy[src_strd - 1]
    csel        w8,w20,w8,EQ
    BEQ         NEXT_ROW_POINTER_ASSIGNED_WD_16_HT_4
NEXT_ROW_ELSE_WD_16_HT_4:
    SUB         x5,x12,x7                   //ht_tmp - row
    ADD         x8,x14,x5                   //pu1_src_left_cpy[ht_tmp - row]
    ADD         x8,x8,#1                    //pu1_src_left_cpy[ht_tmp - row + 1]
    LDRB        w8,[x8]

NEXT_ROW_POINTER_ASSIGNED_WD_16_HT_4:
    mov         v18.b[15], w8               //vsetq_lane_u8
    EXT         v18.16b,  v18.16b ,  v16.16b,#15 //pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15)

    CMP         x7,x12
    BNE         SIGN_UP_CHANGE_WD_16_HT_4
    MOV         x5,x23                      //Loads pu1_avail
    LDRB        w5,[x5,#2]                  //pu1_avail[2]
    CMP         x5,#0
    BNE         SIGN_UP_CHANGE_DONE_WD_16_HT_4

SIGN_UP_CHANGE_WD_16_HT_4:
    LDRB        w8,[x0,#15]                 //pu1_src_cpy[15]
    ADD         x5,x0,#16                   //pu1_src_cpy[16]
    SUB         x5,x5,x1                    //pu1_src_cpy[16 - src_strd]
    LDRB        w5,[x5]                     //load the value
    SUB         x8,x8,x5                    //pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]
    CMP         x8,#0
    movn        x20,#0
    csel        x8, x20, x8,LT
    MOV         x20,#1
    csel        x8, x20, x8,GT              //SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd])
    mov         v17.b[15], w8               //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]), sign_up, 15)

SIGN_UP_CHANGE_DONE_WD_16_HT_4:
    cmhi        v20.16b,  v5.16b ,  v18.16b //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    cmhi        v22.16b,  v18.16b ,  v5.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    SUB         v24.16b,  v22.16b ,  v20.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

    ADD         v26.16b,  v0.16b ,  v17.16b //edge_idx = vaddq_s8(const_2, sign_up)
    ADD         v26.16b,  v26.16b ,  v24.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
    TBL         v26.16b, {v6.16b},v26.16b   //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
//  TBL v27.8b, {v6.16b},v27.8b                //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))

    AND         v26.16b,  v26.16b ,  v1.16b //edge_idx = vandq_s8(edge_idx, au1_mask)

    NEG         v17.16b, v24.16b            //sign_up = vnegq_s8(sign_down)
    EXT         v17.16b,  v17.16b ,  v17.16b,#1 //sign_up = vextq_s8(sign_up, sign_up, 1)

    TBL         v24.16b, {v7.16b},v26.16b   //offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    Uxtl        v28.8h, v5.8b               //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    SADDW       v28.8h,  v28.8h ,  v24.8b   //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    SMAX        v28.8h,  v28.8h ,  v2.8h    //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    UMIN        v28.8h,  v28.8h ,  v4.8h    //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

//  TBL v25.8b, {v7.16b},v27.8b                    //offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    Uxtl2       v30.8h, v5.16b              //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
    SADDW2      v30.8h,  v30.8h ,  v24.16b  //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    SMAX        v30.8h,  v30.8h ,  v2.8h    //pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
    UMIN        v30.8h,  v30.8h ,  v4.8h    //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))

    xtn         v28.8b,  v28.8h             //vmovn_s16(pi2_tmp_cur_row.val[0])
    xtn2        v28.16b,  v30.8h            //vmovn_s16(pi2_tmp_cur_row.val[1])

    ST1         { v28.16b},[x0],x1          //vst1q_u8(pu1_src_cpy, pu1_cur_row)

    mov         v5.16b, v16.16b             //pu1_cur_row = pu1_next_row
    SUBS        x7,x7,#1                    //Decrement the ht_tmp loop count by 1
    BNE         PU1_SRC_LOOP_WD_16_HT_4     //If not equal jump to PU1_SRC_LOOP_WD_16_HT_4

    MOV         x8,x17                      //Loads ht
    ADD         x5,sp,#0x42                 //*au1_src_left_tmp
    MOV         x2,x21                      //Loads *pu1_src_left
SRC_LEFT_LOOP_WD_16_HT_4:
    LDR         w7,[x5],#4                  //au1_src_left_tmp[row]
    STR         w7,[x2],#4                  //pu1_src_left[row] = au1_src_left_tmp[row]
    SUBS        x8,x8,#4
    BNE         SRC_LEFT_LOOP_WD_16_HT_4

    SUBS        x6,x6,#16                   //Decrement the wd loop count by 16
    BLE         RE_ASSIGNING_LOOP           //Jump to the re-assigning loop
    MOV         x7,x16                      //Loads wd
    MOV         x0,x15                      //Loads *pu1_src
    SUB         x7,x7,x6
    ADD         x0,x0,x7
    BGT         WD_16_HT_4_LOOP             //If more 16-pixel columns remain jump to WD_16_HT_4_LOOP


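    //Residue loop for the last 8 pixels when wd is not a multiple of 16;
    //only 8 results are stored per row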
WIDTH_RESIDUE:
    MOV         x7,x16                      //Loads wd
    MOV         x5,x23                      //Loads pu1_avail
    CMP         x6,x7                       //wd_residue == wd
    LDRB        w20, [x5]                   //pu1_avail[0]
    csel        w8,w20,w8,EQ

    MOV         x20,#-1
    csel        x8, x20, x8,NE
    mov         v1.b[0], w8                 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)

    LDRB        w8,[x5,#1]                  //pu1_avail[1]
    mov         v1.b[7], w8                 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 7)

PU1_AVAIL_2_RESIDUE:
    LDRB        w8,[x5,#2]                  //pu1_avail[2]
    CMP         x8,#0

    SUB         x20,x0,x1                   //pu1_src - src_strd
    csel        x8, x20, x8,EQ
    csel        x8, x3, x8,NE
    ADD         x8,x8,#1                    //pu1_src - src_strd + 1
    LD1         {v3.16b},[x8]               //pu1_top_row = vld1q_u8(pu1_src - src_strd + 1)


    ADD         x5,sp,#0x42                 //*au1_src_left_tmp
    MOV         x4,x17                      //Loads ht
    MOV         x7,x16                      //Loads wd
    MOV         x8,x19                      //Loads *pu1_src
    SUB         x7,x7,#1                    //(wd - 1)
    ADD         x7,x8,x7                    //pu1_src[0 * src_strd + (wd - 1)]
    SUB         x5,x5,#1

AU1_SRC_LEFT_LOOP_RESIDUE:
    LDRB        w8,[x7]                     //load the value and increment by src_strd
    ADD         x7,x7,x1
    STRB        w8,[x5,#1]!                 //store it in the stack pointer
    SUBS        x4,x4,#1                    //decrement the loop count
    BNE         AU1_SRC_LEFT_LOOP_RESIDUE

    LD1         {v5.16b},[x0]               //pu1_cur_row = vld1q_u8(pu1_src)

    cmhi        v17.16b,  v5.16b ,  v3.16b  //vcgtq_u8(pu1_cur_row, pu1_top_row)
    cmhi        v16.16b,  v3.16b ,  v5.16b  //vcltq_u8(pu1_cur_row, pu1_top_row)
    SUB         v17.16b,  v16.16b ,  v17.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    MOV         x7,x12                      //row count, move ht_tmp to x7

PU1_SRC_LOOP_RESIDUE:
    movi        v18.16b, #0
    ADD         x8,x0,x1                    //*pu1_src + src_strd
    LD1         {v16.16b},[x8]              //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    MOV         x5,x23                      //Loads pu1_avail
    LDRB        w5,[x5,#3]                  //pu1_avail[3]
    CMP         x5,#0
    BEQ         NEXT_ROW_ELSE_RESIDUE
    CMP         x7,#1
    SUB         x8,x8,#1
    LDRB        w20, [x8]                   //pu1_src_cpy[src_strd - 1]
    csel        w8,w20,w8,EQ
    BEQ         NEXT_ROW_POINTER_ASSIGNED_RESIDUE
NEXT_ROW_ELSE_RESIDUE:
    SUB         x5,x12,x7                   //ht_tmp - row
    ADD         x8,x14,x5                   //pu1_src_left_cpy[ht_tmp - row]
    ADD         x8,x8,#1                    //pu1_src_left_cpy[ht_tmp - row + 1]
    LDRB        w8,[x8]

NEXT_ROW_POINTER_ASSIGNED_RESIDUE:
    mov         v18.b[15], w8               //vsetq_lane_u8
    EXT         v18.16b,  v18.16b ,  v16.16b,#15 //pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15)

    CMP         x7,x12
    BNE         SIGN_UP_CHANGE_RESIDUE
    MOV         x5,x23                      //Loads pu1_avail
    LDRB        w5,[x5,#2]                  //pu1_avail[2]
    CMP         x5,#0
    BNE         SIGN_UP_CHANGE_DONE_RESIDUE

SIGN_UP_CHANGE_RESIDUE:
    LDRB        w8,[x0,#15]                 //pu1_src_cpy[15]
    ADD         x5,x0,#16                   //pu1_src_cpy[16]
    SUB         x5,x5,x1                    //pu1_src_cpy[16 - src_strd]
    LDRB        w5,[x5]                     //load the value
    SUB         x8,x8,x5                    //pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]
    CMP         x8,#0
    movn        x20,#0
    csel        x8, x20, x8,LT
    MOV         x20,#1
    csel        x8, x20, x8,GT              //SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd])
    mov         v17.b[15], w8               //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]), sign_up, 15)

SIGN_UP_CHANGE_DONE_RESIDUE:
    cmhi        v20.16b,  v5.16b ,  v18.16b //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
    cmhi        v22.16b,  v18.16b ,  v5.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
    SUB         v24.16b,  v22.16b ,  v20.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

    ADD         v26.16b,  v0.16b ,  v17.16b //edge_idx = vaddq_s8(const_2, sign_up)
    ADD         v26.16b,  v26.16b ,  v24.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
    TBL         v26.16b, {v6.16b},v26.16b   //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
//  TBL v27.8b, {v6.16b},v27.8b                //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))

    AND         v26.16b,  v26.16b ,  v1.16b //edge_idx = vandq_s8(edge_idx, au1_mask)

    NEG         v17.16b, v24.16b            //sign_up = vnegq_s8(sign_down)
    EXT         v17.16b,  v17.16b ,  v17.16b,#1 //sign_up = vextq_s8(sign_up, sign_up, 1)

    TBL         v24.8b, {v7.16b},v26.8b     //offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    Uxtl        v28.8h, v5.8b               //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    SADDW       v28.8h,  v28.8h ,  v24.8b   //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    SMAX        v28.8h,  v28.8h ,  v2.8h    //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    UMIN        v28.8h,  v28.8h ,  v4.8h    //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    xtn         v30.8b,  v28.8h             //vmovn_s16(pi2_tmp_cur_row.val[0])

    ST1         {v30.8b},[x0],x1            //vst1q_u8(pu1_src_cpy, pu1_cur_row)
    mov         v5.16b, v16.16b             //pu1_cur_row = pu1_next_row
    SUBS        x7,x7,#1
    BNE         PU1_SRC_LOOP_RESIDUE

    MOV         x8,x17                      //Loads ht
    MOV         x2,x21                      //Loads *pu1_src_left
    ADD         x5,sp,#0x42                 //*au1_src_left_tmp

SRC_LEFT_LOOP_RESIDUE:
    LDR         w7,[x5],#4                  //au1_src_left_tmp[row]
    SUBS        x8,x8,#4
    STR         w7,[x2],#4                  //pu1_src_left[row] = au1_src_left_tmp[row]
    BNE         SRC_LEFT_LOOP_RESIDUE


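    //Write back the two scalar-filtered corner pixels, the saved top-left
    //pixel and the saved top row, then restore the stack and return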
RE_ASSIGNING_LOOP:
    MOV         x7,x16                      //Loads wd
    MOV         x0,x19                      //Loads *pu1_src

    MOV         x11,x17                     //Loads ht
    ADD         x8,x0,x7                    //pu1_src[wd]

    MOV         x4,x24                      //Loads pu1_src_top_left
    SUB         x11,x11,#1                  //ht - 1

    SUB         x8,x8,#1
    STRB        w9,[x8]                     //pu1_src_org[wd - 1] = u1_pos_0_0_tmp
    ADD         x8,x8,#1
    madd        x6, x11, x1, x0             //pu1_src_org[(ht - 1) * src_strd]

    LDRB        w8,[sp]                     //load u1_src_top_left_tmp from stack pointer
    ADD         x12,sp,#0x02

    STRB        w10,[x6]                    //pu1_src_org[(ht - 1) * src_strd] = u1_pos_wd_ht_tmp
    STRB        w8,[x4]                     //*pu1_src_top_left = u1_src_top_left_tmp
    MOV         x3,x22                      //Loads pu1_src_top

SRC_TOP_LOOP:
    LD1         {v0.8b},[x12],#8            //au1_src_top_tmp[col]
    SUBS        x7,x7,#8                    //Decrement the width
    ST1         {v0.8b},[x3],#8             //pu1_src_top[col] = au1_src_top_tmp[col]
    BNE         SRC_TOP_LOOP

END_LOOPS:
    ADD         sp,sp,#0xA0
    // LDMFD sp!,{x4-x12,x15}             //Reload the registers from SP
    ldp         x23, x24,[sp], #16
    ldp         x21, x22,[sp], #16
    ldp         x19, x20,[sp], #16
    ret