;
;  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS.  All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;


    ; These functions are only valid when:
    ; x_step_q4 == 16
    ; w%4 == 0
    ; h%4 == 0
    ; taps == 8
    ; VP9_FILTER_WEIGHT == 128
    ; VP9_FILTER_SHIFT == 7

    EXPORT  |vpx_convolve8_avg_horiz_neon|
    EXPORT  |vpx_convolve8_avg_vert_neon|
    ARM
    REQUIRE8
    PRESERVE8

    AREA ||.text||, CODE, READONLY, ALIGN=2

    ; Multiply and accumulate by q0.
    ; Widening multiply-accumulate of eight s16 source vectors against the
    ; eight filter taps held in d0/d1 (loaded from filter_x / filter_y).
    ; $dst accumulates s32 results; the sources are consumed in tap order
    ; 0..7 (d0[0]..d0[3], d1[0]..d1[3]).
    MACRO
    MULTIPLY_BY_Q0 $dst, $src0, $src1, $src2, $src3, $src4, $src5, $src6, $src7
    vmull.s16 $dst, $src0, d0[0]
    vmlal.s16 $dst, $src1, d0[1]
    vmlal.s16 $dst, $src2, d0[2]
    vmlal.s16 $dst, $src3, d0[3]
    vmlal.s16 $dst, $src4, d1[0]
    vmlal.s16 $dst, $src5, d1[1]
    vmlal.s16 $dst, $src6, d1[2]
    vmlal.s16 $dst, $src7, d1[3]
    MEND

;-----------------------------------------------------------------------------
; void vpx_convolve8_avg_horiz_neon(const uint8_t *src, int src_stride,
;                                   uint8_t *dst, int dst_stride,
;                                   const int16_t *filter_x, int x_step_q4,
;                                   const int16_t *filter_y, int y_step_q4,
;                                   int w, int h)
;
; Horizontal 8-tap convolution, rounded (+64 >> 7), saturated to u8, then
; rounding-averaged with the existing dst pixels.  Processes 4x4 blocks:
; inner loop walks w in steps of 4, outer loop walks h in steps of 4.
;
; r0      const uint8_t *src
; r1      int src_stride
; r2      uint8_t *dst
; r3      int dst_stride
; sp[]    const int16_t *filter_x
; sp[]    int x_step_q4
; sp[]    const int16_t *filter_y ; unused
; sp[]    int y_step_q4           ; unused
; sp[]    int w
; sp[]    int h
;-----------------------------------------------------------------------------
|vpx_convolve8_avg_horiz_neon| PROC
    push            {r4-r10, lr}

    sub             r0, r0, #3              ; adjust for taps

    ; stack args sit above the 8 pushed registers (8*4 = 32 bytes)
    ldr             r5, [sp, #32]           ; filter_x
    ldr             r6, [sp, #48]           ; w
    ldr             r7, [sp, #52]           ; h

    vld1.s16        {q0}, [r5]              ; filter_x taps -> d0/d1

    sub             r8, r1, r1, lsl #2      ; -src_stride * 3
    add             r8, r8, #4              ; -src_stride * 3 + 4

    sub             r4, r3, r3, lsl #2      ; -dst_stride * 3
    add             r4, r4, #4              ; -dst_stride * 3 + 4

    rsb             r9, r6, r1, lsl #2      ; reset src for outer loop
    sub             r9, r9, #7
    rsb             r12, r6, r3, lsl #2     ; reset dst for outer loop

    mov             r10, r6                 ; w loop counter

vpx_convolve8_avg_loop_horiz_v
    ; load 4 rows of 8 source bytes, last load rewinds 3 rows and steps +4
    vld1.8          {d24}, [r0], r1
    vld1.8          {d25}, [r0], r1
    vld1.8          {d26}, [r0], r1
    vld1.8          {d27}, [r0], r8

    ; transpose so each d-register holds one column-group across the 4 rows
    vtrn.16         q12, q13
    vtrn.8          d24, d25
    vtrn.8          d26, d27

    pld             [r0, r1, lsl #2]

    ; widen u8 -> s16 for the multiply-accumulate
    vmovl.u8        q8, d24
    vmovl.u8        q9, d25
    vmovl.u8        q10, d26
    vmovl.u8        q11, d27

    ; save a few instructions in the inner loop
    ; (pre-arrange registers to match the MULTIPLY_BY_Q0 operand order below)
    vswp            d17, d18
    vmov            d23, d21

    add             r0, r0, #3

vpx_convolve8_avg_loop_horiz
    add             r5, r0, #64             ; prefetch pointer

    ; next 4 columns for the 4 rows (note d31/d30 order feeds the vtrn below)
    vld1.32         {d28[]}, [r0], r1
    vld1.32         {d29[]}, [r0], r1
    vld1.32         {d31[]}, [r0], r1
    vld1.32         {d30[]}, [r0], r8

    pld             [r5]

    vtrn.16         d28, d31
    vtrn.16         d29, d30
    vtrn.8          d28, d29
    vtrn.8          d31, d30

    pld             [r5, r1]

    ; extract to s16
    vtrn.32         q14, q15
    vmovl.u8        q12, d28
    vmovl.u8        q13, d29

    pld             [r5, r1, lsl #1]

    ; slightly out of order load to match the existing data
    ; (dst pixels gathered into q3 for the averaging step)
    vld1.u32        {d6[0]}, [r2], r3
    vld1.u32        {d7[0]}, [r2], r3
    vld1.u32        {d6[1]}, [r2], r3
    vld1.u32        {d7[1]}, [r2], r3

    sub             r2, r2, r3, lsl #2      ; reset for store

    ; src[] * filter_x — one MULTIPLY_BY_Q0 per output row of the 4x4 block;
    ; operand lists slide by one tap thanks to the transposed layout above
    MULTIPLY_BY_Q0  q1,  d16, d17, d20, d22, d18, d19, d23, d24
    MULTIPLY_BY_Q0  q2,  d17, d20, d22, d18, d19, d23, d24, d26
    MULTIPLY_BY_Q0  q14, d20, d22, d18, d19, d23, d24, d26, d27
    MULTIPLY_BY_Q0  q15, d22, d18, d19, d23, d24, d26, d27, d25

    pld             [r5, -r8]

    ; += 64 >> 7  (rounding narrow shift: VP9_FILTER_SHIFT == 7)
    vqrshrun.s32    d2, q1, #7
    vqrshrun.s32    d3, q2, #7
    vqrshrun.s32    d4, q14, #7
    vqrshrun.s32    d5, q15, #7

    ; saturate to u8
    vqmovn.u16      d2, q1
    vqmovn.u16      d3, q2

    ; transpose back to row order for the stores
    vtrn.16         d2, d3
    vtrn.32         d2, d3
    vtrn.8          d2, d3

    ; average the new value and the dst value (rounding halving add)
    vrhadd.u8       q1, q1, q3

    ; last store uses r4 (-dst_stride*3 + 4) to advance to the next 4 columns
    vst1.u32        {d2[0]}, [r2@32], r3
    vst1.u32        {d3[0]}, [r2@32], r3
    vst1.u32        {d2[1]}, [r2@32], r3
    vst1.u32        {d3[1]}, [r2@32], r4

    ; shift the sliding window of source columns for the next iteration
    vmov            q8, q9
    vmov            d20, d23
    vmov            q11, q12
    vmov            q9, q13

    subs            r6, r6, #4              ; w -= 4
    bgt             vpx_convolve8_avg_loop_horiz

    ; outer loop
    mov             r6, r10                 ; restore w counter
    add             r0, r0, r9              ; src += src_stride * 4 - w
    add             r2, r2, r12             ; dst += dst_stride * 4 - w
    subs            r7, r7, #4              ; h -= 4
    bgt             vpx_convolve8_avg_loop_horiz_v

    pop             {r4-r10, pc}

    ENDP

;-----------------------------------------------------------------------------
; void vpx_convolve8_avg_vert_neon(const uint8_t *src, int src_stride,
;                                  uint8_t *dst, int dst_stride,
;                                  const int16_t *filter_x, int x_step_q4,
;                                  const int16_t *filter_y, int y_step_q4,
;                                  int w, int h)
;
; Vertical 8-tap convolution, rounded (+64 >> 7), saturated to u8, then
; rounding-averaged with the existing dst pixels.  Processes 4x4 blocks:
; inner loop walks h in steps of 4, outer loop walks w in steps of 4.
; Uses two interleaved row pointers (even rows via r4/r5, odd rows via
; r7/r8) with doubled strides to dual-issue the lane loads/stores.
;
; Register args r0-r3 as above; filter_y / w / h read from the stack.
;-----------------------------------------------------------------------------
|vpx_convolve8_avg_vert_neon| PROC
    push            {r4-r8, lr}

    ; adjust for taps: src -= 3 * src_stride
    sub             r0, r0, r1
    sub             r0, r0, r1, lsl #1

    ; stack args sit above the 6 pushed registers (6*4 = 24 bytes)
    ldr             r4, [sp, #32]           ; filter_y
    ldr             r6, [sp, #40]           ; w
    ldr             lr, [sp, #44]           ; h

    vld1.s16        {q0}, [r4]              ; filter_y taps -> d0/d1

    ; double the strides; each of the two row pointers steps by 2 rows
    lsl             r1, r1, #1
    lsl             r3, r3, #1

vpx_convolve8_avg_loop_vert_h
    mov             r4, r0
    add             r7, r0, r1, asr #1      ; odd-row src pointer (src + stride)
    mov             r5, r2
    add             r8, r2, r3, asr #1      ; odd-row dst pointer (dst + stride)
    mov             r12, lr                 ; h loop counter

    ; prime the 7-row history (rows 0..6) of 4-pixel columns
    vld1.u32        {d16[0]}, [r4], r1
    vld1.u32        {d16[1]}, [r7], r1
    vld1.u32        {d18[0]}, [r4], r1
    vld1.u32        {d18[1]}, [r7], r1
    vld1.u32        {d20[0]}, [r4], r1
    vld1.u32        {d20[1]}, [r7], r1
    vld1.u32        {d22[0]}, [r4], r1

    ; widen u8 -> s16
    vmovl.u8        q8, d16
    vmovl.u8        q9, d18
    vmovl.u8        q10, d20
    vmovl.u8        q11, d22

vpx_convolve8_avg_loop_vert
    ; always process a 4x4 block at a time
    vld1.u32        {d24[0]}, [r7], r1
    vld1.u32        {d26[0]}, [r4], r1
    vld1.u32        {d26[1]}, [r7], r1
    vld1.u32        {d24[1]}, [r4], r1

    ; extract to s16
    vmovl.u8        q12, d24
    vmovl.u8        q13, d26

    ; load existing dst pixels into q3 for the averaging step
    vld1.u32        {d6[0]}, [r5@32], r3
    vld1.u32        {d6[1]}, [r8@32], r3
    vld1.u32        {d7[0]}, [r5@32], r3
    vld1.u32        {d7[1]}, [r8@32], r3

    pld             [r7]
    pld             [r4]

    ; src[] * filter_y — one MULTIPLY_BY_Q0 per output row;
    ; operand lists slide by one row down the 8-tap window
    MULTIPLY_BY_Q0  q1,  d16, d17, d18, d19, d20, d21, d22, d24

    pld             [r7, r1]
    pld             [r4, r1]

    MULTIPLY_BY_Q0  q2,  d17, d18, d19, d20, d21, d22, d24, d26

    pld             [r5]
    pld             [r8]

    MULTIPLY_BY_Q0  q14, d18, d19, d20, d21, d22, d24, d26, d27

    pld             [r5, r3]
    pld             [r8, r3]

    MULTIPLY_BY_Q0  q15, d19, d20, d21, d22, d24, d26, d27, d25

    ; += 64 >> 7  (rounding narrow shift: VP9_FILTER_SHIFT == 7)
    vqrshrun.s32    d2, q1, #7
    vqrshrun.s32    d3, q2, #7
    vqrshrun.s32    d4, q14, #7
    vqrshrun.s32    d5, q15, #7

    ; saturate to u8
    vqmovn.u16      d2, q1
    vqmovn.u16      d3, q2

    ; average the new value and the dst value (rounding halving add)
    vrhadd.u8       q1, q1, q3

    sub             r5, r5, r3, lsl #1      ; reset for store
    sub             r8, r8, r3, lsl #1

    vst1.u32        {d2[0]}, [r5@32], r3
    vst1.u32        {d2[1]}, [r8@32], r3
    vst1.u32        {d3[0]}, [r5@32], r3
    vst1.u32        {d3[1]}, [r8@32], r3

    ; shift the sliding 8-row window down by 4 rows
    vmov            q8, q10
    vmov            d18, d22
    vmov            d19, d24
    vmov            q10, q13
    vmov            d22, d25

    subs            r12, r12, #4            ; h -= 4
    bgt             vpx_convolve8_avg_loop_vert

    ; outer loop: advance to the next 4-pixel column
    add             r0, r0, #4
    add             r2, r2, #4
    subs            r6, r6, #4              ; w -= 4
    bgt             vpx_convolve8_avg_loop_vert_h

    pop             {r4-r8, pc}

    ENDP
    END