;
;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS. All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;


    EXPORT  |vp8_short_walsh4x4_neon|

    ARM
    REQUIRE8
    PRESERVE8

    AREA ||.text||, CODE, READONLY, ALIGN=2
;void vp8_short_walsh4x4_neon(short *input, short *output, int pitch)
; r0    short *input,
; r1    short *output,
; r2    int pitch
|vp8_short_walsh4x4_neon| PROC

    vld1.16         {d0}, [r0@64], r2       ; load the four input rows; pitch is in bytes
    vld1.16         {d1}, [r0@64], r2
    vld1.16         {d2}, [r0@64], r2
    vld1.16         {d3}, [r0@64]

    ; First for-loop, vectorized across all four rows.
    ; Transpose d0, d1, d2, d3 so that d0=ip[0], d1=ip[1], d2=ip[2], d3=ip[3]
    ; (one lane per row).
    vtrn.32         d0, d2
    vtrn.32         d1, d3

    vmov.s32        q15, #3                 ; q15 = 3, rounding bias for the final shift

    vtrn.16         d0, d1
    vtrn.16         d2, d3

    vadd.s16        d4, d0, d2              ; ip[0] + ip[2]
    vadd.s16        d5, d1, d3              ; ip[1] + ip[3]
    vsub.s16        d6, d1, d3              ; ip[1] - ip[3]
    vsub.s16        d7, d0, d2              ; ip[0] - ip[2]

    vshl.s16        d4, d4, #2              ; a1 = (ip[0] + ip[2]) << 2
    vshl.s16        d5, d5, #2              ; d1 = (ip[1] + ip[3]) << 2
    vshl.s16        d6, d6, #2              ; c1 = (ip[1] - ip[3]) << 2
    vceq.s16        d16, d4, #0             ; d16 = (a1 == 0) ? -1 : 0
    vshl.s16        d7, d7, #2              ; b1 = (ip[0] - ip[2]) << 2

    vadd.s16        d0, d4, d5              ; a1 + d1
    vmvn            d16, d16                ; d16 = (a1 != 0) ? -1 : 0
    vsub.s16        d3, d4, d5              ; op[3] = a1 - d1
    vadd.s16        d1, d7, d6              ; op[1] = b1 + c1
    vsub.s16        d2, d7, d6              ; op[2] = b1 - c1
    vsub.s16        d0, d0, d16             ; op[0] = a1 + d1 + (a1 != 0)

    ; Second for-loop, vectorized across all four columns.
    ; Transpose d0, d1, d2, d3 so that d0=ip[0], d1=ip[4], d2=ip[8], d3=ip[12]
    vtrn.32         d1, d3
    vtrn.32         d0, d2
    vtrn.16         d2, d3
    vtrn.16         d0, d1

    ; widen to 32 bits so the second-pass butterflies cannot overflow
    vaddl.s16       q8, d0, d2              ; a1 = ip[0] + ip[8]
    vaddl.s16       q9, d1, d3              ; d1 = ip[4] + ip[12]
    vsubl.s16       q10, d1, d3             ; c1 = ip[4] - ip[12]
    vsubl.s16       q11, d0, d2             ; b1 = ip[0] - ip[8]

    vadd.s32        q0, q8, q9              ; a2 = a1 + d1
    vadd.s32        q1, q11, q10            ; b2 = b1 + c1
    vsub.s32        q2, q11, q10            ; c2 = b1 - c1
    vsub.s32        q3, q8, q9              ; d2 = a1 - d1

    ; sign masks: -1 where the value is negative, 0 elsewhere
    vclt.s32        q8, q0, #0
    vclt.s32        q9, q1, #0
    vclt.s32        q10, q2, #0
    vclt.s32        q11, q3, #0

    ; subtract -1 (or 0)
    vsub.s32        q0, q0, q8              ; a2 += a2 < 0
    vsub.s32        q1, q1, q9              ; b2 += b2 < 0
    vsub.s32        q2, q2, q10             ; c2 += c2 < 0
    vsub.s32        q3, q3, q11             ; d2 += d2 < 0

    vadd.s32        q8, q0, q15             ; a2 + 3
    vadd.s32        q9, q1, q15             ; b2 + 3
    vadd.s32        q10, q2, q15            ; c2 + 3
    vadd.s32        q11, q3, q15            ; d2 + 3

    ; vrshrn is not used here: it would round by adding 1 << (3-1) = 4,
    ; whereas this transform adds 3 (q15) before shifting
    vshrn.s32       d0, q8, #3              ; op[0..3]   = (a2 + 3) >> 3
    vshrn.s32       d1, q9, #3              ; op[4..7]   = (b2 + 3) >> 3
    vshrn.s32       d2, q10, #3             ; op[8..11]  = (c2 + 3) >> 3
    vshrn.s32       d3, q11, #3             ; op[12..15] = (d2 + 3) >> 3

    vst1.16         {q0, q1}, [r1@128]      ; store all 16 output coefficients

    bx              lr

    ENDP

    END
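;------------------------------------------------------------------------------
; Reference sketch (comments only, nothing below is assembled). The C here is
; reconstructed from the register comments above so the NEON arithmetic can be
; cross-checked against scalar code; the canonical scalar version in the
; project is vp8_short_walsh4x4_c, and the name walsh4x4_ref is only an
; illustrative placeholder. Note that pitch is a byte stride, hence the
; pitch / 2 step between rows of 16-bit input.
;
;   void walsh4x4_ref(short *input, short *output, int pitch)
;   {
;       int i, a1, b1, c1, d1, a2, b2, c2, d2;
;       short *ip = input;
;       short *op = output;
;
;       for (i = 0; i < 4; i++) {           /* first pass: one row each */
;           a1 = (ip[0] + ip[2]) << 2;
;           d1 = (ip[1] + ip[3]) << 2;
;           c1 = (ip[1] - ip[3]) << 2;
;           b1 = (ip[0] - ip[2]) << 2;
;
;           op[0] = a1 + d1 + (a1 != 0);
;           op[1] = b1 + c1;
;           op[2] = b1 - c1;
;           op[3] = a1 - d1;
;
;           ip += pitch / 2;
;           op += 4;
;       }
;
;       ip = output;
;       op = output;
;       for (i = 0; i < 4; i++) {           /* second pass: one column each */
;           a1 = ip[0] + ip[8];
;           d1 = ip[4] + ip[12];
;           c1 = ip[4] - ip[12];
;           b1 = ip[0] - ip[8];
;
;           a2 = a1 + d1;
;           b2 = b1 + c1;
;           c2 = b1 - c1;
;           d2 = a1 - d1;
;
;           a2 += a2 < 0;                   /* the vclt/vsub pairs above */
;           b2 += b2 < 0;
;           c2 += c2 < 0;
;           d2 += d2 < 0;
;
;           op[0]  = (a2 + 3) >> 3;
;           op[4]  = (b2 + 3) >> 3;
;           op[8]  = (c2 + 3) >> 3;
;           op[12] = (d2 + 3) >> 3;
;
;           ip++;
;           op++;
;       }
;   }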