/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/

D | Norm_Corr_opt.s
    32  @ r6 --- corr_norm[]
    73  MOV r6, #0 @L_tmp = 0
    81  SMLABB r6, r9, r9, r6 @L_tmp += (xn[i] * xn[i])
    82  SMLATT r6, r9, r9, r6 @L_tmp += (xn[i+1] * xn[i+1])
    83  SMLABB r6, r10, r10, r6
    84  SMLATT r6, r10, r10, r6
    85  SMLABB r6, r11, r11, r6
    86  SMLATT r6, r11, r11, r6
    87  SMLABB r6, r12, r12, r6
    88  SMLATT r6, r12, r12, r6
    [all …]

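The SMLABB/SMLATT pairs above accumulate the energy of xn[]: each 32-bit register (r9-r12) holds two 16-bit samples, and both halfwords are squared into L_tmp, as the comments at lines 81-82 state. A minimal C sketch of that loop, assuming an even sample count; xn_energy is an illustrative name, not a function in the source:

    #include <stdint.h>

    /* Sketch only: mirrors the commented SMLABB/SMLATT accumulation. */
    int32_t xn_energy(const int16_t *xn, int n)   /* n assumed even */
    {
        int32_t L_tmp = 0;                        /* MOV r6, #0              */
        for (int i = 0; i < n; i += 2) {
            L_tmp += xn[i]     * xn[i];           /* SMLABB: bottom halfword */
            L_tmp += xn[i + 1] * xn[i + 1];       /* SMLATT: top halfword    */
        }
        return L_tmp;
    }
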
D | cor_h_vec_opt.s
    31  @r6 ---- cor_2[]
    48  MOV r6, #0 @L_sum2 = 0
    59  MLA r6, r12, r14, r6
    63  MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
    67  ADD r10, r6, r14
    70  MOV r6, r10, ASR #16
    76  MUL r14, r6, r11
    78  MOV r6, r14, ASR #15
    86  ADD r6, r6, r11
    88  STRH r6, [r12]
    [all …]

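The tail of this excerpt (lines 63-88) is the usual fixed-point finishing sequence: L_sum2 is shifted left by two, rounded to its high 16 bits, multiplied with a Q15 shift, summed with another 16-bit term, and stored with STRH. The values sitting in r14 and r11 at each step are not visible here, so the sketch below is a hedged reconstruction that assumes r14 holds a 0x8000 rounding constant and uses illustrative parameter names:

    #include <stdint.h>

    /* Sketch only: generic round / Q15-multiply / store pattern. */
    static int16_t cor_tail(int32_t L_sum2, int16_t gain, int16_t addend)
    {
        L_sum2 <<= 2;                                    /* L_sum2 = (L_sum2 << 2)         */
        int16_t hi = (int16_t)((L_sum2 + 0x8000) >> 16); /* ADD r10, r6, r14; ASR #16      */
        int32_t p  = ((int32_t)hi * gain) >> 15;         /* MUL r14, r6, r11; ASR #15      */
        return (int16_t)(p + addend);                    /* ADD r6, r6, r11; STRH r6,[r12] */
    }
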
D | pred_lt4_1_opt.s
    50  LDR r6, [r8]
    51  ADD r6, r8
    54  ADD r8, r6, r8 @ptr2 = &(inter4_2[k][0])
    67  LDRSH r6, [r1], #2 @x[1]
    71  SMULBB r11, r6, r3 @x[1] * h[0]
    75  SMLABT r10, r6, r3, r10 @x[1] * h[1]
    80  LDRSH r6, [r1], #2 @x[4]
    83  SMLABB r12, r6, r3, r12 @x[4] * h[2]
    87  SMLABT r11, r6, r3, r11 @x[4] * h[3]
    92  SMLABB r10, r6, r3, r10 @x[4] * h[4]
    [all …]

D | convolve_opt.s
    41  MOV r6, r0 @ tmpX = x
    42  LDRSH r9, [r6], #2 @ *tmpX++
    50  LDRSH r9, [r6], #2 @ *tmpX++
    52  LDRSH r12, [r6], #2 @ *tmpX++
    56  LDRSH r9, [r6], #2 @ *tmpX++
    58  LDRSH r12, [r6], #2 @ *tmpX++
    76  MOV r6, r0
    77  LDRSH r9, [r6], #2 @ *tmpX++
    79  LDRSH r12, [r6], #2
    89  LDRSH r9, [r6], #2 @ *tmpX++
    [all …]

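The repeated post-incrementing LDRSH loads, with a fresh MOV r6, r0 at lines 41 and 76, are the convolution pointer walk: for every output sample, tmpX restarts at x and 16-bit samples are consumed with *tmpX++. The multiply-accumulate itself falls outside this excerpt, so the following is only a scalar sketch of that structure, with illustrative names and without the file's unrolling or final 16-bit scaling:

    #include <stdint.h>

    /* Sketch only: y[i] = sum over j<=i of x[j] * h[i-j]. */
    void convolve_sketch(const int16_t *x, const int16_t *h, int32_t *y, int n)
    {
        for (int i = 0; i < n; i++) {
            const int16_t *tmpX = x;          /* MOV r6, r0         */
            int32_t s = 0;
            for (int j = 0; j <= i; j++)
                s += *tmpX++ * h[i - j];      /* LDRSH r9, [r6], #2 */
            y[i] = s;
        }
    }
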
D | Syn_filt_32_opt.s
    34  @ lg --- r6
    45  LDRSH r6, [r0] @ load Aq[0]
    47  MOV r3, r6, ASR r7 @ a0 = Aq[0] >> (4 + Q_new)
    50  LDRSH r6, [r0, #2] @ load Aq[1]
    54  AND r6, r6, r14
    56  ORR r10, r6, r7, LSL #16 @ Aq[2] -- Aq[1]
    61  LDRSH r6, [r0, #10] @ load Aq[5]
    65  AND r6, r6, r14
    67  ORR r10, r6, r7, LSL #16 @ Aq[6] -- Aq[5]
    72  LDRSH r6, [r0, #18] @ load Aq[9]
    [all …]

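Lines 50-67 load adjacent Aq[] coefficients, mask them with r14 (presumably 0xffff) and ORR them into one word, high half Aq[i+1] and low half Aq[i] (the "Aq[2] -- Aq[1]" comments), so later SMLAxy instructions can address either halfword. A small sketch of that packing, under those assumptions:

    #include <stdint.h>

    /* Sketch only: pack two 16-bit coefficients as "hi -- lo" in one word. */
    static uint32_t pack_pair(int16_t lo, int16_t hi)   /* e.g. Aq[1], Aq[2] */
    {
        return ((uint32_t)lo & 0xffffu)        /* AND r6, r6, r14          */
             | ((uint32_t)(uint16_t)hi << 16); /* ORR r10, r6, r7, LSL #16 */
    }
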
D | syn_filt_opt.s
    47  LDRH r6, [r4], #2
    56  STRH r6, [r5], #2
    65  LDRH r6, [r4], #2
    74  STRH r6, [r5], #2
    90  LDRSH r6, [r0, #2] @ load a[1]
    94  AND r6, r6, r14
    96  ORR r10, r6, r7, LSL #16 @ -a[2] -- -a[1]
    101  LDRSH r6, [r0, #10] @ load a[5]
    105  AND r6, r6, r14
    107  ORR r10, r6, r7, LSL #16 @ -a[6] -- -a[5]
    [all …]

D | Filt_6k_7k_opt.s
    53  ADD r6, r13, #60 @ get x[L_FIR - 1] address
    64  STRH r8, [r6], #2
    65  STRH r9, [r6], #2
    66  STRH r11, [r6], #2
    67  STRH r12, [r6], #2
    76  STRH r8, [r6], #2
    77  STRH r9, [r6], #2
    78  STRH r11, [r6], #2
    79  STRH r12, [r6], #2
    95  LDRSH r6, [r4, #2] @ load x[i + 1]
    [all …]

D | Dot_p_opt.s
    39  LDR r6, [r0], #4
    42  SMLABB r4, r6, r7, r4
    44  SMLATT r4, r6, r7, r4
    46  LDR r6, [r0], #4
    53  SMLABB r4, r6, r7, r4
    55  SMLATT r4, r6, r7, r4

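Each LDR here pulls two packed 16-bit samples, and SMLABB/SMLATT multiply the bottom and top halfwords of the two operands into the running sum in r4, i.e. a 16-bit dot product. A minimal sketch of that accumulation; the function name is illustrative, and whatever normalisation or saturation the real routine applies afterwards is not shown in this excerpt:

    #include <stdint.h>

    /* Sketch only: plain dot product of 16-bit vectors, n assumed even. */
    int32_t dot16(const int16_t *x, const int16_t *y, int n)
    {
        int32_t acc = 0;
        for (int i = 0; i < n; i += 2) {
            acc += x[i]     * y[i];       /* SMLABB r4, r6, r7, r4 */
            acc += x[i + 1] * y[i + 1];   /* SMLATT r4, r6, r7, r4 */
        }
        return acc;
    }
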
D | Deemph_32_opt.s
    38  LDRSH r6, [r0], #2 @load x_hi[0]
    44  MOV r10, r6, LSL #16 @L_tmp = x_hi[0]<<16
    50  LDRSH r6, [r0], #2 @load x_hi[1]
    58  MOV r10, r6, LSL #16
    69  LDRSH r6, [r0], #2 @load x_hi[]
    72  MOV r10, r6, LSL #16
    77  LDRSH r6, [r0], #2 @load x_hi[]
    83  MOV r10, r6, LSL #16

D | residu_asm_opt.s
    36  LDRH r6, [r0], #2
    37  ORR r5, r6, r5, LSL #16 @r5 --- a0, a1
    39  LDRH r6, [r0], #2
    41  ORR r6, r7, r6, LSL #16 @r6 --- a2, a3
    89  SMLATB r11, r6, r2, r11 @i3(2) --- r11 += x[0] * a2
    90  SMLATT r12, r6, r2, r12 @i4(2) --- r12 += x[1] * a2
    91  SMLABB r12, r6, r2, r12 @i4(3) --- r12 += x[0] * a3
    96  SMLATT r4, r6, r2, r4 @i2(2) --- r4 += x[-1] * a2
    97  SMLABT r11, r6, r2, r11 @i3(3) --- r11 += x[-1] * a3
    99  SMLATB r3, r6, r2, r3 @i1(2) --- r3 += x[-2] * a2
    [all …]

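The comments in this excerpt spell the computation out: the LPC coefficients are kept packed two per register (a0,a1 in r5 and a2,a3 in r6), and each SMLAxy adds one x[i-j]*a[j] product into one of four interleaved accumulators (i1-i4). In scalar form that is the standard LPC residual filter; the sketch below uses illustrative names and leaves out the final rounding and 16-bit store, which are not part of the excerpt:

    #include <stdint.h>

    /* Sketch only: acc[i] = sum over j=0..m of a[j] * x[i-j].
     * x is assumed to have at least m samples of history before index 0. */
    void residu_sketch(const int16_t *a, const int16_t *x, int32_t *acc,
                       int lg, int m)
    {
        for (int i = 0; i < lg; i++) {
            int32_t s = 0;
            for (int j = 0; j <= m; j++)
                s += a[j] * x[i - j];     /* e.g. @i3(2) --- r11 += x[0] * a2 */
            acc[i] = s;
        }
    }
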
D | scale_sig_opt.s
    58  MOV r6, r5, LSL #16 @L_tmp = x[i] << 16
    59  MOV r5, r6, ASR r7 @L_tmp >>= exp

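Both matches are the whole per-sample scaling step: the sample is moved to the top half of a 32-bit word and arithmetically shifted back by exp, i.e. a scale by 2^(16-exp) before the high half is extracted later (the extract/round is outside this excerpt). As a one-function sketch:

    #include <stdint.h>

    /* Sketch only: the two MOVs at lines 58-59; exp assumed non-negative here. */
    static int32_t scale_one(int16_t x, int exp)
    {
        int32_t L_tmp = (int32_t)x << 16;   /* MOV r6, r5, LSL #16 */
        return L_tmp >> exp;                /* MOV r5, r6, ASR r7  */
    }
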
/frameworks/av/media/libstagefright/codecs/mp3dec/src/asm/

D | pvmp3_polyphase_filter_window_gcc.s
    72  ldr r6,[r12,#0x780]
    74  smlal r2,r11,lr,r6
    78  smull r6,r5,r2,r6
    83  smlal r6,r9,r5,r7
    84  smull r6,r2,r5,r8
    91  ldr r6,[r12,#0x680]
    93  smlal lr,r11,r2,r6
    97  smull r6,r5,r2,r6
    98  ldr r6,[r1,#0x18]
    101  smlal r5,r9,r6,r7
    [all …]

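The smull/smlal instructions here are 32x32->64-bit multiplies, accumulating window-coefficient-times-sample products in a register pair (low word in RdLo, high word in RdHi). Their C equivalent is simply a 64-bit multiply-accumulate; the helper name below is illustrative:

    #include <stdint.h>

    /* Sketch only: what one smlal rLo, rHi, rm, rs contributes. */
    static int64_t mac64(int64_t acc, int32_t coeff, int32_t sample)
    {
        return acc + (int64_t)coeff * sample;
    }
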
D | pvmp3_mdct_18_gcc.s
    47  mov r6,r1
    143  ldr r3,[r6,r4,lsl #2]
    153  str r2,[r6,r4,lsl #2]
    161  ldr r2,[r6,#0x18]
    170  ldr r2,[r6,#0x1c]
    172  str r3,[r6,#0x18]
    176  str r0,[r6,#0x1c]
    178  ldr r0,[r6,#0x20]
    189  str r1,[r6,#0x20]
    195  ldr r3,[r6,#0x24]
    [all …]

D | pvmp3_dct_16_gcc.s
    64  ldr r6,constant5
    82  smull r7,r4,r6,r4
    91  ldr r6,constant7
    93  smull r8,lr,r6,lr
    99  sub r6,r12,lr
    100  mov r6,r6,lsl #1
    101  smull r9,r6,r8,r6
    193  add r4,r4,r6
    194  sub r5,r5,r6
    195  ldr r6,constant9
    [all …]

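The ldr r6,constantN / smull rLo,rHi,r6,rX pairs are a fixed-point multiply-by-constant idiom: the 64-bit product lands in a register pair and the high word (RdHi) carries the scaled result. Assuming the low word is discarded, as RdLo often is in these Q32-style multiplies (this excerpt does not show how it is used), the C form is:

    #include <stdint.h>

    /* Sketch only: keep the top 32 bits of a 32x32 product. */
    static int32_t fxp_mul32_high(int32_t x, int32_t c)
    {
        return (int32_t)(((int64_t)x * c) >> 32);   /* smull rLo, rHi, c, x -> rHi */
    }
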
D | pvmp3_dct_9_gcc.s
    59  add r6,r5,r7
    63  add r7,r7,r6
    109  mov r1,r6,lsl #1
    111  and r6,r10,r11,asr #14
    112  smlal r12,r8,r6,r1
    114  add r12,r11,r6
    122  smull r2,r6,r9,r1
    126  smlal r12,r6,r10,r2
    132  smlal r12,r6,r7,r3
    144  smlal r8,r6,r7,r12
    [all …]

/frameworks/av/media/libstagefright/codecs/m4v_h263/dec/src/

D | idct.cpp
    131  int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */ in idct_intra() local
    155  r6 = blk[B_SIZE * 5 + i]; in idct_intra()
    158  if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7)) in idct_intra()
    187  r8 = W3 * (r6 + r7); in idct_intra()
    188  r6 = (r8 - (W3 - W5) * r6); in idct_intra()
    199  r1 = r4 + r6; in idct_intra()
    200  r4 -= r6; in idct_intra()
    201  r6 = r5 + r7; in idct_intra()
    221  tmpBLK32[(3<<3) + i] = (r8 + r6) >> 8; in idct_intra()
    222  tmpBLK32[(4<<3) + i] = (r8 - r6) >> 8; in idct_intra()
    [all …]

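idct.cpp:187-188 show the usual three-multiply rotation in this kind of IDCT: W3*(r6+r7) is shared so that the pair (W5*r6 + W3*r7, W3*r6 - W5*r7) costs three multiplies instead of four. The matching r7 update is not in the excerpt, so the last line of the sketch is inferred from that identity rather than quoted from the file:

    #include <stdint.h>

    /* Sketch only: shared-product butterfly, as at idct.cpp:187-188. */
    static void rotate3(int32_t a, int32_t b, int32_t W3, int32_t W5,
                        int32_t *pa, int32_t *pb)
    {
        int32_t t = W3 * (a + b);       /* r8 = W3 * (r6 + r7)              */
        *pa = t - (W3 - W5) * a;        /* r6 = (r8 - (W3 - W5) * r6)       */
        *pb = t - (W3 + W5) * b;        /* inferred companion update for r7 */
    }
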
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/

D | cor_h_vec_neon.s
    32  @r6 ---- cor_2[]
    49  MOV r6, #0 @L_sum2 = 0
    60  MLA r6, r12, r14, r6
    64  MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
    68  ADD r10, r6, r14
    71  MOV r6, r10, ASR #16
    77  MUL r14, r6, r11
    79  MOV r6, r14, ASR #15
    87  ADD r6, r6, r11
    89  STRH r6, [r12]
    [all …]

D | Norm_Corr_neon.s
    32  @ r6 --- corr_norm[]
    98  QADD r6, r9, r10
    99  QADD r6, r6, r6
    100  QADD r9, r6, r7 @L_tmp = (L_tmp << 1) + 1;
    102  SUB r6, r7, #1 @exp = norm_l(L_tmp)
    103  RSB r7, r6, #32 @exp = 32 - exp
    104  MOV r6, r7, ASR #1
    105  RSB r7, r6, #0 @scale = -(exp >> 1)
    164  VMOV.S32 r6, D20[0]
    167  @r5 --- L_tmp, r6 --- L_tmp1
    [all …]

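Lines 98-105 are the energy normalisation commented in the source: the correlation energy is doubled with saturation, incremented, its norm_l() exponent taken, and the shift scale derived from it. A C sketch of those comments, assuming L_tmp is positive so norm_l() is clz-1 (which matches the SUB ..., #1 at line 102), ignoring the QADD saturation, and using the GCC/Clang builtin only for illustration:

    #include <stdint.h>

    /* Sketch only: exp/scale computation per the inline comments. */
    static int scale_from_energy(int32_t L_tmp)
    {
        L_tmp = (L_tmp << 1) + 1;                      /* QADDs; L_tmp = (L_tmp << 1) + 1 */
        int exp = __builtin_clz((uint32_t)L_tmp) - 1;  /* exp = norm_l(L_tmp)             */
        exp = 32 - exp;                                /* RSB r7, r6, #32                 */
        return -(exp >> 1);                            /* scale = -(exp >> 1)             */
    }
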
D | convolve_neon.s
    42  MOV r6, r0
    43  LDRSH r9, [r6], #2 @ *tmpX++
    54  VLD1.S16 D0, [r6]!
    74  MOV r6, r0
    75  LDRSH r9, [r6], #2 @ *tmpX++
    77  LDRSH r12, [r6], #2
    90  VLD1.S16 D0, [r6]!
    110  MOV r6, r0
    111  LDRSH r9, [r6], #2
    113  LDRSH r12, [r6], #2
    [all …]

D | Deemph_32_neon.s
    38  LDRSH r6, [r0], #2 @load x_hi[0]
    44  MOV r10, r6, LSL #16 @L_tmp = x_hi[0]<<16
    50  LDRSH r6, [r0], #2 @load x_hi[1]
    58  MOV r10, r6, LSL #16
    69  LDRSH r6, [r0], #2 @load x_hi[]
    72  MOV r10, r6, LSL #16
    77  LDRSH r6, [r0], #2 @load x_hi[]
    83  MOV r10, r6, LSL #16

D | Syn_filt_32_neon.s
    34  @ lg --- r6
    45  LDRSH r6, [r0], #2 @ load Aq[0]
    47  MOV r3, r6, ASR r7 @ a0 = Aq[0] >> (4 + Q_new)
    66  LDRSH r6, [r2], #2 @exc[i]
    78  MUL r12, r6, r3 @exc[i] * a0

D | Filt_6k_7k_neon.s
    57  ADD r6, r13, #60 @ get x[L_FIR - 1] address
    72  VST1.S16 {Q10, Q11}, [r6]!
    79  VST1.S16 {Q12, Q13}, [r6]!
    80  VST1.S16 {Q0, Q1}, [r6]!
    81  VST1.S16 {Q10, Q11}, [r6]!
    82  VST1.S16 {Q2, Q3}, [r6]!

/frameworks/rs/cpu_ref/

D | rsCpuIntrinsics_neon_3DLUT.S
    25  vmov r6, r7, \src
    27  add r6, r6, r3
    30  vld1.u8 d16, [r6], r4
    33  vld1.u8 d18, [r6], r5
    46  vld1.u8 d18, [r6]
    49  sub r6, r6, r4
    52  vld1.u8 d16, [r6]
    109  push {r4,r5,r6,r7}
    112  ldr r6, [sp, #24]
    118  vmov.u16 d8[0], r6
    [all …]

D | rsCpuIntrinsics_neon_Convolve.S
    129  ldr r6, [sp, #28 + 64]
    132  vld1.16 {d0, d1, d2, d3}, [r6]!
    133  vld1.16 {d4, d5, d6}, [r6]
    138  ldr r6, [sp, #32 + 64]
    279  subs r6, r6, #1

D | rsCpuIntrinsics_neon_Resize.S
    57  vld1.u8 d20, [r6]!
    85  vld1.u32 d20[0], [r6]!
    178  push {r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
    201  ldrd r6,r7, [lr,#112] // src1, src2
    276  sub r6, r6, r10, LSL #COMPONENT_SHIFT
    328  sub r6, r6, #8
    332  add r6, r6, lr, LSL #COMPONENT_SHIFT
    379  sub r6, r6, #COMPONENT_COUNT
    797  pop {r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
