/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
D | cor_h_vec_neon.s |
    50   ADD r9, r1, r2, LSL #1       @p2 = &vec[pos]
    56   LDRSH r8, [r9], #2
    57   LDRSH r14, [r9]
    69   ADD r9, r5, r14
    70   MOV r5, r9, ASR #16
    72   ADD r9, r3, r2, LSL #1       @address of sign[pos]
    74   LDRSH r10, [r9], #2          @sign[pos]
    75   LDRSH r11, [r9]              @sign[pos + 1]
    80   LDR r9, [r13, #44]
    84   ADD r9, r9, r4, LSL #1
    [all …]

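The LSL #1 in these address computations scales an int16_t element index to a byte offset, and the post-indexed LDRSH then walks the array. A minimal C equivalent of just the addressing pattern, with illustrative names (only vec and pos come from the comments above):

    #include <stdint.h>

    static void load_pair(const int16_t *vec, int pos,
                          int16_t *v0, int16_t *v1)
    {
        const int16_t *p2 = &vec[pos];  /* ADD r9, r1, r2, LSL #1 */
        *v0 = *p2++;                    /* LDRSH r8, [r9], #2     */
        *v1 = *p2;                      /* LDRSH r14, [r9]        */
    }

The same sequence appears in cor_h_vec_opt.s under ARMV5E below.
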
D | residu_asm_neon.s |
    41   ADD r9, r1, r7, LSL #1       @copy the address
    43   MOV r8, r9
    47   SUB r8, r9, #2               @get the x[i-1] address
    51   SUB r8, r9, #4               @load the x[i-2] address
    55   SUB r8, r9, #6               @load the x[i-3] address
    59   SUB r8, r9, #8               @load the x[i-4] address
    63   SUB r8, r9, #10              @load the x[i-5] address
    67   SUB r8, r9, #12              @load the x[i-6] address
    71   SUB r8, r9, #14              @load the x[i-7] address
    75   SUB r8, r9, #16              @load the x[i-8] address
    [all …]

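The byte offsets -2 through -16 from &x[i] reach back over x[i-1] through x[i-8]: the past samples of the LP residual filter. A sketch of the computation these addresses feed, shaped like the 3GPP reference Residu() with Q12 coefficients; the exact rounding and saturation in the assembly are elided:

    #include <stdint.h>

    #define M 16  /* AMR-WB LP order; the matches above show taps 1..8 */

    void residu_sketch(const int16_t *a, const int16_t *x,
                       int16_t *y, int lg)
    {
        for (int i = 0; i < lg; i++) {
            int32_t s = (int32_t)a[0] * x[i];
            for (int j = 1; j <= M; j++)
                s += (int32_t)a[j] * x[i - j];  /* x[i-1] ... x[i-16]    */
            y[i] = (int16_t)(s >> 12);          /* Q12 -> Q0, simplified */
        }
    }
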
D | convolve_neon.s |
    43   LDRSH r9, [r6], #2           @ *tmpX++
    47   MUL r8, r9, r10
    53   MOV r9, r4
    55   VLD1.S16 D1, [r9]!
    75   LDRSH r9, [r6], #2           @ *tmpX++
    80   MUL r8, r9, r10
    89   MOV r9, r4
    91   VLD1.S16 D1, [r9]!
    111  LDRSH r9, [r6], #2
    115  MUL r8, r9, r10
    [all …]

D | Deemph_32_neon.s |
    49   MUL r9, r5, r8
    51   QDADD r10, r10, r9
    62   MUL r9, r14, r8
    63   QDADD r10, r10, r9
    74   MUL r9, r14, r8
    76   QDADD r10, r10, r9
    86   MUL r9, r14, r8
    88   QDADD r10, r10, r9

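Each MUL/QDADD pair is a Q15 fractional multiply-accumulate: QDADD rd, rn, rm computes sat32(rn + sat32(2*rm)), where the doubling promotes a 16x16 product to a Q31 term. A sketch of one step, assuming a Q15 de-emphasis factor mu and 16-bit state as in Deemph_32 (the double-saturation corner case is folded into one saturating add):

    #include <stdint.h>

    static int32_t sat32(int64_t v)
    {
        if (v > INT32_MAX) return INT32_MAX;
        if (v < INT32_MIN) return INT32_MIN;
        return (int32_t)v;
    }

    int32_t mul_qdadd(int32_t acc, int16_t mu, int16_t y_prev)
    {
        int32_t p = (int32_t)mu * y_prev;           /* MUL   r9, r5, r8  */
        return sat32((int64_t)acc + 2*(int64_t)p);  /* QDADD r10,r10,r9  */
    }

Deemph_32_opt.s in the ARMV5E directory below shows the identical sequence.
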
D | Norm_Corr_neon.s |
    96   VMOV.S32 r9, D20[0]
    98   QADD r6, r9, r10
    100  QADD r9, r6, r7              @L_tmp = (L_tmp << 1) + 1;
    101  CLZ r7, r9
    236  MOV r9, r2                   @ h[]
    240  ADD r9, r9, r6, LSL #1       @ h[i] address
    245  LDRSH r8, [r9], #-2          @ load h[i]
    254  LDRSH r8, [r9]               @ load h[0]

D | syn_filt_neon.s |
    86   VMOV.S16 r9, D20[0]
    89   STRH r9, [r10]               @ yy[i]
    90   STRH r9, [r2], #2            @ y[i]

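The two STRH lines store each new sample twice: into the scratch history yy[] that feeds the recursion, and into the output y[]. A scalar sketch of the synthesis recursion, shaped like the reference Syn_filt() with Q12 coefficients; names follow the comments above and rounding is simplified:

    #include <stdint.h>

    #define M 16  /* LP order */

    /* yy must point 16 samples into a buffer whose head holds the
       previous frame's output, i.e. the filter memory. */
    void syn_filt_sketch(const int16_t *a, const int16_t *x,
                         int16_t *y, int16_t *yy, int lg)
    {
        for (int i = 0; i < lg; i++) {
            int32_t s = (int32_t)a[0] * x[i];
            for (int j = 1; j <= M; j++)
                s -= (int32_t)a[j] * yy[i - j];  /* feedback on history */
            yy[i] = (int16_t)(s >> 12);          /* STRH r9, [r10]      */
            y[i]  = yy[i];                       /* STRH r9, [r2], #2   */
        }
    }
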
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
D | cor_h_vec_opt.s |
    49   ADD r9, r1, r2, LSL #1       @p2 = &vec[pos]
    55   LDRSH r8, [r9], #2
    56   LDRSH r14, [r9]
    68   ADD r9, r5, r14
    69   MOV r5, r9, ASR #16
    71   ADD r9, r3, r2, LSL #1       @address of sign[pos]
    73   LDRSH r10, [r9], #2          @sign[pos]
    74   LDRSH r11, [r9]              @sign[pos + 1]
    79   LDR r9, [r13, #44]
    83   ADD r9, r9, r4, LSL #1
    [all …]

D | pred_lt4_1_opt.s |
    68   LDRSH r9, [r1], #2           @x[2]
    72   SMULBB r12, r9, r3           @x[2] * h[0]
    76   SMLABT r11, r9, r3, r11      @x[2] * h[1]
    81   SMLABB r10, r9, r3, r10      @x[2] * h[2]
    85   LDRSH r9, [r1], #2           @x[5]
    88   SMLABT r12, r9, r3, r12      @x[5] * h[3]
    93   SMLABB r11, r9, r3, r11      @x[5] * h[4]
    97   SMLABT r10, r9, r3, r10      @x[5] * h[5]
    102  LDRSH r9, [r1], #2           @x[8]
    105  SMLABB r12, r9, r3, r12      @x[8] * h[6]
    [all …]

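Each sample is loaded once (LDRSH) and reused against several packed taps: SMLA<x><y> selects the Bottom or Top halfword of each source operand, so r3 can hold two Q15 interpolation coefficients at a time. Scalar decode of the two forms used here:

    #include <stdint.h>

    int32_t smlabb(int32_t acc, uint32_t rn, uint32_t rm)
    {
        return acc + (int32_t)(int16_t)rn * (int16_t)rm;          /* B x B */
    }

    int32_t smlabt(int32_t acc, uint32_t rn, uint32_t rm)
    {
        return acc + (int32_t)(int16_t)rn * (int16_t)(rm >> 16);  /* B x T */
    }

With two coefficients packed per register (reloaded as the filter advances), the @x[2] * h[0] and @x[2] * h[1] comments map directly onto these helpers.
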
D | convolve_opt.s |
    42   LDRSH r9, [r6], #2           @ *tmpX++
    45   MUL r8, r9, r10
    50   LDRSH r9, [r6], #2           @ *tmpX++
    54   MLA r8, r9, r10, r8
    56   LDRSH r9, [r6], #2           @ *tmpX++
    60   MLA r8, r9, r10, r8
    77   LDRSH r9, [r6], #2           @ *tmpX++
    82   MUL r8, r9, r10
    89   LDRSH r9, [r6], #2           @ *tmpX++
    93   MLA r8, r9, r10, r8
    [all …]

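A MUL opens each output sample and MLAs extend the running sum: the growing triangular convolution used throughout the codec. A sketch following the shape of the reference Convolve(), with the usual Q15 rescale assumed for the output:

    #include <stdint.h>

    void convolve_sketch(const int16_t *x, const int16_t *h,
                         int16_t *y, int L)
    {
        for (int n = 0; n < L; n++) {
            int32_t s = 0;                  /* MUL, then an MLA chain */
            for (int i = 0; i <= n; i++)
                s += (int32_t)x[i] * h[n - i];
            y[n] = (int16_t)(s >> 15);      /* Q15 taps -> Q0 output  */
        }
    }
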
D | syn_filt_opt.s |
    50   LDRH r9, [r4], #2
    59   STRH r9, [r5], #2
    68   LDRH r9, [r4], #2
    77   STRH r9, [r5], #2
    92   LDRSH r9, [r0, #6]           @ load a[3]
    95   AND r9, r9, r14
    97   ORR r12, r9, r11, LSL #16    @ -a[4] -- -a[3]
    103  LDRSH r9, [r0, #14]          @ load a[7]
    106  AND r9, r9, r14
    108  ORR r12, r9, r11, LSL #16    @ -a[8] -- -a[7]
    [all …]

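The LDRH/STRH pairs at the top copy the filter memory into a local buffer; the AND/ORR sequence then packs two negated Q12 coefficients into one word (-a[3] in the bottom half, -a[4] in the top) so a single register feeds two SMLAxy taps. Equivalent packing in C, with illustrative names:

    #include <stdint.h>

    uint32_t pack_pair(int16_t lo, int16_t hi)
    {
        return (uint32_t)(uint16_t)lo           /* AND r9, r9, r14: 0xFFFF mask */
             | ((uint32_t)(uint16_t)hi << 16);  /* ORR r12, r9, r11, LSL #16    */
    }
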
D | Syn_filt_32_opt.s |
    53   LDRSH r9, [r0, #8]           @ load Aq[4]
    57   ORR r11, r8, r9, LSL #16     @ Aq[4] -- Aq[3]
    64   LDRSH r9, [r0, #16]          @ load Aq[8]
    68   ORR r11, r8, r9, LSL #16     @ Aq[8] -- Aq[7]
    75   LDRSH r9, [r0, #24]          @ load Aq[12]
    79   ORR r11, r8, r9, LSL #16     @ Aq[12] -- Aq[11]
    86   LDRSH r9, [r0, #32]          @ load Aq[16]
    90   ORR r11, r8, r9, LSL #16     @ Aq[16] -- Aq[15]
    101  LDRSH r9, [r5, #-6]          @ load sig_lo[i-3]
    110  SMLABB r12, r9, r11, r12     @ sig_lo[i-3] * Aq[3]
    [all …]

D | residu_asm_opt.s |
    48   LDRH r9, [r0], #2
    49   ORR r8, r9, r8, LSL #16      @r8 --- a6, a7
    51   LDRH r9, [r0], #2
    53   ORR r9, r10, r9, LSL #16     @r9 --- a8, a9
    119  SMLATT r12,r9, r2, r12       @ i4 (8)
    122  SMLATB r11,r9, r2, r11       @ i3 (8)
    123  SMLABB r12,r9, r2, r12       @ i4 (9)
    128  SMLATT r4, r9, r2, r4        @ i2 (8)
    129  SMLABT r11,r9, r2, r11       @ i3 (9)
    131  SMLATB r3, r9, r2, r3        @ i1 (8)
    [all …]

D | Filt_6k_7k_opt.s |
    57   LDRSH r9, [r7], #2
    59   MOV r9, r9, ASR #2
    65   STRH r9, [r6], #2
    69   LDRSH r9, [r7], #2
    71   MOV r9, r9, ASR #2
    77   STRH r9, [r6], #2
    100  LDRSH r9, [r4, #56]          @ load x[i + 28]
    103  ADD r8, r8, r9               @ x[i + 2] + x[i + 28]
    115  LDRSH r9, [r4, #50]          @ load x[i+25]
    117  ADD r8, r8, r9               @ x[i+5] + x[i+25]
    [all …]

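The ASR #2 prescale guards the accumulator against overflow, and the paired loads add samples whose tap indices sum to 30: the 31-tap band-pass filter is symmetric (h[k] == h[30-k]), so each coefficient multiplies a pre-added sample pair and half the multiplies disappear. Sketch with an assumed Q15 half-filter fir[16]:

    #include <stdint.h>

    int16_t fir31_sym(const int16_t *x, const int16_t *fir)
    {
        int32_t s = (int32_t)x[15] * fir[15];           /* centre tap  */
        for (int k = 0; k < 15; k++)
            s += (int32_t)(x[k] + x[30 - k]) * fir[k];  /* mirror pair */
        return (int16_t)(s >> 15);
    }
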
D | Dot_p_opt.s |
    43   LDR r9, [r1], #4
    47   SMLABB r4, r8, r9, r4
    50   SMLATT r4, r8, r9, r4
    54   LDR r9, [r1], #4
    57   SMLABB r4, r8, r9, r4
    59   SMLATT r4, r8, r9, r4

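One LDR fetches two adjacent 16-bit samples in a single word; SMLABB then accumulates the product of the bottom halves and SMLATT that of the top halves. Scalar equivalent of the unrolled accumulation, shaped like the reference Dot_product12, which afterwards doubles the sum, adds 1, and normalizes:

    #include <stdint.h>

    int32_t dot_sketch(const int16_t *x, const int16_t *y, int n)
    {
        int32_t s = 0;
        for (int i = 0; i < n; i += 2) {
            s += (int32_t)x[i]     * y[i];      /* SMLABB r4, r8, r9, r4 */
            s += (int32_t)x[i + 1] * y[i + 1];  /* SMLATT r4, r8, r9, r4 */
        }
        return s;
    }
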
D | Deemph_32_opt.s |
    49   MUL r9, r5, r8
    51   QDADD r10, r10, r9
    62   MUL r9, r14, r8
    63   QDADD r10, r10, r9
    74   MUL r9, r14, r8
    76   QDADD r10, r10, r9
    86   MUL r9, r14, r8
    88   QDADD r10, r10, r9

D | Norm_Corr_opt.s |
    77   LDR r9, [r14], #4
    81   SMLABB r6, r9, r9, r6        @L_tmp += (xn[i] * xn[i])
    82   SMLATT r6, r9, r9, r6        @L_tmp += (xn[i+1] * xn[i+1])
    92   ADD r9, r7, r6, LSL #1       @L_tmp = (L_tmp << 1) + 1
    93   CLZ r7, r9
    106  MOV r9, #64
    124  SUBS r9, r9, #4
    197  MOV r9, r2                   @ h[]
    201  ADD r9, r9, r6, LSL #1       @ h[i] address
    206  LDRSH r8, [r9], #-2          @ load h[i]
    [all …]

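Here r9 squares packed sample pairs into an energy sum, and CLZ implements norm_l(): the shift that brings (L_tmp << 1) + 1 to full 32-bit scale before the division in the normalized correlation. A sketch, expressing the CLZ with the GCC builtin:

    #include <stdint.h>

    int energy_exp(const int16_t *xn, int len)
    {
        int32_t L_tmp = 0;
        for (int i = 0; i < len; i += 2) {
            L_tmp += (int32_t)xn[i]     * xn[i];      /* SMLABB */
            L_tmp += (int32_t)xn[i + 1] * xn[i + 1];  /* SMLATT */
        }
        uint32_t v = ((uint32_t)L_tmp << 1) + 1;  /* ADD r9, r7, r6, LSL #1 */
        return __builtin_clz(v) - 1;              /* CLZ r7, r9: norm_l()   */
    }
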
D | scale_sig_opt.s |
    39   MOV r9, #0x8000
    49   QADD r11, r12, r9
    60   QADD r11, r5, r9

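MOV r9, #0x8000 loads the rounding constant once; each QADD then adds it with saturation before the high halfword of the result is kept (the extraction itself is outside the matched lines). This is the fixed-point round() idiom:

    #include <stdint.h>

    int16_t round_sketch(int32_t L_var)
    {
        int64_t v = (int64_t)L_var + 0x8000;  /* QADD r11, r12, r9          */
        if (v > INT32_MAX) v = INT32_MAX;     /* QADD saturates; a positive
                                                 bias cannot underflow      */
        return (int16_t)(v >> 16);            /* keep the high halfword     */
    }
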
/frameworks/av/media/libstagefright/codecs/mp3dec/src/asm/
D | pvmp3_polyphase_filter_window_gcc.s |
    59   @ Accumulators r9, r11::> Initialization
    62   mov r9, #0x20
    73   smlal r2,r9,lr,r5
    79   sub r9,r9,r5
    83   smlal r6,r9,r5,r7
    87   smlal r8,r9,r5,r8
    92   smlal lr,r9,r2,r5
    100  sub r9,r9,r5
    101  smlal r5,r9,r6,r7
    105  smlal r8,r9,r6,r8
    [all …]

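smlal rLo,rHi,a,b adds the full 64-bit product a*b into the register pair {rHi:rLo}; per the comment at line 59, r9 is the high word of one of the two accumulators, seeded before the chain begins. Portable equivalent of one accumulation step:

    #include <stdint.h>

    int64_t mac64(int64_t acc, int32_t sample, int32_t coeff)
    {
        return acc + (int64_t)sample * coeff;  /* smlal r2, r9, lr, r5 */
    }
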
D | pvmp3_dct_9_gcc.s |
    58   ldr r9,[r0, #0x10]
    62   add r8,r9,r2
    67   rsb r2,r9,r2,asr #1
    73   mov r9,r1,lsl #1
    78   smlal r1,r8,r11,r9
    80   mov r1,r9                    @@@@@@ !!!!!!
    84   smlal r1,r7,r10,r9
    91   smlal r9,r2,r11,r9
    92   rsb r9,r10,#0
    95   smlal r12,r2,r9,r1
    [all …]

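The add/rsb pair reads like a DCT butterfly with a free coefficient: RSB reverses the operand order, and the asr #1 folds in an exact factor of 1/2 so no multiply is needed for that term. Scalar form of those two lines:

    #include <stdint.h>

    void butterfly(int32_t a, int32_t b, int32_t *sum, int32_t *diff)
    {
        *sum  = a + b;         /* add r8, r9, r2         */
        *diff = (b >> 1) - a;  /* rsb r2, r9, r2, asr #1 */
    }
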
D | pvmp3_dct_16_gcc.s |
    101  smull r9,r6,r8,r6
    103  ldr r9,constant9
    106  smull r12,lr,r9,r12
    109  sub r9,r1,r12
    110  smull r11,r9,r10,r9
    112  str r9,[sp,#0xc]
    113  ldr r9,[r0,#0x10]
    115  str r9,[sp,#0x18]
    117  sub r9,r9,r1
    118  smull r11,r9,r10,r9
    [all …]

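ldr r9,constant9 pulls a fixed-point twiddle factor from a literal pool, and smull applies it with a full 64-bit product of which only the high word is kept. A sketch of that multiply, assuming the constants use a Q31-like format (any residual factor of two is absorbed into how the constant is scaled):

    #include <stdint.h>

    static inline int32_t mul_hi(int32_t x, int32_t c)
    {
        return (int32_t)(((int64_t)x * c) >> 32);  /* smull lo,hi,x,c; keep hi */
    }
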
D | pvmp3_mdct_18_gcc.s |
    71   ldr r9,[r1]
    73   smull r9,r10,r8,r9
    74   mov r8,r9,lsr #27
    80   add r9,lr,r8
    83   str r9,[r0],#4
    84   smull r8,r9,lr,r8
    86   add lr,lr,r9,lsl #4
    217  rsb r9,r0,#0
    218  str r9,[r5,#0x2c]
    241  mov r0,r9,lsl #1

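Here the 64-bit smull result is rescaled by shifting its halves: the lsr #27 on a low word and the lsl #4 on a high word are pieces of extracting a fixed-point product at a chosen Q point. Portable form of that pattern (the shift amounts 27 and 28 are read off the instructions, not confirmed against the source):

    #include <stdint.h>

    static inline int32_t fxmul(int32_t a, int32_t b, int shift)
    {
        return (int32_t)(((int64_t)a * b) >> shift);  /* e.g. shift = 27 */
    }
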
/frameworks/rs/cpu_ref/
D | rsCpuIntrinsics_neon_Resize.S |
    178  push {r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
    191  adr r9, 8f
    193  vld1.s16 {q5}, [r9]
    210  mov r9, r3, LSL #VECSHIFT
    211  add r8, r8, r9
    216  str r9, [sp,#OSCSTEP_STORE]
    239  add r9, r12, #(2 * CHUNKSIZE - 4) * COMPONENT_COUNT * 2
    246  vst1.s16 {d24}, [r9]
    252  vst1.s16 {q12}, [r9]
    264  vst1.s16 {q11,q12}, [r9]
    [all …]

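The immediate #(2 * CHUNKSIZE - 4) * COMPONENT_COUNT * 2 turns an element count into a byte offset at assembly time; the trailing * 2 is the int16 size that C pointer arithmetic applies implicitly. The same computation in C, with the macro values as assumptions (they are defined elsewhere in the file):

    #include <stdint.h>

    #define CHUNKSIZE        64  /* assumed */
    #define COMPONENT_COUNT   4  /* assumed: RGBA */

    static int16_t *tail_ptr(int16_t *base)
    {
        /* 4 pixels before the end of a two-chunk staging buffer */
        return base + (2 * CHUNKSIZE - 4) * COMPONENT_COUNT;
    }
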
D | rsCpuIntrinsics_neon_Blur.S |
    741  112: add r12, r9, #0x1a0
    748  111: add r12, r9, #0x1a8
    757  110: add r12, r9, #0x1b0
    764  109: add r12, r9, #0x1b8
    773  108: add r12, r9, #0x1c0
    780  107: add r12, r9, #0x1c8
    789  106: add r12, r9, #0x1d0
    796  105: add r12, r9, #0x1d8
    805  104: add r12, r9, #0x1e0
    812  103: add r12, r9, #0x1e8
    [all …]

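The descending local labels (112:, 111:, …), each computing a pointer a further 8 bytes along, read like entry points into one long unrolled loop, selected by how much work remains: the assembly analogue of a Duff's-device switch. Illustrative C only; the real kernel gathers blur taps, not bytes:

    #include <stdint.h>

    void tail_copy(uint8_t *dst, const uint8_t *src, int n)  /* n in [0,7] */
    {
        switch (n) {  /* jump straight to the right entry, like 107:, 106:, ... */
        case 7: dst[6] = src[6]; /* fall through */
        case 6: dst[5] = src[5]; /* fall through */
        case 5: dst[4] = src[4]; /* fall through */
        case 4: dst[3] = src[3]; /* fall through */
        case 3: dst[2] = src[2]; /* fall through */
        case 2: dst[1] = src[1]; /* fall through */
        case 1: dst[0] = src[0]; /* fall through */
        case 0: break;
        }
    }
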
D | rsCpuIntrinsics_x86.cpp |
    603  __m128i r0, r1, r2, r3, r4, r5, r6, r7, r8, r9;  // local in rsdIntrinsicConvolve5x5_K()

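On x86 the same register pressure shows up as ten __m128i locals in rsdIntrinsicConvolve5x5_K(): row data and accumulators for the 5x5 kernel. A minimal SSE2 sketch of one row's multiply-accumulate (the real intrinsic's widening, packing, and saturation are elided):

    #include <emmintrin.h>

    static __m128i mac_row(__m128i acc, __m128i pix16, __m128i coef16)
    {
        /* eight 16-bit lanes: acc += pix * coef (low halves kept) */
        return _mm_add_epi16(acc, _mm_mullo_epi16(pix16, coef16));
    }
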