/external/boringssl/linux-arm/crypto/sha/
  sha1-armv4-large.S
    23  stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
    37  ldrb r11,[r1,#1]
    42  orr r9,r9,r11,lsl#16
    62  ldrb r11,[r1,#1]
    67  orr r9,r9,r11,lsl#16
    87  ldrb r11,[r1,#1]
    92  orr r9,r9,r11,lsl#16
    112  ldrb r11,[r1,#1]
    117  orr r9,r9,r11,lsl#16
    137  ldrb r11,[r1,#1]
    [all …]
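The ldrb/orr chains above assemble each big-endian 32-bit message word one byte at a time; ARMv4 has neither unaligned loads nor a byte-reverse instruction. A minimal C sketch of the access pattern (helper name assumed, not from the source):

    #include <stdint.h>

    /* Big-endian load from a possibly unaligned pointer; the .S file
     * interleaves these byte merges with the SHA-1 rounds. */
    static uint32_t load_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) |
               ((uint32_t)p[1] << 16) |  /* the "orr r9,r9,r11,lsl#16" step */
               ((uint32_t)p[2] << 8)  |
                (uint32_t)p[3];
    }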
/external/libvpx/libvpx/vp8/common/arm/armv6/
  filter_v6.asm
    32  stmdb sp!, {r4 - r11, lr}
    34  ldr r11, [sp, #40] ; vp8_filter address
    45  ldr r4, [r11] ; load up packed filter coefficients
    46  ldr r5, [r11, #4]
    47  ldr r6, [r11, #8]
    60  ldrb r11, [r0, #-1]
    68  pkhbt r10, r10, r11, lsl #16 ; r11 | r10
    70  pkhbt r11, r11, r9, lsl #16 ; r9 | r11
    74  smlad r8, r11, r5, r8
    75  ldrb r11, [r0, #2]
    [all …]
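Here pkhbt packs two 16-bit pixels into one register so that each smlad applies two filter taps per instruction. What smlad computes, as a C sketch (the function is written out for illustration):

    #include <stdint.h>

    /* smlad rd, rx, ry, ra: dual signed 16x16 multiply, with both
     * products added to the accumulator in a single instruction. */
    static int32_t smlad(uint32_t x, uint32_t y, int32_t acc)
    {
        int16_t xl = (int16_t)(x & 0xffff), xh = (int16_t)(x >> 16);
        int16_t yl = (int16_t)(y & 0xffff), yh = (int16_t)(y >> 16);
        return acc + xl * yl + xh * yh;
    }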
  loopfilter_v6.asm
    64  stmdb sp!, {r4 - r11, lr}
    75  ldr r11, [src], pstep ; p1
    92  uqsub8 r8, r10, r11 ; p2 - p1
    93  uqsub8 r10, r11, r10 ; p1 - p2
    99  uqsub8 r6, r11, r12 ; p1 - p0
    101  uqsub8 r7, r12, r11 ; p0 - p1
    109  uqsub8 r6, r11, r10 ; p1 - q1
    110  uqsub8 r7, r10, r11 ; q1 - p1
    111  uqsub8 r11, r12, r9 ; p0 - q0
    115  orr r12, r11, r12 ; abs (p0-q0)
    [all …]
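uqsub8 saturates each byte lane at zero, so subtracting in both directions and ORing the results yields a per-byte absolute difference; the loop-filter masks above are built from exactly this trick. One lane in C (helper name assumed):

    #include <stdint.h>

    /* |a - b| without branches: uqsub8 clamps the negative direction
     * to 0, so exactly one operand of the OR is zero. */
    static uint8_t abs_diff_u8(uint8_t a, uint8_t b)
    {
        uint8_t d0 = (a > b) ? (uint8_t)(a - b) : 0;  /* uqsub8 a, b */
        uint8_t d1 = (b > a) ? (uint8_t)(b - a) : 0;  /* uqsub8 b, a */
        return d0 | d1;                               /* orr r12, r11, r12 */
    }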
/external/boringssl/src/crypto/bn/asm/
  rsaz-x86_64.pl
    176  movq %rdx, %r11
    177  adcq \$0, %r11
    180  addq %rax, %r11
    225  addq %rax, %r11
    228  addq %rbx, %r11
    267  movq %r11, %rbx
    268  adcq %r11, %r11 #shld \$1, %r10, %r11
    273  adcq \$0, %r11
    327  addq %rax, %r11
    331  movq %r11, 32(%rsp)
    [all …]
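The movq %rdx,%r11 / adcq $0,%r11 pattern is the tail of a mulq: the 128-bit product arrives in %rdx:%rax, the high half is parked in %r11, and the adcq folds in the carry from the preceding addition. A portable sketch using the unsigned __int128 extension (GCC/Clang; names assumed):

    #include <stdint.h>

    /* One multiply-accumulate limb step of the squaring/reduction loops. */
    static void mul_acc64(uint64_t a, uint64_t b, uint64_t *sum, uint64_t *hi)
    {
        unsigned __int128 t = (unsigned __int128)a * b;  /* mulq          */
        uint64_t lo = (uint64_t)t;
        *hi = (uint64_t)(t >> 64);                       /* movq %rdx,%r11 */
        *sum += lo;                                      /* addq %rax,... */
        *hi += (*sum < lo);                              /* adcq $0,%r11  */
    }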
  x86_64-mont5.pl
    68  $hi0="%r11";
    111  lea 2($num),%r11
    112  neg %r11
    113  lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+2))
    124  mov %r10,%r11
    126  and \$`$N/8-1`,%r11
    130  lea 96($bp,%r11,8),$bp # pointer within 1st cache line
    367  my @A=("%r10","%r11");
    409  lea -64(%rsp,$num,2),%r11
    410  sub $ap,%r11
    [all …]
  x86_64-mont.pl
    79  $hi0="%r11";
    121  mov %rsp,%r11
    126  mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
    309  my @A=("%r10","%r11");
    332  mov %rsp,%r11
    337  mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
    740  my @A0=("%r10","%r11");
    772  lea -64(%rsp,$num,4),%r11
    774  sub $aptr,%r11
    775  and \$4095,%r11
    [all …]
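tp[] is the scratch array those mov/lea lines wire up; each pass of the word-wise Montgomery loop multiplies in one limb of the operand and then cancels the low limb of tp with a multiple of the modulus so the array can shift down. A minimal sketch of the reduction step (names assumed; the real code also folds in the top tp limb and ends with a conditional subtraction):

    #include <stdint.h>

    /* One Montgomery reduction step: pick m so tp + m*N == 0 mod 2^64,
     * add m*N, and divide by 2^64 by shifting down one limb. */
    static void mont_reduce_step(uint64_t *tp, const uint64_t *np,
                                 int num, uint64_t n0 /* -N^-1 mod 2^64 */)
    {
        uint64_t m = tp[0] * n0;
        unsigned __int128 t = (unsigned __int128)m * np[0] + tp[0];
        uint64_t carry = (uint64_t)(t >> 64);   /* low limb is 0 by design */
        for (int i = 1; i < num; i++) {
            t = (unsigned __int128)m * np[i] + tp[i] + carry;
            tp[i - 1] = (uint64_t)t;
            carry = (uint64_t)(t >> 64);
        }
        tp[num - 1] = carry;  /* full code: carry + tp[num], tracking overflow */
    }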
/external/llvm/test/CodeGen/SystemZ/
  frame-09.ll
    1   ; Test the handling of the frame pointer (%r11).
    5   ; We should always initialise %r11 when FP elimination is disabled.
    10  ; CHECK: stmg %r11, %r15, 88(%r15)
    11  ; CHECK: .cfi_offset %r11, -72
    14  ; CHECK: lgr %r11, %r15
    15  ; CHECK: .cfi_def_cfa_register %r11
    16  ; CHECK: lmg %r11, %r15, 88(%r11)
    23  ; to %r11 rather than %r15.
    26  ; CHECK: stmg %r11, %r15, 88(%r15)
    27  ; CHECK: .cfi_offset %r11, -72
    [all …]
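The CHECK lines pin down the SystemZ frame-pointer protocol: stmg saves %r11-%r15 in one store-multiple, lgr copies the stack pointer into %r11, and the epilogue reloads through %r11 because %r15 may have moved. A C function of the shape that typically forces this prologue (assumed example, not taken from the test):

    void use(char *);

    /* Dynamic stack allocation keeps %r15 moving at runtime, so locals
     * and the epilogue need the stable base kept in %r11. */
    void needs_fp(int n)
    {
        char buf[n];   /* variable-length array: frame pointer required */
        use(buf);
    }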
  frame-07.ll
    44  ; CHECK-FP: stmg %r11, %r15, 88(%r15)
    47  ; CHECK-FP: lgr %r11, %r15
    48  ; CHECK-FP: .cfi_def_cfa_register %r11
    49  ; CHECK-FP: stdy %f8, 4120(%r11)
    50  ; CHECK-FP: stdy %f9, 4112(%r11)
    51  ; CHECK-FP: stdy %f10, 4104(%r11)
    52  ; CHECK-FP: stdy %f11, 4096(%r11)
    53  ; CHECK-FP: std %f12, 4088(%r11)
    54  ; CHECK-FP: std %f13, 4080(%r11)
    55  ; CHECK-FP: std %f14, 4072(%r11)
    [all …]
/external/tremolo/Tremolo/
  mdctARM.s
    188  STMFD r13!,{r4,r6-r11,r14}
    198  LDR r11,[r9],#4 @ r11= *wL++
    203  SMULL r14,r11,r12,r11 @ (r14,r11) = *l * *wL++
    205  ADD r6, r6, r11
    216  LDMFD r13!,{r4,r6-r11,PC}
    227  STMFD r13!,{r4,r6-r11,r14}
    237  LDR r11,[r9],#4 @ r11= *wL++
    242  SMULL r14,r11,r12,r11 @ (r14,r11) = *l * *wL++
    244  SUB r6, r6, r11
    255  LDMFD r13!,{r4,r6-r11,PC}
    [all …]
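The SMULL lines are the window overlap-add in 32-bit fixed point: the full 64-bit product of sample and window weight is formed, and only the high word (landing in r11) is kept. mdctLARM.s below is the low-precision variant of the same loop, loading byte-wide window weights with LDRB and using a plain MUL. The kept quantity, as a C sketch (helper name assumed):

    #include <stdint.h>

    /* SMULL r14,r11,r12,r11 then ADD/SUB with r11: multiply two 32-bit
     * fixed-point values and keep the top 32 bits of the product. */
    static int32_t mul_hi32(int32_t l, int32_t wL)
    {
        return (int32_t)(((int64_t)l * wL) >> 32);
    }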
  mdctLARM.s
    186  STMFD r13!,{r4,r6-r11,r14}
    198  LDRB r11,[r9],#1 @ r11= *wL++
    202  MUL r11,r12,r11 @ r11 = *l * *wL++
    204  MLA r6, r7, r6, r11 @ r6 = *--r * *--wR
    215  LDMFD r13!,{r4,r6-r11,PC}
    226  STMFD r13!,{r4,r6-r11,r14}
    237  LDRB r11,[r9],#1 @ r11= *wL++
    242  MUL r11,r12,r11 @ (r14,r11) = *l * *wL++
    245  SUB r6, r6, r11
    256  LDMFD r13!,{r4,r6-r11,PC}
    [all …]
  bitwiseARM.s
    45  STMFD r13!,{r10,r11,r14}
    56  LDRLT r11,[r3,#4]! @ r11= ptr[1]
    60  ORRLT r10,r10,r11,LSL r14 @ r10= Next 32 bits.
    64  LDMFD r13!,{r10,r11,PC}
    80  MOV r11,#1
    83  RSB r11,r11,r11,LSL r5 @ r11= mask
    84  AND r10,r10,r11 @ r10= first r5 bits
    88  LDR r11,[r0,#12] @ r11= head = b->head
    92  LDR r11,[r11,#12] @ r11= head = head->next
    95  CMP r11,#0
    [all …]
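This is Tremolo's bit reader: the conditional LDRLT/ORRLT pair refills a read that straddles a 32-bit word boundary, and with r11 preset to 1, RSB r11,r11,r11,LSL r5 builds the mask (1<<r5)-1 for the low r5 bits. The same read in C (a sketch with assumed names; n < 32):

    #include <stdint.h>

    static uint32_t read_bits(const uint32_t *ptr, unsigned bitoff, unsigned n)
    {
        uint32_t w = ptr[0] >> bitoff;
        if (bitoff + n > 32)                 /* spans into ptr[1]?           */
            w |= ptr[1] << (32 - bitoff);    /* ORRLT r10,r10,r11,LSL r14    */
        return w & ((1u << n) - 1u);         /* the RSB-built mask           */
    }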
/external/boringssl/mac-x86_64/crypto/bn/
  x86_64-mont5.S
    26  leaq 2(%r9),%r11
    27  negq %r11
    28  leaq (%rsp,%r11,8),%rsp
    34  movq %r10,%r11
    36  andq $7,%r11
    40  leaq 96(%r12,%r11,8),%r12
    84  movq %rdx,%r11
    104  addq %r11,%r13
    105  movq %r10,%r11
    112  addq %rax,%r11
    [all …]
  x86_64-mont.S
    32  movq %rsp,%r11
    37  movq %r11,8(%rsp,%r9,8)
    53  movq %rdx,%r11
    69  addq %r11,%r13
    70  movq %r10,%r11
    77  addq %rax,%r11
    90  addq %r11,%r13
    94  movq %r10,%r11
    97  addq %r11,%r13
    116  movq %rdx,%r11
    [all …]
/external/boringssl/linux-x86_64/crypto/bn/
  x86_64-mont5.S
    27  leaq 2(%r9),%r11
    28  negq %r11
    29  leaq (%rsp,%r11,8),%rsp
    35  movq %r10,%r11
    37  andq $7,%r11
    41  leaq 96(%r12,%r11,8),%r12
    85  movq %rdx,%r11
    105  addq %r11,%r13
    106  movq %r10,%r11
    113  addq %rax,%r11
    [all …]
  x86_64-mont.S
    33  movq %rsp,%r11
    38  movq %r11,8(%rsp,%r9,8)
    54  movq %rdx,%r11
    70  addq %r11,%r13
    71  movq %r10,%r11
    78  addq %rax,%r11
    91  addq %r11,%r13
    95  movq %r10,%r11
    98  addq %r11,%r13
    117  movq %rdx,%r11
    [all …]
/external/libhevc/common/arm/
  ihevc_intra_pred_chroma_ver.s
    122  lsl r11, r3, #2
    124  add r11, r11, #0xfffffff0
    132  vst2.8 {d22,d23}, [r2], r11
    133  vst2.8 {d22,d23}, [r5], r11
    134  vst2.8 {d22,d23}, [r8], r11
    135  vst2.8 {d22,d23}, [r10], r11
    145  vst2.8 {d22,d23}, [r2], r11
    146  vst2.8 {d22,d23}, [r5], r11
    147  vst2.8 {d22,d23}, [r8], r11
    148  vst2.8 {d22,d23}, [r10], r11
    [all …]
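Vertical intra prediction copies the reference row above the block into every output row. vst2.8 stores interleaved Cb/Cr pairs, and r11 = 4*stride - 16 steps each of the four destination pointers from the 16 bytes just written to its next (every fourth) row. The whole kernel in plain C (parameter names assumed):

    #include <stdint.h>

    /* Chroma is interleaved, so a width-wd block spans 2*wd bytes. */
    static void intra_pred_chroma_ver(uint8_t *dst, int dst_strd,
                                      const uint8_t *top, int wd, int ht)
    {
        for (int row = 0; row < ht; row++)
            for (int col = 0; col < 2 * wd; col++)
                dst[row * dst_strd + col] = top[col];
    }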
  ihevc_sao_edge_offset_class3_chroma.s
    107  MLA r11,r10,r1,r0 @pu1_src[(ht - 1) * src_strd + col]
    111  VLD1.8 D0,[r11]! @pu1_src[(ht - 1) * src_strd + col]
    121  SUB r11,r7,#1 @[wd - 1]
    122  LDRB r10,[r0,r11] @u1_pos_0_0_tmp_v = pu1_src[wd - 1]
    125  LDR r11,[sp,#0x100] @Load pu1_src_top_right from sp
    126  LDRB r11,[r11] @pu1_src_top_right[0]
    127  SUB r12,r9,r11 @pu1_src[wd - 2] - pu1_src_top_right[0]
    131  ADD r11,r0,r1 @pu1_src + src_strd
    133  LDRB r14,[r11,r14] @pu1_src[wd - 2 - 2 + src_strd]
    134  SUB r11,r9,r14 @pu1_src[wd - 2] - pu1_src[wd - 2 - 2 + src_strd]
    [all …]
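The LDRB/SUB pairs feed SAO's edge-offset classifier: each pixel is compared with its two neighbours along the class-3 (anti-diagonal) direction, and the pair of signs selects one of five edge categories. The sign step in C (helper and index formula follow the general SAO definition, not this file):

    /* sign(a - b) in {-1, 0, +1}, branch-free. */
    static int sign3(int a, int b)
    {
        return (a > b) - (a < b);
    }

    /* edge_idx = 2 + sign3(p, neighbour0) + sign3(p, neighbour1);
     * the 0..4 result selects the offset applied to p (2 = no edge). */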
  ihevc_inter_pred_luma_horz_w16out.s
    107  @r11 - #1
    132  mov r11,#1
    202  vld1.u32 {d0},[r12],r11 @vector load pu1_src
    203  vld1.u32 {d1},[r12],r11
    204  vld1.u32 {d2},[r12],r11
    205  vld1.u32 {d3},[r12],r11
    206  vld1.u32 {d4},[r12],r11
    207  vld1.u32 {d5},[r12],r11
    208  vld1.u32 {d6},[r12],r11
    209  vld1.u32 {d7},[r12],r11
    [all …]
  ihevc_inter_pred_filters_luma_horz.s
    130  mov r11,#1
    191  vld1.u32 {d0},[r12],r11 @vector load pu1_src
    192  vld1.u32 {d1},[r12],r11
    193  vld1.u32 {d2},[r12],r11
    194  vld1.u32 {d3},[r12],r11
    215  vld1.u32 {d4},[r12],r11
    217  vld1.u32 {d5},[r12],r11
    219  vld1.u32 {d6},[r12],r11
    221  vld1.u32 {d7},[r12],r11
    223  vld1.u32 {d12},[r4],r11 @vector load pu1_src + src_strd
    [all …]
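In both luma horizontal filters above, r11 holds the constant 1, so each vld1 loads eight pixels and then advances the source pointer by a single byte: d0-d7 end up holding eight overlapping windows, one per tap of the 8-tap filter. A scalar equivalent for one output sample (names assumed; rounding and clipping omitted):

    #include <stdint.h>

    static int filter8_horz(const uint8_t *src, const int8_t coef[8])
    {
        int acc = 0;
        for (int t = 0; t < 8; t++)    /* one overlapping window per tap */
            acc += coef[t] * src[t];
        return acc;                    /* caller rounds, shifts, clips */
    }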
/external/llvm/test/MC/X86/
  x86_64-bmi-encoding.s
    9   blsmskq %r11, %r10
    25  blsiq %r11, %r10
    41  blsrq %r11, %r10
    57  andnq (%rax), %r11, %r10
    73  bextrq %r12, %r11, %r10
    89  bzhiq %r12, %r11, %r10
    101  pextq %r12, %r11, %r10
    105  pextq (%rax), %r11, %r10
    117  pdepq %r12, %r11, %r10
    121  pdepq (%rax), %r11, %r10
    [all …]
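The test only checks encodings, but the BMI operations themselves reduce to short bit identities; C equivalents of the register forms above:

    #include <stdint.h>

    static uint64_t blsmsk(uint64_t x) { return x ^ (x - 1); } /* mask through lowest set bit */
    static uint64_t blsi(uint64_t x)   { return x & (0 - x); } /* isolate lowest set bit      */
    static uint64_t blsr(uint64_t x)   { return x & (x - 1); } /* clear lowest set bit        */
    static uint64_t andn(uint64_t a, uint64_t b) { return ~a & b; } /* andnq */
    /* bextr/bzhi extract or zero bit fields; pext/pdep gather or
     * scatter bits through a mask -- no short C one-liner for those. */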
/external/boringssl/win-x86_64/crypto/bn/
  x86_64-mont5.asm
    44  lea r11,[2+r9]
    45  neg r11
    46  lea rsp,[r11*8+rsp]
    52  mov r11,r10
    54  and r11,7
    58  lea r12,[96+r11*8+r12]
    102  mov r11,rdx
    122  add r13,r11
    123  mov r11,r10
    130  add r11,rax
    [all …]
/external/llvm/test/CodeGen/ARM/
  ehabi.ll
    145  ; CHECK-FP: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
    146  ; CHECK-FP: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
    147  ; CHECK-FP: .setfp r11, sp, #28
    148  ; CHECK-FP: add r11, sp, #28
    157  ; CHECK-FP-ELIM: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
    158  ; CHECK-FP-ELIM: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
    167  ; CHECK-V7-FP: .save {r4, r10, r11, lr}
    168  ; CHECK-V7-FP: push {r4, r10, r11, lr}
    169  ; CHECK-V7-FP: .setfp r11, sp, #8
    170  ; CHECK-V7-FP: add r11, sp, #8
    [all …]
  interrupt-attr.ll
    15  ; CHECK-A: push {r0, r1, r2, r3, r10, r11, r12, lr}
    16  ; CHECK-A: add r11, sp, #20
    20  ; CHECK-A: sub sp, r11, #20
    21  ; CHECK-A: pop {r0, r1, r2, r3, r10, r11, r12, lr}
    38  ; CHECK-M: push.w {r4, r10, r11, lr}
    39  ; CHECK-M: add.w r11, sp, #8
    44  ; CHECK-M: sub.w r4, r11, #8
    46  ; CHECK-M: pop.w {r4, r10, r11, pc}
    55  ; CHECK-A: push {r0, r1, r2, r3, r4, r5, r6, r7, r11, lr}
    57  ; CHECK-A: add r11, sp, #32
    [all …]
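Unlike a normal AAPCS function, the handler also spills the caller-saved set (r0-r3, r12, lr): an interrupt can arrive at any instruction, so everything the body might clobber has to be preserved. Source of the sort this test compiles (the attribute is Clang/LLVM's; the body is assumed):

    void bottom(void);

    /* Calling another function forces the full caller-saved spill
     * seen in the CHECK-A prologue above. */
    __attribute__((interrupt("IRQ")))
    void irq_handler(void)
    {
        bottom();
    }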
/external/valgrind/coregrind/m_syswrap/
  syscall-amd64-darwin.S
    117  movq -16(%rbp), %r11 /* r11 = VexGuestAMD64State * */
    118  movq OFFSET_amd64_RDI(%r11), %rdi
    119  movq OFFSET_amd64_RSI(%r11), %rsi
    120  movq OFFSET_amd64_RDX(%r11), %rdx
    121  movq OFFSET_amd64_RCX(%r11), %r10 /* rcx is passed in r10 instead */
    122  movq OFFSET_amd64_R8(%r11), %r8
    123  movq OFFSET_amd64_R9(%r11), %r9
    125  movq OFFSET_amd64_RSP(%r11), %r11 /* r11 = simulated RSP */
    126  movq 16(%r11), %rax
    128  movq 8(%r11), %rax
    [all …]
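Here %r11 first holds the pointer to the saved guest state while each movq moves one guest register into the slot the host syscall ABI expects; the guest's RCX goes to %r10 because SYSCALL itself overwrites %rcx with the return RIP (and %r11 with RFLAGS). A sketch of the marshalling (struct and field names assumed, not Valgrind's real VexGuestAMD64State layout):

    #include <stdint.h>

    struct guest_amd64 { uint64_t rdi, rsi, rdx, rcx, r8, r9; };

    /* Argument order for the host syscall: rdi, rsi, rdx, r10, r8, r9. */
    static void marshal_args(const struct guest_amd64 *g, uint64_t args[6])
    {
        args[0] = g->rdi;
        args[1] = g->rsi;
        args[2] = g->rdx;
        args[3] = g->rcx;   /* placed in %r10: SYSCALL clobbers %rcx */
        args[4] = g->r8;
        args[5] = g->r9;
    }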
/external/libvpx/libvpx/vp8/encoder/arm/armv6/
  vp8_subtract_armv6.asm
    81  stmfd sp!, {r4-r11}
    96  uxtb16 r11, r7, ror #8 ; [p3 | p1] (A)
    99  usub16 r7, r10, r11 ; [d3 | d1] (A)
    102  ldr r11, [r5, #4] ; upred (B)
    111  uxtb16 r9, r11 ; [p2 | p0] (B)
    113  uxtb16 r11, r11, ror #8 ; [p3 | p1] (B)
    116  usub16 r7, r10, r11 ; [d3 | d1] (B)
    141  uxtb16 r11, r7, ror #8 ; [p3 | p1] (A)
    144  usub16 r7, r10, r11 ; [d3 | d1] (A)
    147  ldr r11, [r5, #4] ; vpred (B)
    [all …]
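uxtb16 widens bytes 0 and 2 of a word (or bytes 1 and 3 with ror #8) into two halfwords, and usub16 then subtracts two pixel pairs at once, producing the source-minus-prediction residual. A scalar equivalent (names assumed):

    #include <stdint.h>

    /* diff = src - pred, widened to 16 bits; the asm computes this two
     * lanes at a time as the [d3|d1] and [d2|d0] halfword pairs. */
    static void subtract_row(int16_t *diff, const uint8_t *src,
                             const uint8_t *pred, int n)
    {
        for (int i = 0; i < n; i++)
            diff[i] = (int16_t)(src[i] - pred[i]);
    }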