/external/llvm/test/Transforms/InstSimplify/ |
D | shr-nop.ll |
    17  %shr = lshr exact i8 0, %a
    18  %cmp = icmp eq i8 %shr, 0
    25  %shr = ashr exact i8 0, %a
    26  %cmp = icmp eq i8 %shr, 0
    33  %shr = ashr i8 0, %a
    34  %cmp = icmp eq i8 %shr, 0
    41  %shr = lshr exact i8 0, %a
    42  %cmp = icmp ne i8 %shr, 0
    49  %shr = ashr exact i8 0, %a
    50  %cmp = icmp ne i8 %shr, 0
    [all …]
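(All of the shr-nop.ll matches above share one shape: a shift whose first operand is the constant 0. Since lshr/ashr of 0 by any amount is still 0, the compare simplifies to a constant. A minimal sketch of the idea, with a hypothetical function name, not copied from the file:)

    ; Sketch: zero shifted by anything is zero, so InstSimplify can
    ; fold the compare below to the constant true.
    define i1 @shr_of_zero_is_zero(i8 %a) {
      %shr = lshr exact i8 0, %a
      %cmp = icmp eq i8 %shr, 0   ; folds to i1 true
      ret i1 %cmp
    }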
|
/external/llvm/test/Transforms/InstCombine/ |
D | icmp-shr.ll |
    8   %shr = lshr i8 127, %a
    9   %cmp = icmp eq i8 %shr, 0
    16  %shr = ashr i8 127, %a
    17  %cmp = icmp eq i8 %shr, 0
    24  %shr = lshr i8 127, %a
    25  %cmp = icmp ne i8 %shr, 0
    32  %shr = ashr i8 127, %a
    33  %cmp = icmp ne i8 %shr, 0
    40  %shr = ashr i8 128, %a
    41  %cmp = icmp eq i8 %shr, 128
    [all …]
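(These icmp-shr.ll matches exercise a stronger fold than the shr-nop.ll cases: a shift of a non-zero constant compared against a constant can be rewritten as a range check on the shift amount. For i8, 127 >> %a reaches 0 exactly when %a is 7, so the equality becomes an unsigned compare on %a. A sketch under that reading, with an illustrative function name:)

    ; Sketch: InstCombine can turn the compare on the shift result into
    ; a compare on the shift amount: (127 >> a) == 0  <=>  a > 6.
    define i1 @shr_cmp_becomes_range_check(i8 %a) {
      %shr = lshr i8 127, %a
      %cmp = icmp eq i8 %shr, 0   ; becomes: icmp ugt i8 %a, 6
      ret i1 %cmp
    }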
|
/external/llvm/test/CodeGen/X86/ |
D | dagcombine-shifts.ll |
    14  %shr = lshr i8 %v, 4
    15  %ext = zext i8 %shr to i16
    22  ; CHECK-NOT: shr
    28  %shr = lshr i8 %v, 4
    29  %ext = zext i8 %shr to i32
    36  ; CHECK-NOT: shr
    42  %shr = lshr i16 %v, 4
    43  %ext = zext i16 %shr to i32
    50  ; CHECK-NOT: shr
    56  %shr = lshr i8 %v, 4
    [all …]
|
D | rotate4.ll |
    15  %shr = lshr i32 %a, %and3
    16  %or = or i32 %shl, %shr
    29  %shr = shl i32 %a, %and3
    30  %or = or i32 %shl, %shr
    43  %shr = lshr i64 %a, %and3
    44  %or = or i64 %shl, %shr
    57  %shr = shl i64 %a, %and3
    58  %or = or i64 %shl, %shr
    76  %shr = lshr i32 %a, %and3
    77  %or = or i32 %shl, %shr
    [all …]
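(The rotate4.ll matches above and the rotate.ll matches further down exercise the same DAG combine: an or of a shl and an lshr of one value by complementary, masked amounts folds to a single hardware rotate. The %shl and %and3 definitions are elided by the snippet, so the reconstruction below is illustrative only, not the file's literal code:)

    ; Illustrative rotate-left-by-variable pattern; masking the negated
    ; amount keeps both shifts strictly below the bit width.
    define i32 @rotl32(i32 %a, i32 %n) {
      %and = and i32 %n, 31        ; shift amount mod 32
      %shl = shl i32 %a, %and
      %sub = sub i32 0, %n         ; (-n) & 31 == (32 - n) & 31
      %and3 = and i32 %sub, 31
      %shr = lshr i32 %a, %and3
      %or = or i32 %shl, %shr      ; selects to a single rol on x86
      ret i32 %or
    }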
|
D | x86-64-double-precision-shift-left.ll |
    20  %shr = lshr i64 %b, 63
    21  %or = or i64 %shr, %shl
    38  %shr = lshr i64 %b, 62
    39  %or = or i64 %shr, %shl
    56  %shr = lshr i64 %b, 57
    57  %or = or i64 %shr, %shl
    74  %shr = lshr i64 %b, 1
    75  %or = or i64 %shr, %shl
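(The lshr amounts here, 63, 62, 57, and 1, are each the 64-complement of a left-shift amount, the signature of a double-precision shift. The %shl definitions are elided from the snippet, so this reconstruction of the first pairing is a plausible sketch, not the file's code:)

    ; Illustrative: shifting %a left by 1 and or-ing in the top bit of %b
    ; concatenates the two values, which x86-64 can do with one shld.
    define i64 @shld_by_1(i64 %a, i64 %b) {
      %shl = shl i64 %a, 1
      %shr = lshr i64 %b, 63   ; 64 - 1
      %or = or i64 %shr, %shl  ; == shld $1, %b, %a
      ret i64 %or
    }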
|
D | movmsk.ll |
    19  ; CHECK-NOT: shr
    23  %shr.i = trunc i64 %tmp1 to i32
    24  ret i32 %shr.i
    38  ; CHECK-NOT: shr
    42  %shr.i = trunc i64 %tmp1 to i32
    43  ret i32 %shr.i
    56  ; CHECK-NOT: shr
    59  %shr.i = lshr i32 %2, 31
    60  ret i32 %shr.i
    74  ; CHECK-NOT: shr
    [all …]
|
/external/llvm/test/CodeGen/Mips/ |
D | mips64shift.ll |
    13  %shr = ashr i64 %a0, %a1
    14  ret i64 %shr
    20  %shr = lshr i64 %a0, %a1
    21  ret i64 %shr
    34  %shr = ashr i64 %a0, 10
    35  ret i64 %shr
    41  %shr = lshr i64 %a0, 10
    42  ret i64 %shr
    55  %shr = ashr i64 %a0, 40
    56  ret i64 %shr
    [all …]
|
D | rotate.ll |
    10  %shr = lshr i32 %a, %sub
    11  %or = or i32 %shr, %shl
    20  %shr = lshr i32 %a, 22
    21  %or = or i32 %shl, %shr
    29  %shr = lshr i32 %a, %b
    32  %or = or i32 %shl, %shr
    40  %shr = lshr i32 %a, 10
    42  %or = or i32 %shr, %shl
|
D | mips64extins.ll |
    6   %shr = lshr i64 %i, 5
    7   %and = and i64 %shr, 1023
    14  %shr = lshr i64 %i, 5
    15  %and = and i64 %shr, 17179869183
    22  %shr = lshr i64 %i, 34
    23  %and = and i64 %shr, 63
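(Each pair in this entry masks a shifted value with a constant of the form 2^k - 1: 1023 is 2^10 - 1, 17179869183 is 2^34 - 1, and 63 is 2^6 - 1. That makes the pair a bitfield extract, which MIPS64 can lower to a single dext-family instruction. The function wrapper below is illustrative; the body is taken from the first match:)

    ; (lshr, and) with a 2^k - 1 mask extracts a k-bit field: here,
    ; bits [5, 15) of %i, i.e. one MIPS64 dext with pos=5, size=10.
    define i64 @extract_10_bits_at_5(i64 %i) {
      %shr = lshr i64 %i, 5
      %and = and i64 %shr, 1023   ; 1023 = 2^10 - 1
      ret i64 %and
    }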
|
/external/llvm/test/Transforms/IndVarSimplify/ |
D | iv-fold.ll |
    5   ; Indvars should be able to fold IV increments into shr when low bits are zero.
    8   ; CHECK: shr.1 = lshr i32 %0, 5
    15  %shr = lshr i32 %0, 5
    16  %arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
    19  %shr.1 = lshr i32 %inc.1, 5
    20  %arrayidx.1 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.1
    31  ; Indvars should not fold an increment into shr unless 2^shiftBits is
    35  ; CHECK: %shr.1 = lshr i32 %inc.1, 5
    42  %shr = lshr i32 %0, 5
    43  %arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
    [all …]
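(The fold named in the comment on file line 5 rests on a small identity: when an IV starts at, and steps by, a multiple of 2^shiftBits, the shift distributes over the increment, e.g. (%0 + 32) >> 5 == (%0 >> 5) + 1. The add instruction itself is elided from the snippet, so the step of 32 below is inferred from the 2^5 shift; this is a hedged sketch of the identity, not the pass's literal output:)

    ; Hedged sketch: both computations below are equal whenever the low
    ; 5 bits of %x are zero, which is what licenses the indvars fold.
    define i1 @inc_shr_identity(i32 %x) {
      %inc = add i32 %x, 32       ; IV step, a multiple of 2^5
      %a = lshr i32 %inc, 5       ; shift of the incremented IV
      %b = lshr i32 %x, 5
      %c = add i32 %b, 1          ; folded form: shift once, then step by 1
      %eq = icmp eq i32 %a, %c    ; true when %x is 32-aligned
      ret i1 %eq
    }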
|
/external/llvm/test/CodeGen/BPF/ |
D | shifts.ll |
    8   %shr = lshr i8 %a, %cnt
    9   ret i8 %shr
    16  %shr = ashr i8 %a, %cnt
    17  ret i8 %shr
    32  %shr = lshr i16 %a, %cnt
    33  ret i16 %shr
    40  %shr = ashr i16 %a, %cnt
    41  ret i16 %shr
    57  %shr = lshr i32 %a, %cnt
    58  ret i32 %shr
    [all …]
|
/external/boringssl/win-x86_64/crypto/modes/ |
D | ghash-x86_64.asm |
    42   shr r8,4
    46   shr r9,4
    58   shr r8,4
    61   shr r9,4
    73   shr r8,4
    76   shr r9,4
    85   shr r8,4
    88   shr r9,4
    137  shr rax,4
    139  shr r8,4
    [all …]
|
/external/llvm/test/MC/Mips/ |
D | mips64shift.ll |
    17  %shr = ashr i64 %a0, 10
    18  ret i64 %shr
    24  %shr = lshr i64 %a0, 10
    25  ret i64 %shr
    38  %shr = ashr i64 %a0, 40
    39  ret i64 %shr
    45  %shr = lshr i64 %a0, 40
    46  ret i64 %shr
|
D | mips64extins.ll |
    8   %shr = lshr i64 %i, 5
    9   %and = and i64 %shr, 1023
    16  %shr = lshr i64 %i, 34
    17  %and = and i64 %shr, 63
    24  %shr = lshr i64 %i, 5
    25  %and = and i64 %shr, 17179869183
|
/external/llvm/test/CodeGen/SystemZ/ |
D | shift-10.ll |
    11  %shr = lshr i32 %a, 1
    12  %trunc = trunc i32 %shr to i1
    24  %shr = lshr i32 %a, 30
    25  %trunc = trunc i32 %shr to i1
    36  %shr = lshr i32 %a, 1
    37  %ext = zext i32 %shr to i64
    48  %shr = lshr i32 %a, 30
    49  %ext = sext i32 %shr to i64
    62  %shr = lshr i32 %a, 30
    63  %ext = sext i32 %shr to i64
    [all …]
|
D | risbg-01.ll |
    11  %shr = lshr i32 %foo, 10
    12  %and = and i32 %shr, 1
    21  %shr = lshr i64 %foo, 10
    22  %and = and i64 %shr, 1
    31  %shr = lshr i32 %foo, 22
    32  %and = and i32 %shr, 12
    41  %shr = lshr i64 %foo, 22
    42  %and = and i64 %shr, 12
    52  %shr = lshr i32 %foo, 2
    53  %and = and i32 %shr, -8
    [all …]
|
/external/llvm/test/CodeGen/MSP430/ |
D | shifts.ll |
    9   %shr = lshr i8 %a, %cnt
    10  ret i8 %shr
    17  %shr = ashr i8 %a, %cnt
    18  ret i8 %shr
    33  %shr = lshr i16 %a, %cnt
    34  ret i16 %shr
    41  %shr = ashr i16 %a, %cnt
    42  ret i16 %shr
|
/external/boringssl/src/crypto/sha/asm/ |
D | sha512-586.pl |
    164  &shr ("ecx",9);      # lo>>9
    166  &shr ("edx",9);      # hi>>9
    173  &shr ("ecx",14-9);   # lo>>14
    175  &shr ("edx",14-9);   # hi>>14
    182  &shr ("ecx",18-14);  # lo>>18
    184  &shr ("edx",18-14);  # hi>>18
    229  &shr ("ecx",2);      # lo>>2
    231  &shr ("edx",2);      # hi>>2
    238  &shr ("ecx",7-2);    # lo>>7
    240  &shr ("edx",7-2);    # hi>>7
    [all …]
|
/external/llvm/test/CodeGen/NVPTX/ |
D | sext-in-reg.ll |
    14  %shr = ashr i64 %a, 16
    18  %add17 = add nsw i64 %shr9, %shr
    32  %shr = ashr i64 %a, 16
    36  %add17 = add nsw i64 %shr9, %shr
    50  %shr = ashr i64 %a, 16
    54  %add17 = add nsw i64 %shr9, %shr
    68  %shr = ashr i32 %a, 16
    72  %add17 = add nsw i32 %shr9, %shr
    86  %shr = ashr i32 %a, 16
    90  %add17 = add nsw i32 %shr9, %shr
    [all …]
|
/external/llvm/test/Transforms/LoopUnroll/ |
D | 2011-08-09-IVSimplify.ll |
    14  ; CHECK: %shr.1 = lshr i32 %bit_addr.addr.01, 5
    15  ; CHECK: %arrayidx.1 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.1
    16  ; CHECK: %shr.2 = lshr i32 %bit_addr.addr.01, 5
    17  ; CHECK: %arrayidx.2 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.2
    18  ; CHECK: %shr.3 = lshr i32 %bit_addr.addr.01, 5
    19  ; CHECK: %arrayidx.3 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.3
    28  %shr = lshr i32 %bit_addr.addr.01, 5
    31  %arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-shifted-sext.ll |
    72   %shr = ashr i32 %conv, 4
    73   ret i32 %shr
    95   %shr = ashr i32 %conv, 8
    96   ret i32 %shr
    117  %shr = ashr i64 %conv, 4
    118  ret i64 %shr
    140  %shr = ashr i64 %conv, 8
    141  ret i64 %shr
    162  %shr = ashr i32 %conv, 4
    163  ret i32 %shr
    [all …]
|
/external/llvm/test/Transforms/SimplifyCFG/ |
D | branch-fold.ll |
    33  %shr.i4.i = lshr i64 %i0, 48
    34  %and.i5.i = and i64 %shr.i4.i, 32767
    35  %shr.i.i = lshr i64 %i1, 48
    36  %and.i2.i = and i64 %shr.i.i, 32767
    42  %shr.i13.i9 = lshr i64 %i1, 48
    43  %and.i14.i10 = and i64 %shr.i13.i9, 32767
    44  %shr.i.i11 = lshr i64 %i0, 48
    45  %and.i11.i12 = and i64 %shr.i.i11, 32767
|
/external/llvm/test/CodeGen/ARM/ |
D | thumb2-size-opt.ll |
    18  %shr = ashr i32 %a, 13
    19  ret i32 %shr
    27  %shr = ashr i32 %a, %b
    28  ret i32 %shr
    73  %shr = lshr i32 %a, 13
    74  ret i32 %shr
    82  %shr = lshr i32 %a, %b
    83  ret i32 %shr
|
/external/boringssl/win-x86/crypto/aes/ |
D | aes-586.asm |
    46   shr edi,16
    52   shr edi,24
    59   shr ebx,16
    66   shr edi,16
    72   shr edi,24
    79   shr ecx,24
    86   shr edi,16
    116  shr ebp,7
    134  shr ebp,7
    152  shr ebp,7
    [all …]
|
/external/libvpx/libvpx/vp8/common/x86/ |
D | iwalsh_sse2.asm |
    84   shr eax, 16
    85   shr ecx, 16
    94   shr eax, 16
    95   shr ecx, 16
    105  shr eax, 16
    106  shr ecx, 16
    113  shr eax, 16
    114  shr ecx, 16
|