/external/llvm/test/Analysis/CostModel/X86/ |
D | testshiftlshr.ll |
    8: ; SSE2: cost of 20 {{.*}} lshr
    12: %0 = lshr %shifttype %a , %b
    20: ; SSE2: cost of 40 {{.*}} lshr
    24: %0 = lshr %shifttype4i16 %a , %b
    32: ; SSE2: cost of 80 {{.*}} lshr
    36: %0 = lshr %shifttype8i16 %a , %b
    44: ; SSE2: cost of 160 {{.*}} lshr
    48: %0 = lshr %shifttype16i16 %a , %b
    56: ; SSE2: cost of 320 {{.*}} lshr
    60: %0 = lshr %shifttype32i16 %a , %b
    [all …]
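
The SSE2 costs above scale linearly with the lane count (20, 40, 80, 160, 320 for 2 through 32 i16 lanes) because SSE2 has no variable-amount 16-bit vector shift, so a variable lshr is costed as per-lane expansion. A minimal sketch of a test in this style; the RUN line, triple, and CPU are assumptions, not quoted from the file:

    ; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mcpu=pentium4 | FileCheck %s --check-prefix=SSE2
    %shifttype8i16 = type <8 x i16>
    define %shifttype8i16 @shift8i16(%shifttype8i16 %a, %shifttype8i16 %b) {
    entry:
      ; SSE2: cost of 80 {{.*}} lshr
      %0 = lshr %shifttype8i16 %a , %b
      ret %shifttype8i16 %0
    }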
|
D | arith.ll |
    86: ; AVX: cost of 2 {{.*}} lshr
    87: ; AVX2: cost of 1 {{.*}} lshr
    88: %B0 = lshr <4 x i32> undef, undef
    89: ; AVX: cost of 2 {{.*}} lshr
    90: ; AVX2: cost of 1 {{.*}} lshr
    91: %B1 = lshr <2 x i64> undef, undef
    113: ; AVX: cost of 2 {{.*}} lshr
    114: ; AVX2: cost of 1 {{.*}} lshr
    115: %B0 = lshr <8 x i32> undef, undef
    116: ; AVX: cost of 2 {{.*}} lshr
    [all …]
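
The AVX/AVX2 pairs check the same instruction at two costs: AVX2 added variable vector shifts (vpsrlvd/vpsrlvq), so an lshr with per-lane amounts drops from 2 to 1. A sketch of the kind of IR being costed; the function name is illustrative:

    define <4 x i32> @var_lshr_v4i32(<4 x i32> %a, <4 x i32> %b) {
      ; AVX2 can select a single vpsrlvd here; AVX1 cannot, hence cost 2 vs 1
      %B0 = lshr <4 x i32> %a, %b
      ret <4 x i32> %B0
    }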
|
/external/llvm/test/CodeGen/X86/ |
D | vshift-2.ll |
    10: %lshr = lshr <2 x i64> %val, < i64 32, i64 32 >
    11: store <2 x i64> %lshr, <2 x i64>* %dst
    22: %lshr = lshr <2 x i64> %val, %1
    23: store <2 x i64> %lshr, <2 x i64>* %dst
    31: %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 >
    32: store <4 x i32> %lshr, <4 x i32>* %dst
    45: %lshr = lshr <4 x i32> %val, %3
    46: store <4 x i32> %lshr, <4 x i32>* %dst
    55: %lshr = lshr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
    56: store <8 x i16> %lshr, <8 x i16>* %dst
    [all …]
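
vshift-2.ll covers uniform shifts, both as immediate splats and as broadcast scalar amounts, each of which should select a single psrlq/psrld/psrlw. A reduced sketch of the psrld case; the function name is assumed:

    define void @shift_v4i32(<4 x i32> %val, <4 x i32>* %dst) {
    entry:
      ; a splat amount should select a single psrld $17
      %lshr = lshr <4 x i32> %val, <i32 17, i32 17, i32 17, i32 17>
      store <4 x i32> %lshr, <4 x i32>* %dst
      ret void
    }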
|
D | lower-vec-shift.ll |
    12: %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
    13: ret <8 x i16> %lshr
    29: %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
    30: ret <8 x i16> %lshr
    46: %lshr = lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
    47: ret <4 x i32> %lshr
    61: %lshr = lshr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
    62: ret <4 x i32> %lshr
    76: %lshr = ashr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
    77: ret <8 x i16> %lshr
    [all …]
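
Here the constant amounts differ per lane but take only two distinct values (3 and 2), so a target without per-lane variable shifts can lower the operation as two uniform shifts plus a blend instead of scalarizing. A sketch of that shape; the function name is assumed:

    define <4 x i32> @nonuniform_lshr(<4 x i32> %a) {
      ; two distinct amounts: lowerable as psrld $3 and psrld $2, then a blend
      %lshr = lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
      ret <4 x i32> %lshr
    }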
|
D | 2013-01-09-DAGCombineBug.ll |
    51: …t (i64 xor (i64 zext (i1 trunc (i192 lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i12…
    57: …lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr (i384 or (i384 shl…
    60: …lshr (i192 or (i192 and (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr…
|
D | rot64.ll |
    12: %2 = lshr i64 %x, %1
    21: %2 = lshr i64 %x, %1
    28: %0 = lshr i64 %x, %z
    37: %0 = lshr i64 %y, %z
    48: %0 = lshr i64 %x, 57
    59: %a = lshr i64 %x, 57
    68: %1 = lshr i64 %x, 57
    77: %0 = lshr i64 %x, 7
    88: %a = lshr i64 %x, 7
    96: %0 = lshr i64 %y, 7
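
rot64.ll, rot32.ll, rot16.ll, and rotate.ll below all exercise the same idiom at different widths: a rotate written as a shl by n or'd with an lshr by width minus n, which the x86 backend should collapse into a single rol/ror. A sketch of the 64-bit variable-amount shape; the names are assumed:

    define i64 @rotl64(i64 %x, i64 %z) {
    entry:
      %hi  = shl i64 %x, %z
      %amt = sub i64 64, %z
      %lo  = lshr i64 %x, %amt
      ; shl/lshr/or of complementary amounts: expected to match a single rolq
      %r   = or i64 %lo, %hi
      ret i64 %r
    }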
|
D | h-registers-1.ll |
    21: %sa = lshr i64 %a, 8
    23: %sb = lshr i64 %b, 8
    25: %sc = lshr i64 %c, 8
    27: %sd = lshr i64 %d, 8
    29: %se = lshr i64 %e, 8
    31: %sf = lshr i64 %f, 8
    33: %sg = lshr i64 %g, 8
    35: %sh = lshr i64 %h, 8
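
Each lshr by 8 isolates the second-lowest byte of its operand; truncated to i8, that byte is directly addressable on x86 as an h register (ah, bh, ch, dh), so no shift instruction is needed. A sketch of one extraction; names assumed:

    define i8 @byte1(i64 %a) {
      ; bits 8-15 of %a: the backend can read these from an h register
      %sa = lshr i64 %a, 8
      %ta = trunc i64 %sa to i8
      ret i8 %ta
    }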
|
D | rot32.ll |
    10: %2 = lshr i32 %x, %1
    21: %2 = lshr i32 %x, %1
    30: %0 = lshr i32 %x, %z
    41: %0 = lshr i32 %y, %z
    54: %0 = lshr i32 %x, 25
    65: %a = lshr i32 %x, 25
    76: %1 = lshr i32 %x, 25
    87: %0 = lshr i32 %x, 7
    98: %a = lshr i32 %x, 7
    108: %0 = lshr i32 %y, 7
|
D | rot16.ll |
    9: %2 = lshr i16 %x, %1
    20: %2 = lshr i16 %x, %1
    29: %0 = lshr i16 %x, %z
    40: %0 = lshr i16 %y, %z
    51: %0 = lshr i16 %x, 11
    62: %1 = lshr i16 %x, 11
    71: %0 = lshr i16 %x, 5
    81: %0 = lshr i16 %y, 5
|
D | rotate.ll |
    9: %C = lshr i32 %A, %shift.upgrd.2 ; <i32> [#uses=1]
    16: %B = lshr i32 %A, %shift.upgrd.3 ; <i32> [#uses=1]
    26: %C = lshr i32 %A, 27 ; <i32> [#uses=1]
    32: %B = lshr i32 %A, 5 ; <i32> [#uses=1]
    43: %C = lshr i16 %A, %shift.upgrd.6 ; <i16> [#uses=1]
    50: %B = lshr i16 %A, %shift.upgrd.7 ; <i16> [#uses=1]
    60: %C = lshr i16 %A, 11 ; <i16> [#uses=1]
    66: %B = lshr i16 %A, 5 ; <i16> [#uses=1]
    75: %C = lshr i8 %A, %Amt2 ; <i8> [#uses=1]
    81: %B = lshr i8 %A, %Amt ; <i8> [#uses=1]
    [all …]
|
D | dagcombine-shifts.ll |
    3: ; fold (shl (zext (lshr (A, X))), X) -> (zext (shl (lshr (A, X)), X))
    5: ; Canonicalize the sequence shl/zext/lshr performing the zeroextend
    14: %shr = lshr i8 %v, 4
    28: %shr = lshr i8 %v, 4
    42: %shr = lshr i16 %v, 4
    56: %shr = lshr i8 %v, 4
    70: %shr = lshr i16 %v, 4
    84: %shr = lshr i32 %v, 4
    142: %shr = lshr i8 %v, 4
    156: %shr = lshr i16 %v, 4
    [all …]
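
The fold in the header comment pushes the zext outward, rewriting shl(zext(lshr(A, X)), X) as zext(shl(lshr(A, X), X)); the inner shift pair then reduces to a mask in the narrow type. A sketch at i8-to-i16 matching the hits above; the function name is assumed:

    define i16 @fun(i8 %v) {
    entry:
      %shr = lshr i8 %v, 4
      %ext = zext i8 %shr to i16
      ; folds to zext(shl(lshr(%v, 4), 4)), i.e. zext(%v & 0xF0)
      %shl = shl i16 %ext, 4
      ret i16 %shl
    }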
|
/external/llvm/test/CodeGen/PowerPC/ |
D | fast-isel-shifter.ll |
    19: define i32 @lshr() nounwind ssp {
    21: ; ELF64: lshr
    23: %lshr = lshr i32 -1, 2
    24: ret i32 %lshr
    31: %lshr = lshr i32 %src1, %src2
    32: ret i32 %lshr
|
D | ppc32-lshrti3.ll |
    17: %bf.lshr = lshr i72 %bf.set3, 40
    18: %bf.lshr.tr = trunc i72 %bf.lshr to i32
    19: %bf.cast = and i32 %bf.lshr.tr, 65535
    20: %dec = add nsw i32 %bf.lshr.tr, 65535
|
/external/llvm/test/CodeGen/ARM/ |
D | fast-isel-shifter.ll |
    20: define i32 @lshr() nounwind ssp {
    22: ; ARM: lshr
    24: %lshr = lshr i32 -1, 2
    25: ret i32 %lshr
    32: %lshr = lshr i32 %src1, %src2
    33: ret i32 %lshr
|
D | uxtb.ll |
    10: %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
    16: %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
    22: %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
    28: %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
    34: %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
    43: %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
    54: %tmp5 = lshr i32 %x, 24 ; <i32> [#uses=1]
    60: %tmp1 = lshr i32 %x, 24 ; <i32> [#uses=1]
    68: %tmp1 = lshr i32 %p0, 7 ; <i32> [#uses=1]
    70: %tmp4 = lshr i32 %tmp2, 5 ; <i32> [#uses=1]
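
The shift amounts 8, 16, and 24 pair with byte masks (outside these hits) to form the patterns ARM selects as uxtb/uxtb16-style extracts, optionally rotated. A sketch of one such pair, assuming the 0x00FF00FF mask used by the uxtb16 patterns:

    define i32 @uxtb16_ror8(i32 %x) {
      %tmp1 = lshr i32 %x, 8
      ; mask 0x00FF00FF after the shift: selectable as uxtb16 with ror #8
      %tmp2 = and i32 %tmp1, 16711935
      ret i32 %tmp2
    }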
|
/external/llvm/test/CodeGen/Thumb/ |
D | 2009-08-12-ConstIslandAssert.ll |
    16: %6 = lshr i32 %4, 24 ; <i32> [#uses=1]
    19: %9 = lshr i32 %4, 16 ; <i32> [#uses=1]
    37: %27 = lshr i32 %24, 24 ; <i32> [#uses=1]
    40: %30 = lshr i32 %24, 16 ; <i32> [#uses=1]
    46: %36 = lshr i32 %24, 8 ; <i32> [#uses=1]
    61: %51 = lshr i32 %48, 24 ; <i32> [#uses=1]
    64: %54 = lshr i32 %48, 16 ; <i32> [#uses=1]
    70: %60 = lshr i32 %48, 8 ; <i32> [#uses=1]
    84: %74 = lshr i32 %72, 24 ; <i32> [#uses=1]
    87: %77 = lshr i32 %72, 16 ; <i32> [#uses=1]
    [all …]
|
/external/llvm/test/CodeGen/SystemZ/ |
D | shift-06.ll |
    10: %shift = lshr i64 %a, 1
    19: %shift = lshr i64 %a, 63
    28: %shift = lshr i64 %a, 64
    37: %shift = lshr i64 %a, %amt
    47: %shift = lshr i64 %a, %add
    58: %shift = lshr i64 %a, %addext
    69: %shift = lshr i64 %a, %addext
    80: %shift = lshr i64 %a, %add
    92: %shift = lshr i64 %a, %add
    102: %shift = lshr i64 %a, %sub
    [all …]
|
D | shift-02.ll |
    10: %shift = lshr i32 %a, 1
    19: %shift = lshr i32 %a, 31
    28: %shift = lshr i32 %a, 32
    38: %shift = lshr i32 %a, %sub
    47: %shift = lshr i32 %a, %amt
    57: %shift = lshr i32 %a, %add
    68: %shift = lshr i32 %a, %trunc
    79: %shift = lshr i32 %a, %add
    90: %shift = lshr i32 %a, %add
    101: %shift = lshr i32 %a, %add
    [all …]
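
Several of these SystemZ tests feed an add or sub of a constant into the shift amount; srl takes its amount from an address-style base-plus-displacement operand, so a constant like the 10 below should fold into the displacement rather than cost a separate add. A sketch, assuming that lowering:

    define i32 @shift_plus_10(i32 %a, i32 %amt) {
      %add = add i32 %amt, 10
      ; the +10 should be absorbed into the shift's displacement field
      %shift = lshr i32 %a, %add
      ret i32 %shift
    }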
|
D | shift-04.ll |
    11: %partb = lshr i32 %a, 31
    22: %partb = lshr i32 %a, 1
    33: %partb = lshr i32 %a, 0
    45: %partb = lshr i32 %a, %amtb
    58: %partb = lshr i32 %a, %sub
    72: %partb = lshr i32 %a, %sub
    87: %partb = lshr i32 %a, %subtrunc
    101: %partb = lshr i32 %a, %sub
    116: %partb = lshr i32 %a, %sub
    129: %partb = lshr i32 %a, %subb
    [all …]
|
D | int-mul-08.ll |
    16: %highx = lshr i128 %mulx, 64
    36: %highx = lshr i128 %mulx, 64
    52: %highx = lshr i128 %mulx, 67
    68: %highx = lshr i128 %mulx, 64
    95: %highx = lshr i128 %mulx, 64
    110: %highx = lshr i128 %mulx, 64
    127: %highx = lshr i128 %mulx, 64
    142: %highx = lshr i128 %mulx, 64
    157: %highx = lshr i128 %mulx, 64
    174: %highx = lshr i128 %mulx, 64
    [all …]
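
The recurring lshr i128 %mulx, 64 is the tail of the standard high-half multiply idiom: widen both operands to i128, multiply, shift down by 64, truncate; SystemZ selects this as a register-pair multiply whose upper half is the result (the shift by 67 at line 52 checks that only an exact 64 matches). A sketch; the function name is assumed:

    define i64 @umul_high(i64 %a, i64 %b) {
      %ax    = zext i64 %a to i128
      %bx    = zext i64 %b to i128
      %mulx  = mul i128 %ax, %bx
      ; keep only the top 64 bits of the 128-bit product
      %highx = lshr i128 %mulx, 64
      %high  = trunc i128 %highx to i64
      ret i64 %high
    }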
|
D | shift-08.ll |
    11: %partb = lshr i64 %a, 63
    22: %partb = lshr i64 %a, 1
    33: %partb = lshr i64 %a, 0
    45: %partb = lshr i64 %a, %amtb
    58: %partb = lshr i64 %a, %sub
    73: %partb = lshr i64 %a, %subext
    88: %partb = lshr i64 %a, %subext
    102: %partb = lshr i64 %a, %sub
    117: %partb = lshr i64 %a, %sub
    130: %partb = lshr i64 %a, %subb
    [all …]
|
/external/llvm/test/Transforms/InstCombine/ |
D | shift.ll |
    39: %B = lshr i32 %A, 32 ;; shift all bits out
    46: %B = lshr <4 x i32> %A, <i32 32, i32 32, i32 32, i32 32> ;; shift all bits out
    53: %B = lshr <4 x i32> %A, zeroinitializer
    60: %B = lshr <4 x i32> %A, <i32 32, i32 1, i32 2, i32 3>
    140: %C = lshr i8 %B, 7 ; <i8> [#uses=1]
    151: %B = lshr i8 %A, 7 ; <i8> [#uses=1]
    156: ;; Allow the simplification when the lshr shift is exact.
    160: %B = lshr exact i8 %A, 7
    173: %B = lshr i8 %a, 3 ; <i8> [#uses=1]
    178: ;; Allow the simplification in InstCombine when the lshr shift is exact.
    [all …]
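
Line 39's comment is the key point: an lshr by the full bit width moves every bit out, so InstCombine may fold the result to undef, while the exact flag (lines 156-160) asserts that no one bits are shifted out and unlocks simplifications that would otherwise be unsound. A sketch of the first case:

    define i32 @shift_all_bits_out(i32 %A) {
      ; amount equals the bit width: every bit is shifted out,
      ; so instcombine can replace %B with undef
      %B = lshr i32 %A, 32
      ret i32 %B
    }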
|
D | icmp-shr.ll |
    8: %shr = lshr i8 127, %a
    24: %shr = lshr i8 127, %a
    56: %shr = lshr i8 127, %a
    64: %shr = lshr i8 127, %a
    88: %shr = lshr exact i8 126, %a
    96: %shr = lshr exact i8 126, %a
    104: %shr = lshr exact i8 -128, %a
    112: %shr = lshr i8 -128, %a
    120: %shr = lshr exact i8 -128, %a
    128: %shr = lshr i8 -128, %a
    [all …]
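
Shifting a known constant right by a variable amount can only produce a handful of values, so a comparison against the result usually rewrites to a direct test on the shift amount. A sketch; the folded form in the comment is my reading, not quoted from the file:

    define i1 @cmp_const_shr(i8 %a) {
      %shr = lshr i8 127, %a
      ; 127 >> %a is 0 exactly when %a >= 7, so this can fold to
      ; icmp ugt i8 %a, 6
      %cmp = icmp eq i8 %shr, 0
      ret i1 %cmp
    }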
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-bitfield-extract.ll |
    17: %bf.clear = lshr i32 %tmp1, 3
    51: %bf.clear = lshr i64 %tmp1, 3
    82: %shr = lshr i64 %x, 16
    98: %shr = lshr i32 %x, 16
    117: %shr = lshr i32 %x, 16
    120: %shr1 = lshr i32 %or, 2
    138: %shr = lshr i32 %x, 16
    160: %shr = lshr i64 %x, 16
    163: %shr1 = lshr i64 %or, 2
    182: %shr = lshr i64 %x, 16
    [all …]
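
The lshr-then-and and lshr-then-or lines are the raw material for AArch64's bitfield instructions: shift-and-mask selects as a single ubfx, and the or-of-shifts variants as bfi/bfxil inserts. A sketch of the extract case; field position and width are assumed:

    define i32 @bfx(i32 %x) {
      ; a 3-bit field at bit 16: selectable as one ubfx
      %shr = lshr i32 %x, 16
      %and = and i32 %shr, 7
      ret i32 %and
    }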
|
/external/llvm/test/CodeGen/Mips/ |
D | mips64shift.ll |
    20: %shr = lshr i64 %a0, %a1
    41: %shr = lshr i64 %a0, 10
    62: %shr = lshr i64 %a0, 40
    69: %shr = lshr i64 %a0, %a1
    81: %shr = lshr i64 %a0, %sub
    89: %shr = lshr i64 %a0, 10
    99: %shr = lshr i64 %a0, 54
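
MIPS64 splits constant 64-bit shifts across two encodings: dsrl for amounts 0-31 and dsrl32 for 32-63, which stores the amount minus 32; that is why the file probes 10, 40, and 54 alongside the register-amount forms. A sketch of the two constant cases; function names assumed:

    define i64 @shr10(i64 %a0) {
      %shr = lshr i64 %a0, 10      ; dsrl: amount below 32
      ret i64 %shr
    }

    define i64 @shr40(i64 %a0) {
      %shr = lshr i64 %a0, 40      ; dsrl32: encoded as 40 - 32 = 8
      ret i64 %shr
    }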
|