/external/swiftshader/third_party/LLVM/test/CodeGen/Mips/ |
D | madd-msub.ll |
     7  %conv2 = sext i32 %b to i64
     8  %mul = mul nsw i64 %conv2, %conv
    18  %conv2 = zext i32 %b to i64
    19  %mul = mul nsw i64 %conv2, %conv
    29  %conv2 = sext i32 %b to i64
    30  %mul = mul nsw i64 %conv2, %conv
    39  %conv2 = sext i32 %a to i64
    41  %mul = mul nsw i64 %conv4, %conv2
    50  %conv2 = zext i32 %a to i64
    52  %mul = mul nsw i64 %conv4, %conv2
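The pattern these matches exercise is a 32x32->64-bit widening multiply feeding a 64-bit add or subtract, which the Mips backend selects as madd/maddu (or msub/msubu). A minimal sketch of the signed add form; the function name and the %acc operand are illustrative, not taken from the test:

    define i64 @madd_sketch(i32 %a, i32 %b, i64 %acc) {
    entry:
      %conv = sext i32 %a to i64
      %conv2 = sext i32 %b to i64
      %mul = mul nsw i64 %conv2, %conv
      %add = add nsw i64 %acc, %mul        ; 64-bit accumulate -> madd
      ret i64 %add
    }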
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/ |
D | dsp-mlal.ll |
     40  %conv2 = trunc i64 %0 to i32
     41  ret i32 %conv2
     54  %conv2 = trunc i64 %0 to i32
     55  ret i32 %conv2
     67  %conv2 = sext i32 %c to i64
     68  %mul = mul nsw i64 %conv2, %conv1
     87  %conv2 = trunc i64 %0 to i32
     88  %sub = sub nsw i32 %a, %conv2
    101  %conv2 = sext i32 %c to i64
    102  %mul = mul nsw i64 %conv2, %conv1
    [all …]
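The dsp-mlal.ll matches repeatedly truncate a 64-bit value back to i32 after a sign-extended multiply, the shape the ARM DSP multiply-accumulate tests feed to the backend. A rough sketch under that assumption (the accumulator operand and value names are illustrative):

    define i32 @mac_trunc_sketch(i64 %acc, i32 %b, i32 %c) {
    entry:
      %conv1 = sext i32 %b to i64
      %conv2 = sext i32 %c to i64
      %mul = mul nsw i64 %conv2, %conv1
      %0 = add nsw i64 %acc, %mul          ; 64-bit accumulate
      %conv3 = trunc i64 %0 to i32         ; result narrowed back to i32
      ret i32 %conv3
    }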
|
D | fast-isel-icmp.ll |
     16  %conv2 = zext i1 %cmp to i32
     17  ret i32 %conv2
     31  %conv2 = zext i1 %cmp to i32
     32  ret i32 %conv2
     46  %conv2 = zext i1 %cmp to i32
     47  ret i32 %conv2
     61  %conv2 = zext i1 %cmp to i32
     62  ret i32 %conv2
     76  %conv2 = zext i1 %cmp to i32
     77  ret i32 %conv2
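Every match here is the tail of the same idiom: an i1 comparison result widened with zext so it can be returned as i32, which is what the ARM fast-isel test checks is selected directly. For example (the predicate varies across the test's functions; slt below is just one case):

    define i32 @icmp_zext_sketch(i32 %a, i32 %b) {
    entry:
      %cmp = icmp slt i32 %a, %b           ; i1 result
      %conv2 = zext i1 %cmp to i32         ; widened to 0 or 1
      ret i32 %conv2
    }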
|
/external/llvm/test/CodeGen/ARM/ |
D | fast-isel-icmp.ll |
     16  %conv2 = zext i1 %cmp to i32
     17  ret i32 %conv2
     31  %conv2 = zext i1 %cmp to i32
     32  ret i32 %conv2
     46  %conv2 = zext i1 %cmp to i32
     47  ret i32 %conv2
     61  %conv2 = zext i1 %cmp to i32
     62  ret i32 %conv2
     76  %conv2 = zext i1 %cmp to i32
     77  ret i32 %conv2
|
D | longMAC.ll |
     33  %conv2 = zext i32 %c to i64
     34  %add = add i64 %mul, %conv2
     44  %conv2 = sext i32 %c to i64
     45  %add = add nsw i64 %mul, %conv2
     86  %conv2 = sext i32 %c to i64
     88  %mul4 = mul nsw i64 %conv3, %conv2
    127  %conv2 = zext i32 %lo to i64
    128  %add = add i64 %mul, %conv2
    142  %conv2 = zext i32 %lo to i64
    144  %add = add i64 %conv2, %conv3
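longMAC.ll feeds the backend a 32x32->64 multiply whose result is added to a zero- or sign-extended 32-bit operand, the IR shape behind ARM's UMLAL/SMLAL long multiply-accumulate instructions. The unsigned form, approximately (names are illustrative):

    define i64 @umlal_sketch(i32 %a, i32 %b, i32 %c) {
    entry:
      %conv = zext i32 %a to i64
      %conv1 = zext i32 %b to i64
      %mul = mul i64 %conv1, %conv         ; 32x32 -> 64 multiply
      %conv2 = zext i32 %c to i64
      %add = add i64 %mul, %conv2          ; accumulate -> UMLAL
      ret i64 %add
    }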
|
/external/swiftshader/third_party/LLVM/test/Transforms/InstCombine/ |
D | udivrem-change-width.ll |
      8  %conv2 = zext i8 %b to i32
      9  %div = udiv i32 %conv, %conv2
     18  %conv2 = zext i8 %b to i32
     19  %div = urem i32 %conv, %conv2
     28  %conv2 = zext i8 %b to i32
     29  %div = udiv i32 %conv, %conv2
     38  %conv2 = zext i8 %b to i32
     39  %div = urem i32 %conv, %conv2
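udivrem-change-width.ll checks that when both operands of a udiv or urem are zero-extended from i8, InstCombine performs the operation in i8 and extends the result, rather than dividing in i32. The input shape, roughly:

    define i32 @udiv_narrow_sketch(i8 %a, i8 %b) {
    entry:
      %conv = zext i8 %a to i32
      %conv2 = zext i8 %b to i32
      %div = udiv i32 %conv, %conv2        ; expected to shrink to an i8 udiv plus zext
      ret i32 %div
    }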
|
D | overflow.ll |
     11  %conv2 = sext i32 %b to i64
     12  %add = add nsw i64 %conv2, %conv
     36  %conv2 = sext i32 %b to i64
     37  %add = add nsw i64 %conv2, %conv
     63  %conv2 = sext i32 %b to i64
     64  %add = add nsw i64 %conv2, %conv
     84  %conv2 = sext i8 %b to i32
     85  %add = add nsw i32 %conv2, %conv
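overflow.ll exercises the checked-addition idiom: both operands are sign-extended to a wider type, added there, and the wide result is range-checked before being truncated back, which InstCombine can turn into a plain narrow add plus an overflow test. A reduced sketch (the overflow handler below is a placeholder, and only the upper bound is checked here):

    define i32 @sadd_check_sketch(i32 %a, i32 %b) {
    entry:
      %conv = sext i32 %a to i64
      %conv2 = sext i32 %b to i64
      %add = add nsw i64 %conv2, %conv
      %cmp = icmp sgt i64 %add, 2147483647    ; does the sum exceed INT32_MAX?
      br i1 %cmp, label %trap, label %cont

    trap:                                     ; overflow path (placeholder)
      call void @llvm.trap()
      unreachable

    cont:
      %conv3 = trunc i64 %add to i32
      ret i32 %conv3
    }

    declare void @llvm.trap()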
|
D | pr8547.ll |
     17  %conv2 = lshr i32 %shl, 24
     19  ; CHECK: %conv2 = and i32 %0, 64
     20  %tobool = icmp eq i32 %conv2, 0
     24  …8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i64 0, i64 0), i32 %conv2) nounwind
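pr8547.ll verifies that a shl/lshr pair feeding a zero test is collapsed into a single and (the CHECK line above), with the surviving %conv2 still reaching the icmp and the printf call. A generic sketch of the shift-pair-to-mask fold; the shift amounts and the resulting mask are illustrative, not the test's exact values:

    define i1 @shl_lshr_sketch(i32 %x) {
    entry:
      %shl = shl i32 %x, 24
      %conv2 = lshr i32 %shl, 24           ; keeps only the low 8 bits of %x
      %tobool = icmp eq i32 %conv2, 0
      ret i1 %tobool
    }
    ; InstCombine can replace the shift pair with "and i32 %x, 255", since only
    ; those bits can affect the comparison.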
|
/external/llvm/test/Transforms/InstCombine/ |
D | udivrem-change-width.ll |
      8  %conv2 = zext i8 %b to i32
      9  %div = udiv i32 %conv, %conv2
     18  %conv2 = zext i8 %b to i32
     19  %div = urem i32 %conv, %conv2
     28  %conv2 = zext i8 %b to i32
     29  %div = udiv i32 %conv, %conv2
     38  %conv2 = zext i8 %b to i32
     39  %div = urem i32 %conv, %conv2
|
D | overflow.ll |
     11  %conv2 = sext i32 %b to i64
     12  %add = add nsw i64 %conv2, %conv
     36  %conv2 = sext i32 %b to i64
     37  %add = add nsw i64 %conv2, %conv
     63  %conv2 = sext i32 %b to i64
     64  %add = add nsw i64 %conv2, %conv
     84  %conv2 = sext i8 %b to i32
     85  %add = add nsw i32 %conv2, %conv
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | vec_int_ext.ll |
     21  %conv2 = sext i8 %vecext1 to i32
     22  %vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
     48  %conv2 = sext i8 %vecext1 to i64
     49  %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
     69  %conv2 = sext i16 %vecext1 to i32
     70  %vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
     96  %conv2 = sext i16 %vecext1 to i64
     97  %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
    117  %conv2 = sext i32 %vecext1 to i64
    118  %vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
    [all …]
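vec_int_ext.ll builds vectors by extracting narrow elements, sign-extending each one, and reinserting them, a pattern the PowerPC backend can match to its vector sign-extend instructions. A two-lane excerpt of the i8-to-i32 case (the real test fills every lane, and which source elements it picks is not shown here):

    define <4 x i32> @sext_lanes_sketch(<16 x i8> %v) {
    entry:
      %vecext = extractelement <16 x i8> %v, i32 0
      %conv = sext i8 %vecext to i32
      %vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
      %vecext1 = extractelement <16 x i8> %v, i32 1
      %conv2 = sext i8 %vecext1 to i32
      %vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
      ret <4 x i32> %vecinit3
    }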
|
D | testComparesllgtuc.ll |
     43  %conv2 = zext i1 %cmp to i64
     44  ret i64 %conv2
     57  %conv2 = sext i1 %cmp to i64
     58  ret i64 %conv2
     95  %conv2 = zext i1 %cmp to i8
     96  store i8 %conv2, i8* @glob, align 1
    111  %conv2 = sext i1 %cmp to i8
    112  store i8 %conv2, i8* @glob, align 1
|
D | testComparesllleuc.ll |
     40  %conv2 = zext i1 %cmp to i64
     41  ret i64 %conv2
     52  %conv2 = sext i1 %cmp to i64
     53  ret i64 %conv2
     94  %conv2 = zext i1 %cmp to i8
     95  store i8 %conv2, i8* @glob
    107  %conv2 = sext i1 %cmp to i8
    108  store i8 %conv2, i8* @glob
|
D | testComparesllleus.ll |
     40  %conv2 = zext i1 %cmp to i64
     41  ret i64 %conv2
     52  %conv2 = sext i1 %cmp to i64
     53  ret i64 %conv2
     94  %conv2 = zext i1 %cmp to i16
     95  store i16 %conv2, i16* @glob
    107  %conv2 = sext i1 %cmp to i16
    108  store i16 %conv2, i16* @glob
|
D | testComparesllgtus.ll |
     46  %conv2 = zext i1 %cmp to i64
     47  ret i64 %conv2
     61  %conv2 = sext i1 %cmp to i64
     62  ret i64 %conv2
    104  %conv2 = zext i1 %cmp to i16
    105  store i16 %conv2, i16* @glob, align 2
    123  %conv2 = sext i1 %cmp to i16
    124  store i16 %conv2, i16* @glob, align 2
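The four testComparesll* entries above share one template: an unsigned comparison of char or short operands whose i1 result is widened with zext (0/1) or sext (0/-1) and either returned as i64 or stored back to the global @glob. Roughly, for the greater-than-unsigned-char case (function names are illustrative):

    @glob = common global i8 0, align 1

    define i64 @gtuc_ret_sketch(i8 zeroext %a, i8 zeroext %b) {
    entry:
      %cmp = icmp ugt i8 %a, %b
      %conv2 = zext i1 %cmp to i64         ; 0 or 1
      ret i64 %conv2
    }

    define void @gtuc_store_sketch(i8 zeroext %a, i8 zeroext %b) {
    entry:
      %cmp = icmp ugt i8 %a, %b
      %conv2 = sext i1 %cmp to i8          ; 0 or -1
      store i8 %conv2, i8* @glob, align 1
      ret void
    }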
|
/external/llvm/test/CodeGen/Hexagon/ |
D | extload-combine.ll |
     23  %conv2 = zext i16 %0 to i64
     24  ret i64 %conv2
     34  %conv2 = sext i16 %0 to i64
     35  ret i64 %conv2
     45  %conv2 = zext i8 %0 to i64
     46  ret i64 %conv2
     56  %conv2 = sext i8 %0 to i64
     57  ret i64 %conv2
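Each function in extload-combine.ll loads a narrow value and immediately widens it to i64; the test expects Hexagon to fold the extension into the load (an extending-load style memub/memuh) rather than emit a separate extend. One of the four shapes, approximately:

    define i64 @load_zext16_sketch(i16* %p) {
    entry:
      %0 = load i16, i16* %p, align 2
      %conv2 = zext i16 %0 to i64          ; expected to fold into the load
      ret i64 %conv2
    }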
|
D | memops.ll |
     22  %conv2 = trunc i32 %add to i8
     23  store i8 %conv2, i8* %p, align 1
     34  %conv2 = trunc i32 %sub to i8
     35  store i8 %conv2, i8* %p, align 1
     99  %conv2 = trunc i32 %add to i8
    100  store i8 %conv2, i8* %add.ptr, align 1
    112  %conv2 = trunc i32 %sub to i8
    113  store i8 %conv2, i8* %add.ptr, align 1
    181  %conv2 = trunc i32 %add to i8
    182  store i8 %conv2, i8* %add.ptr, align 1
    [all …]
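memops.ll covers read-modify-write sequences on bytes and halfwords in memory (load, add or subtract, truncate, store back) that Hexagon should select as single memop instructions. The byte-add case that produces the trunc/store pairs above, approximately:

    define void @memop_addb_sketch(i8* %p, i32 %x) {
    entry:
      %0 = load i8, i8* %p, align 1
      %conv = zext i8 %0 to i32
      %add = add nsw i32 %conv, %x
      %conv2 = trunc i32 %add to i8
      store i8 %conv2, i8* %p, align 1     ; load/op/store -> one memop
      ret void
    }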
|
D | memops2.ll |
     10  %conv2 = zext i16 %0 to i32
     11  %sub = add nsw i32 %conv2, 65535
     25  %conv2 = trunc i32 %sub to i16
     26  store i16 %conv2, i16* %add.ptr1, align 2
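memops2.ll is the halfword variant: the zext'd i16 has 65535 added to it, which is how a 16-bit decrement looks after promotion to i32, and the result is truncated and stored back. A sketch of that shape (the pointer arithmetic of the real test is omitted):

    define void @memop_dech_sketch(i16* %p) {
    entry:
      %0 = load i16, i16* %p, align 2
      %conv2 = zext i16 %0 to i32
      %sub = add nsw i32 %conv2, 65535     ; i.e. decrement by 1 in 16 bits
      %conv3 = trunc i32 %sub to i16
      store i16 %conv3, i16* %p, align 2
      ret void
    }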
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/ |
D | extload-combine.ll |
     23  %conv2 = zext i16 %0 to i64
     24  ret i64 %conv2
     34  %conv2 = sext i16 %0 to i64
     35  ret i64 %conv2
     45  %conv2 = zext i8 %0 to i64
     46  ret i64 %conv2
     56  %conv2 = sext i8 %0 to i64
     57  ret i64 %conv2
|
D | memops.ll |
     24  %conv2 = trunc i32 %add to i8
     25  store i8 %conv2, i8* %p, align 1
     37  %conv2 = trunc i32 %sub to i8
     38  store i8 %conv2, i8* %p, align 1
    108  %conv2 = trunc i32 %add to i8
    109  store i8 %conv2, i8* %add.ptr, align 1
    122  %conv2 = trunc i32 %sub to i8
    123  store i8 %conv2, i8* %add.ptr, align 1
    197  %conv2 = trunc i32 %add to i8
    198  store i8 %conv2, i8* %add.ptr, align 1
    [all …]
|
D | memops2.ll |
     10  %conv2 = zext i16 %0 to i32
     11  %sub = add nsw i32 %conv2, 65535
     25  %conv2 = trunc i32 %sub to i16
     26  store i16 %conv2, i16* %add.ptr1, align 2
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/CodeGenPrepare/X86/ |
D | ext-logicop.ll |
     21  %conv2 = zext i8 %and to i32
     22  %add = add nsw i32 %conv2, %ll
     45  %conv2 = zext i8 %or to i32
     46  %add = add nsw i32 %conv2, %ll
     71  %conv2 = zext i8 %and to i32
     72  %add = add nsw i32 %conv2, %ll
     95  %conv2 = zext i8 %lshr to i32
     96  %add = add nsw i32 %conv2, %ll
    119  %conv2 = zext i8 %xor to i32
    120  %add = add nsw i32 %conv2, %ll
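ext-logicop.ll applies zext to the result of a logic or shift operation on an i8 (and, or, lshr, xor) and then uses the widened value in further arithmetic; the test exercises how CodeGenPrepare moves such extensions around so they can fold away. A reduced sketch of the and variant (the mask constant is illustrative, and the test's surrounding address arithmetic is omitted):

    define i32 @zext_and_sketch(i8 %x, i32 %ll) {
    entry:
      %and = and i8 %x, 15
      %conv2 = zext i8 %and to i32
      %add = add nsw i32 %conv2, %ll
      ret i32 %add
    }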
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | arm64-fast-isel-icmp.ll |
    180  %conv2 = zext i1 %cmp to i32
    181  ret i32 %conv2
    191  %conv2 = zext i1 %cmp to i32
    192  ret i32 %conv2
    203  %conv2 = zext i1 %cmp to i32
    204  ret i32 %conv2
    215  %conv2 = zext i1 %cmp to i32
    216  ret i32 %conv2
    227  %conv2 = zext i1 %cmp to i32
    228  ret i32 %conv2
    [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-fast-isel-icmp.ll |
    180  %conv2 = zext i1 %cmp to i32
    181  ret i32 %conv2
    191  %conv2 = zext i1 %cmp to i32
    192  ret i32 %conv2
    203  %conv2 = zext i1 %cmp to i32
    204  ret i32 %conv2
    215  %conv2 = zext i1 %cmp to i32
    216  ret i32 %conv2
    227  %conv2 = zext i1 %cmp to i32
    228  ret i32 %conv2
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/ |
D | overflow.ll |
     11  %conv2 = sext i32 %b to i64
     12  %add = add nsw i64 %conv2, %conv
     36  %conv2 = sext i32 %b to i64
     37  %add = add nsw i64 %conv2, %conv
     63  %conv2 = sext i32 %b to i64
     64  %add = add nsw i64 %conv2, %conv
     84  %conv2 = sext i8 %b to i32
     85  %add = add nsw i32 %conv2, %conv
|