/external/llvm/test/Analysis/BasicAA/ |
D | q.bad.ll |
      9  %sext.zext.1 = zext i16 %sext.1 to i64
     11  %sext.zext.2 = zext i32 %sext.2 to i64
     12  %a = getelementptr inbounds i8, i8* %mem, i64 %sext.zext.1
     13  %b = getelementptr inbounds i8, i8* %mem, i64 %sext.zext.2
     19  ; %a and %b only PartialAlias as, although they're both zext(sext(%num)) they'll extend the sign by…
     20  ; number of bits before zext-ing the remainder.
     23  %sext.zext.1 = zext i16 %sext.1 to i64
     25  %sext.zext.2 = zext i32 %sext.2 to i64
     26  %a = getelementptr inbounds i8, i8* %mem, i64 %sext.zext.1
     27  %b = getelementptr inbounds i8, i8* %mem, i64 %sext.zext.2
     [all …]
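The truncated comment at lines 19-20 carries the point of this test: both GEP indices are zext(sext(%num)) chains, but they widen through different intermediate types, so for negative inputs they produce different offsets and BasicAA can only report PartialAlias. A minimal standalone sketch of that mismatch, with a hypothetical function and an assumed i8 %num (not copied from q.bad.ll):

define i64 @sketch_partial_alias_offsets(i8 %num) {
  %sext.1 = sext i8 %num to i16
  %sext.zext.1 = zext i16 %sext.1 to i64   ; %num = -1  ->  65535 (sign bits only up to bit 15)
  %sext.2 = sext i8 %num to i32
  %sext.zext.2 = zext i32 %sext.2 to i64   ; %num = -1  ->  4294967295 (sign bits up to bit 31)
  %diff = sub i64 %sext.zext.2, %sext.zext.1
  ret i64 %diff                            ; nonzero for negative %num, so %a and %b need not fully alias
}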
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/BasicAA/ |
D | q.bad.ll |
      9  %sext.zext.1 = zext i16 %sext.1 to i64
     11  %sext.zext.2 = zext i32 %sext.2 to i64
     12  %a = getelementptr inbounds i8, i8* %mem, i64 %sext.zext.1
     13  %b = getelementptr inbounds i8, i8* %mem, i64 %sext.zext.2
     19  ; %a and %b only PartialAlias as, although they're both zext(sext(%num)) they'll extend the sign by…
     20  ; number of bits before zext-ing the remainder.
     23  %sext.zext.1 = zext i16 %sext.1 to i64
     25  %sext.zext.2 = zext i32 %sext.2 to i64
     26  %a = getelementptr inbounds i8, i8* %mem, i64 %sext.zext.1
     27  %b = getelementptr inbounds i8, i8* %mem, i64 %sext.zext.2
     [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/ScalarEvolution/ |
D | zext-divrem.ll |
      6  %zext = zext i32 %div to i64
      7  ; CHECK: %zext
      8  ; CHECK-NEXT: --> ((zext i32 %a to i64) /u (zext i32 %b to i64))
      9  ret i64 %zext
     15  %zext = zext i32 %rem to i64
     16  ; CHECK: %zext
     17  ; CHECK-NEXT: --> ((zext i32 %a to i64) + (-1 * (zext i32 %b to i64) * ((zext i32 %a to i64) /u (z…
     18  ret i64 %zext
     26  %zext = zext i32 %sub to i64
     27  ; CHECK: %zext
     [all …]
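The CHECK-NEXT lines record how ScalarEvolution is expected to rewrite these expressions: the zext distributes over the unsigned division and remainder. A minimal sketch of the udiv case (function name assumed; the elided line defining %div is inferred from the expected SCEV):

define i64 @sketch_zext_of_udiv(i32 %a, i32 %b) {
  %div = udiv i32 %a, %b
  ; SCEV for %zext: ((zext i32 %a to i64) /u (zext i32 %b to i64)),
  ; since zero-extending an unsigned quotient equals dividing the zero-extended operands.
  %zext = zext i32 %div to i64
  ret i64 %zext
}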
|
D | no-wrap-add-exprs.ll |
     66  ; CHECK-NEXT: --> (-1 + (zext i8 %len to i16))<nsw> U: [-1,126) S: [-1,126)
     68  ; CHECK-NEXT: --> (-2 + (zext i8 %len to i16))<nsw> U: [-2,125) S: [-2,125)
    104  %t0.zext = zext i8 %t0 to i16
    105  %t1.zext = zext i8 %t1 to i16
    106  ; CHECK: %t0.zext = zext i8 %t0 to i16
    107  ; CHECK-NEXT: --> (1 + (zext i8 %len to i16))<nuw><nsw> U: [1,128) S: [1,128)
    108  ; CHECK: %t1.zext = zext i8 %t1 to i16
    109  ; CHECK-NEXT: --> (2 + (zext i8 %len to i16))<nuw><nsw> U: [2,129) S: [2,129)
    113  %q0.zext = zext i8 %q0 to i16
    114  %q1.zext = zext i8 %q1 to i16
    [all …]
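The interesting output here is the U:/S: ranges and the <nuw><nsw> flags in the CHECK lines: once %len has been zero-extended from i8, adding small constants in i16 provably cannot wrap. A minimal sketch of that idea with assumed names (the real test constrains %len further, which is why its printed ranges are tighter than the generic [0,255] bound used below):

define i16 @sketch_no_wrap_after_zext(i8 %n) {
  %n.zext = zext i8 %n to i16   ; value is in [0,255] by construction
  %p1 = add i16 %n.zext, 1      ; at most 256: SCEV can mark this no-unsigned-wrap / no-signed-wrap
  %p2 = add i16 %n.zext, 2      ; at most 257: likewise
  ret i16 %p2
}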
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/ |
D | overflow-mul.ll |
      3  ; return mul(zext x, zext y) > MAX
      7  %l = zext i32 %x to i64
      8  %r = zext i32 %y to i64
      9  ; CHECK-NOT: zext i32
     14  %retval = zext i1 %overflow to i32
     18  ; return mul(zext x, zext y) >= MAX+1
     22  %l = zext i32 %x to i64
     23  %r = zext i32 %y to i64
     24  ; CHECK-NOT: zext i32
     29  %retval = zext i1 %overflow to i32
     [all …]
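The two comments name the exact pattern under test: widen both i32 operands, multiply in i64, and compare the product against the unsigned i32 maximum. A minimal sketch of that input (function name assumed); the CHECK-NOT: zext i32 lines expect InstCombine to eliminate the widening, typically by rewriting the sequence to @llvm.umul.with.overflow.i32:

define i32 @sketch_mul_overflow_check(i32 %x, i32 %y) {
  %l = zext i32 %x to i64
  %r = zext i32 %y to i64
  %mul = mul i64 %l, %r                       ; full 64-bit product
  %overflow = icmp ugt i64 %mul, 4294967295   ; "mul(zext x, zext y) > MAX" from the comment
  %retval = zext i1 %overflow to i32
  ret i32 %retval
}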
|
D | zext.ll |
      6  ; CHECK-NEXT: [[C2:%.*]] = zext i16 %A to i64
      9  %c1 = zext i16 %A to i32
     17  ; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i1> [[XOR]] to <2 x i64>
     21  %zext = zext <2 x i1> %xor to <2 x i64>
     22  ret <2 x i64> %zext
     32  %zext = zext <2 x i32> %and to <2 x i64>
     33  ret <2 x i64> %zext
     45  %zext = zext <2 x i32> %xor to <2 x i64>
     46  ret <2 x i64> %zext
     52  ; CHECK-NEXT: [[ZEXT2:%.*]] = zext i1 [[TMP1]] to i64
     [all …]
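The first two matches describe the simplest fold in this file: a zext of a zext collapses into a single zext to the widest type, which is what the [[C2]] CHECK line expects for the %c1 chain. A minimal sketch of that input (the function name and the second zext are assumptions about the elided lines):

define i64 @sketch_double_zext(i16 %A) {
  %c1 = zext i16 %A to i32
  %c2 = zext i32 %c1 to i64   ; InstCombine folds the pair to one "zext i16 %A to i64"
  ret i64 %c2
}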
|
D | udivrem-change-width.ll |
     11  %za = zext i8 %a to i32
     12  %zb = zext i8 %b to i32
     23  %za = zext <2 x i8> %a to <2 x i32>
     24  %zb = zext <2 x i8> %b to <2 x i32>
     35  %za = zext i8 %a to i32
     36  %zb = zext i8 %b to i32
     47  %za = zext <2 x i8> %a to <2 x i32>
     48  %zb = zext <2 x i8> %b to <2 x i32>
     57  ; CHECK-NEXT: [[UDIV:%.*]] = zext i8 [[DIV]] to i32
     60  %za = zext i8 %a to i32
     [all …]
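The [[UDIV]] CHECK line gives away the transform these functions exercise: when both operands of a udiv or urem are zexts from i8, InstCombine can do the division in i8 and widen only the result. A minimal sketch of the scalar case (function name assumed; the <2 x i8> matches are the vector version of the same shape):

define i32 @sketch_narrow_udiv(i8 %a, i8 %b) {
  %za = zext i8 %a to i32
  %zb = zext i8 %b to i32
  %udiv = udiv i32 %za, %zb   ; expected result: udiv i8 %a, %b, then zext the i8 quotient to i32
  ret i32 %udiv
}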
|
D | zext-bool-add-sub.ll |
     13  %conv = zext i1 %x to i32
     14  %conv3 = zext i1 %y to i32
     23  ; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[A:%.*]] to i32
     28  %zext = zext i1 %a to i32
     30  %sel2 = select i1 %b, i32 %sel1, i32 %zext
     36  ; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[A:%.*]] to i32
     37  ; CHECK-NEXT: [[CONV3:%.*]] = zext i1 [[B:%.*]] to i32
     41  %conv = zext i1 %a to i32
     42  %conv3 = zext i1 %b to i32
     49  ; CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[X:%.*]] to i32
     [all …]
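These matches are all one idiom: i1 conditions widened with zext and then combined with add/sub or select, which InstCombine tries to rewrite into cheaper forms. A minimal sketch of the add variant (hypothetical function, not one of the test's):

define i32 @sketch_bool_add(i1 %x, i1 %y) {
  %conv = zext i1 %x to i32
  %conv3 = zext i1 %y to i32
  %sum = add nuw nsw i32 %conv, %conv3   ; always 0, 1, or 2
  ret i32 %sum
}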
|
/external/llvm/test/Transforms/InstCombine/ |
D | overflow-mul.ll |
      3  ; return mul(zext x, zext y) > MAX
      7  %l = zext i32 %x to i64
      8  %r = zext i32 %y to i64
      9  ; CHECK-NOT: zext i32
     14  %retval = zext i1 %overflow to i32
     18  ; return mul(zext x, zext y) >= MAX+1
     22  %l = zext i32 %x to i64
     23  %r = zext i32 %y to i64
     24  ; CHECK-NOT: zext i32
     29  %retval = zext i1 %overflow to i32
     [all …]
|
D | zext.ll |
      6  ; CHECK-NEXT: [[C2:%.*]] = zext i16 %A to i64
      9  %c1 = zext i16 %A to i32
     16  ; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i1> %A to <2 x i64>
     21  %zext = zext <2 x i1> %xor to <2 x i64>
     22  ret <2 x i64> %zext
     32  %zext = zext <2 x i32> %and to <2 x i64>
     33  ret <2 x i64> %zext
     45  %zext = zext <2 x i32> %xor to <2 x i64>
     46  ret <2 x i64> %zext
     53  ; CHECK-NEXT: [[ZEXT1:%.*]] = zext i1 %a to i32
     [all …]
|
D | udivrem-change-width.ll |
      7  %conv = zext i8 %a to i32
      8  %conv2 = zext i8 %b to i32
     17  %conv = zext i8 %a to i32
     18  %conv2 = zext i8 %b to i32
     27  %conv = zext i8 %a to i32
     28  %conv2 = zext i8 %b to i32
     33  ; CHECK: zext
     37  %conv = zext i8 %a to i32
     38  %conv2 = zext i8 %b to i32
     43  ; CHECK: zext
     [all …]
|
/external/llvm/test/Transforms/LoadCombine/ |
D | load-combine.ll |
     10  %3 = zext i8 %2 to i64
     14  %7 = zext i8 %6 to i64
     19  %12 = zext i8 %11 to i64
     24  %17 = zext i8 %16 to i64
     29  %22 = zext i8 %21 to i64
     34  %27 = zext i8 %26 to i64
     39  %32 = zext i8 %31 to i64
     44  %37 = zext i8 %36 to i64
     57  %4 = zext i16 %3 to i32
     59  %6 = zext i16 %1 to i32
     [all …]
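These matches are the byte-gathering half of a typical LoadCombine input: adjacent i8 loads are zero-extended and then, in the elided lines, shifted and or'd together so the pass can merge them into one wide load. A minimal two-byte, little-endian sketch of that pattern (hypothetical function, not taken from the test):

define i16 @sketch_combine_two_byte_loads(i8* %p) {
  %p1 = getelementptr inbounds i8, i8* %p, i64 1
  %b0 = load i8, i8* %p
  %b1 = load i8, i8* %p1
  %z0 = zext i8 %b0 to i16
  %z1 = zext i8 %b1 to i16
  %hi = shl i16 %z1, 8      ; second byte becomes the high half
  %val = or i16 %hi, %z0    ; equivalent to one little-endian i16 load from %p
  ret i16 %val
}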
|
/external/swiftshader/third_party/subzero/crosstest/ |
D | test_vector_ops_ll.ll |
     35  %res0 = zext <4 x i1> %res0_i1 to <4 x i32>
     39  %res1 = zext <4 x i1> %res1_i1 to <4 x i32>
     43  %res2 = zext <4 x i1> %res2_i1 to <4 x i32>
     47  %res3 = zext <4 x i1> %res3_i1 to <4 x i32>
     68  %res0 = zext <8 x i1> %res0_i1 to <8 x i16>
     72  %res1 = zext <8 x i1> %res1_i1 to <8 x i16>
     76  %res2 = zext <8 x i1> %res2_i1 to <8 x i16>
     80  %res3 = zext <8 x i1> %res3_i1 to <8 x i16>
     84  %res4 = zext <8 x i1> %res4_i1 to <8 x i16>
     88  %res5 = zext <8 x i1> %res5_i1 to <8 x i16>
     [all …]
|
/external/llvm/test/Transforms/SLPVectorizer/AArch64/ |
D | gather-reduce.ll |
     25  ; GENERIC: zext <8 x i16> [[L]] to <8 x i32>
     51  %conv = zext i16 %0 to i32
     54  %conv2 = zext i16 %1 to i32
     58  %conv3 = zext i16 %2 to i32
     62  %conv5 = zext i16 %3 to i32
     65  %conv7 = zext i16 %4 to i32
     69  %conv11 = zext i16 %5 to i32
     73  %conv14 = zext i16 %6 to i32
     76  %conv16 = zext i16 %7 to i32
     80  %conv20 = zext i16 %8 to i32
     [all …]
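The GENERIC CHECK line shows what the SLP vectorizer is expected to do with the scalar chain below it: the individual zext i16 ... to i32 conversions are replaced by a single vector zext. A minimal sketch of the vectorized shape (hypothetical function; in the real test the i16 values are loaded from memory and feed a gather-style reduction):

define <8 x i32> @sketch_vector_zext(<8 x i16> %L) {
  %wide = zext <8 x i16> %L to <8 x i32>   ; one instruction instead of eight scalar zexts
  ret <8 x i32> %wide
}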
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SystemZ/ |
D | int-conv-11.ll |
     77  %ext0 = zext i8 %trunc0 to i32
     78  %ext1 = zext i8 %trunc1 to i32
     79  %ext2 = zext i8 %trunc2 to i32
     80  %ext3 = zext i8 %trunc3 to i32
     81  %ext4 = zext i8 %trunc4 to i32
     82  %ext5 = zext i8 %trunc5 to i32
     83  %ext6 = zext i8 %trunc6 to i32
     84  %ext7 = zext i8 %trunc7 to i32
     85  %ext8 = zext i8 %trunc8 to i32
     86  %ext9 = zext i8 %trunc9 to i32
     [all …]
|
/external/llvm/test/CodeGen/SystemZ/ |
D | int-conv-11.ll |
     77  %ext0 = zext i8 %trunc0 to i32
     78  %ext1 = zext i8 %trunc1 to i32
     79  %ext2 = zext i8 %trunc2 to i32
     80  %ext3 = zext i8 %trunc3 to i32
     81  %ext4 = zext i8 %trunc4 to i32
     82  %ext5 = zext i8 %trunc5 to i32
     83  %ext6 = zext i8 %trunc6 to i32
     84  %ext7 = zext i8 %trunc7 to i32
     85  %ext8 = zext i8 %trunc8 to i32
     86  %ext9 = zext i8 %trunc9 to i32
     [all …]
|
/external/swiftshader/third_party/LLVM/test/Transforms/InstCombine/ |
D | udivrem-change-width.ll |
      7  %conv = zext i8 %a to i32
      8  %conv2 = zext i8 %b to i32
     17  %conv = zext i8 %a to i32
     18  %conv2 = zext i8 %b to i32
     27  %conv = zext i8 %a to i32
     28  %conv2 = zext i8 %b to i32
     33  ; CHECK: zext
     37  %conv = zext i8 %a to i32
     38  %conv2 = zext i8 %b to i32
     43  ; CHECK: zext
     [all …]
|
/external/llvm/test/Analysis/ScalarEvolution/ |
D | no-wrap-add-exprs.ll |
     66  ; CHECK-NEXT: --> (-1 + (zext i8 %len to i16))<nsw> U: [-1,126) S: [-1,126)
     68  ; CHECK-NEXT: --> (-2 + (zext i8 %len to i16))<nsw> U: [-2,125) S: [-2,125)
    104  %t0.zext = zext i8 %t0 to i16
    105  %t1.zext = zext i8 %t1 to i16
    106  ; CHECK: %t0.zext = zext i8 %t0 to i16
    107  ; CHECK-NEXT: --> (1 + (zext i8 %len to i16))<nuw><nsw> U: [1,128) S: [1,128)
    108  ; CHECK: %t1.zext = zext i8 %t1 to i16
    109  ; CHECK-NEXT: --> (2 + (zext i8 %len to i16))<nuw><nsw> U: [2,129) S: [2,129)
    113  %q0.zext = zext i8 %q0 to i16
    114  %q1.zext = zext i8 %q1 to i16
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | load-combine.ll |
     12  %tmp3 = zext i8 %tmp2 to i32
     15  %tmp6 = zext i8 %tmp5 to i32
     20  %tmp11 = zext i8 %tmp10 to i32
     25  %tmp16 = zext i8 %tmp15 to i32
     40  %tmp3 = zext i8 %tmp2 to i32
     43  %tmp6 = zext i8 %tmp5 to i32
     48  %tmp11 = zext i8 %tmp10 to i32
     53  %tmp16 = zext i8 %tmp15 to i32
     68  %tmp2 = zext i8 %tmp1 to i32
     72  %tmp6 = zext i8 %tmp5 to i32
     [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/SystemZ/ |
D | int-cast.ll |
     69  define void @zext() {
     70  %v0 = zext i8 undef to i16
     71  %v1 = zext i8 undef to i32
     72  %v2 = zext i8 undef to i64
     73  %v3 = zext i16 undef to i32
     74  %v4 = zext i16 undef to i64
     75  %v5 = zext i32 undef to i64
     76  %v6 = zext <2 x i8> undef to <2 x i16>
     77  %v7 = zext <2 x i8> undef to <2 x i32>
     78  %v8 = zext <2 x i8> undef to <2 x i64>
     [all …]
|
/external/llvm/test/Analysis/CostModel/X86/ |
D | cast.ll |
     11  ;CHECK: cost of 1 {{.*}} zext
     12  %A = zext <4 x i1> undef to <4 x i32>
     19  ;CHECK-NOT: cost of 1 {{.*}} zext
     20  %D = zext <8 x i1> undef to <8 x i32>
     28  ;CHECK: cost of 1 {{.*}} zext
     29  %G = zext i1 undef to i32
     40  ;CHECK-AVX2: cost of 3 {{.*}} zext
     41  ;CHECK-AVX: cost of 4 {{.*}} zext
     42  %Z = zext <8 x i1> %in to <8 x i32>
     47  ;CHECK-AVX2: cost of 1 {{.*}} zext
     [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/ |
D | vshll.ll |
     34  %zext = zext <8 x i8> %tmp1 to <8 x i16>
     35  %shift = shl <8 x i16> %zext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
     43  %zext = zext <4 x i16> %tmp1 to <4 x i32>
     44  %shift = shl <4 x i32> %zext, <i32 15, i32 15, i32 15, i32 15>
     52  %zext = zext <2 x i32> %tmp1 to <2 x i64>
     53  %shift = shl <2 x i64> %zext, <i64 31, i64 31>
     72  %zext = zext <4 x i16> %tmp1 to <4 x i32>
     73  %shift = shl <4 x i32> %zext, <i32 16, i32 16, i32 16, i32 16>
     81  %zext = zext <2 x i32> %tmp1 to <2 x i64>
     82  %shift = shl <2 x i64> %zext, <i64 32, i64 32>
     [all …]
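The matched pairs cover the two shapes this file tests: a zext followed by a shl with an immediate smaller than the source element width (7, 15, 31), and a shl by exactly the source element width (16, 32), which on ARM NEON needs the "shift by size" form of vshll. A minimal sketch of one instance of each (hypothetical function names):

define <4 x i32> @sketch_vshll_imm15(<4 x i16> %v) {
  %zext = zext <4 x i16> %v to <4 x i32>
  %shift = shl <4 x i32> %zext, <i32 15, i32 15, i32 15, i32 15>   ; shift < source element width
  ret <4 x i32> %shift
}

define <4 x i32> @sketch_vshll_imm16(<4 x i16> %v) {
  %zext = zext <4 x i16> %v to <4 x i32>
  %shift = shl <4 x i32> %zext, <i32 16, i32 16, i32 16, i32 16>   ; shift == source element width
  ret <4 x i32> %shift
}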
|
/external/llvm/test/CodeGen/ARM/ |
D | vshll.ll |
     34  %zext = zext <8 x i8> %tmp1 to <8 x i16>
     35  %shift = shl <8 x i16> %zext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
     43  %zext = zext <4 x i16> %tmp1 to <4 x i32>
     44  %shift = shl <4 x i32> %zext, <i32 15, i32 15, i32 15, i32 15>
     52  %zext = zext <2 x i32> %tmp1 to <2 x i64>
     53  %shift = shl <2 x i64> %zext, <i64 31, i64 31>
     72  %zext = zext <4 x i16> %tmp1 to <4 x i32>
     73  %shift = shl <4 x i32> %zext, <i32 16, i32 16, i32 16, i32 16>
     81  %zext = zext <2 x i32> %tmp1 to <2 x i64>
     82  %shift = shl <2 x i64> %zext, <i64 32, i64 32>
     [all …]
|
/external/llvm/test/CodeGen/PowerPC/ |
D | ext-bool-trunc-repl.ll | 25 …zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) t…
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | ext-bool-trunc-repl.ll | 25 …zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) t…
|