Searched refs:zext (Results 1 – 25 of 1133) sorted by relevance
/external/llvm/test/Transforms/InstCombine/

overflow-mul.ll
    3: ; return mul(zext x, zext y) > MAX
    7: %l = zext i32 %x to i64
    8: %r = zext i32 %y to i64
    9: ; CHECK-NOT: zext i32
   14: %retval = zext i1 %overflow to i32
   18: ; return mul(zext x, zext y) >= MAX+1
   22: %l = zext i32 %x to i64
   23: %r = zext i32 %y to i64
   24: ; CHECK-NOT: zext i32
   29: %retval = zext i1 %overflow to i32
  [all …]
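For context, the idiom these matches come from looks roughly like the following hypothetical function (the name and exact constant are illustrative, not copied from the file): both operands are widened with zext, multiplied in i64, and the product is compared against the unsigned 32-bit maximum.

    define i32 @mul_overflow_check(i32 %x, i32 %y) {
      %l = zext i32 %x to i64
      %r = zext i32 %y to i64
      %mul = mul i64 %l, %r
      ; 4294967295 is UINT32_MAX; "product > MAX" means the i32 multiply overflowed.
      %overflow = icmp ugt i64 %mul, 4294967295
      %retval = zext i1 %overflow to i32
      ret i32 %retval
    }

The CHECK-NOT: zext lines suggest InstCombine is expected to replace the whole zext/mul/icmp chain with llvm.umul.with.overflow.i32, so no widening survives in the output.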
udivrem-change-width.ll
    7: %conv = zext i8 %a to i32
    8: %conv2 = zext i8 %b to i32
   17: %conv = zext i8 %a to i32
   18: %conv2 = zext i8 %b to i32
   27: %conv = zext i8 %a to i32
   28: %conv2 = zext i8 %b to i32
   33: ; CHECK: zext
   37: %conv = zext i8 %a to i32
   38: %conv2 = zext i8 %b to i32
   43: ; CHECK: zext
  [all …]
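The pattern being exercised is a divide or remainder whose operands are both zero-extended from i8, so the operation can be narrowed back to i8. A minimal hypothetical example (function name invented here):

    define i32 @udiv_narrow(i8 %a, i8 %b) {
      %conv = zext i8 %a to i32
      %conv2 = zext i8 %b to i32
      ; Both inputs fit in i8, so the i32 udiv can be performed in i8 instead.
      %div = udiv i32 %conv, %conv2
      ret i32 %div
    }

After InstCombine this should become an i8 udiv of %a and %b followed by a single zext of the quotient, which is what the remaining CHECK: zext lines look for.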
zext.ll
    5: %c1 = zext i16 %A to i32 ; <i32> [#uses=1]
   11: ; CHECK: %c2 = zext i16 %A to i64
   17: %zext = zext <2 x i1> %xor to <2 x i64>
   18: ret <2 x i64> %zext
   21: ; CHECK-NEXT: zext <2 x i1> %A to <2 x i64>
   28: %zext = zext <2 x i32> %and to <2 x i64>
   29: ret <2 x i64> %zext
   39: %zext = zext <2 x i32> %xor to <2 x i64>
   40: ret <2 x i64> %zext
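The first pair of matches checks the basic fold of two stacked zero extensions into one. A hypothetical sketch of that input:

    define i64 @double_zext(i16 %A) {
      %c1 = zext i16 %A to i32
      ; zext of a zext collapses into a single zext to the widest type.
      %c2 = zext i32 %c1 to i64
      ret i64 %c2
    }

The expected result is the single instruction in the CHECK line above, %c2 = zext i16 %A to i64. The later matches test the same idea through vector xor/and operations on <2 x i1> and <2 x i32>.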
/external/llvm/test/Transforms/LoadCombine/

load-combine.ll
   10: %3 = zext i8 %2 to i64
   14: %7 = zext i8 %6 to i64
   19: %12 = zext i8 %11 to i64
   24: %17 = zext i8 %16 to i64
   29: %22 = zext i8 %21 to i64
   34: %27 = zext i8 %26 to i64
   39: %32 = zext i8 %31 to i64
   44: %37 = zext i8 %36 to i64
   57: %4 = zext i16 %3 to i32
   59: %6 = zext i16 %1 to i32
  [all …]
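These matches are the widening steps of a byte-assembly pattern: adjacent narrow loads are zero-extended, shifted into position, and or'd together, which LoadCombine can turn into one wider load. A hypothetical two-byte version of the idiom:

    define i16 @two_byte_load(i8* %p) {
      %p1 = getelementptr i8, i8* %p, i64 1
      %b0 = load i8, i8* %p
      %b1 = load i8, i8* %p1
      %z0 = zext i8 %b0 to i16
      %z1 = zext i8 %b1 to i16
      ; Shift the second byte into the high half and merge.
      %hi = shl i16 %z1, 8
      %val = or i16 %hi, %z0
      ret i16 %val
    }

On a little-endian target this is equivalent to a single load i16 from %p, which is the kind of rewrite the test checks for.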
/external/llvm/test/CodeGen/SystemZ/

int-conv-11.ll
   77: %ext0 = zext i8 %trunc0 to i32
   78: %ext1 = zext i8 %trunc1 to i32
   79: %ext2 = zext i8 %trunc2 to i32
   80: %ext3 = zext i8 %trunc3 to i32
   81: %ext4 = zext i8 %trunc4 to i32
   82: %ext5 = zext i8 %trunc5 to i32
   83: %ext6 = zext i8 %trunc6 to i32
   84: %ext7 = zext i8 %trunc7 to i32
   85: %ext8 = zext i8 %trunc8 to i32
   86: %ext9 = zext i8 %trunc9 to i32
  [all …]
int-mul-08.ll
   13: %ax = zext i64 %a to i128
   14: %bx = zext i64 %b to i128
   49: %ax = zext i64 %a to i128
   50: %bx = zext i64 %b to i128
   65: %ax = zext i64 %a to i128
   66: %bx = zext i64 %b to i128
   92: %ax = zext i64 %a to i128
   93: %bx = zext i64 %b to i128
  107: %ax = zext i64 %a to i128
  108: %bx = zext i64 %b to i128
  [all …]
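Each pair of matches is the setup for a full-width unsigned multiply: both i64 operands are zero-extended to i128 so the backend can use a 64x64-to-128-bit multiply instead of expanding a general i128 multiplication. A hypothetical high-half variant of the idiom:

    define i64 @mul_high(i64 %a, i64 %b) {
      %ax = zext i64 %a to i128
      %bx = zext i64 %b to i128
      %mul = mul i128 %ax, %bx
      ; Keep only the upper 64 bits of the 128-bit product.
      %highx = lshr i128 %mul, 64
      %high = trunc i128 %highx to i64
      ret i64 %high
    }

On SystemZ this is presumably selected to the register-pair multiply (mlgr) that the surrounding CHECK lines in the test match.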
int-conv-06.ll
   12: %ext = zext i16 %half to i32
   22: %ext = zext i16 %half to i32
   41: %ext = zext i16 %half to i32
   52: %ext = zext i16 %half to i32
   65: %ext = zext i16 %half to i32
   76: %ext = zext i16 %half to i32
   87: %ext = zext i16 %half to i32
  100: %ext = zext i16 %half to i32
  113: %ext = zext i16 %half to i32
  157: %ext0 = zext i16 %trunc0 to i32
  [all …]
int-conv-02.ll
   12: %ext = zext i8 %byte to i32
   22: %ext = zext i8 %byte to i32
   41: %ext = zext i8 %byte to i32
   52: %ext = zext i8 %byte to i32
   65: %ext = zext i8 %byte to i32
   76: %ext = zext i8 %byte to i32
   87: %ext = zext i8 %byte to i32
  100: %ext = zext i8 %byte to i32
  113: %ext = zext i8 %byte to i32
  157: %ext0 = zext i8 %trunc0 to i32
  [all …]
int-conv-08.ll
   11: %ext = zext i16 %half to i64
   21: %ext = zext i16 %half to i64
   40: %ext = zext i16 %half to i64
   51: %ext = zext i16 %half to i64
   64: %ext = zext i16 %half to i64
   75: %ext = zext i16 %half to i64
   86: %ext = zext i16 %half to i64
   99: %ext = zext i16 %half to i64
  112: %ext = zext i16 %half to i64
  156: %ext0 = zext i16 %trunc0 to i64
  [all …]
int-conv-04.ll
   11: %ext = zext i8 %byte to i64
   21: %ext = zext i8 %byte to i64
   40: %ext = zext i8 %byte to i64
   51: %ext = zext i8 %byte to i64
   64: %ext = zext i8 %byte to i64
   75: %ext = zext i8 %byte to i64
   86: %ext = zext i8 %byte to i64
   99: %ext = zext i8 %byte to i64
  112: %ext = zext i8 %byte to i64
  156: %ext0 = zext i8 %trunc0 to i64
  [all …]
/external/llvm/test/Analysis/CostModel/X86/

cast.ll
   11: ;CHECK: cost of 1 {{.*}} zext
   12: %A = zext <4 x i1> undef to <4 x i32>
   19: ;CHECK-NOT: cost of 1 {{.*}} zext
   20: %D = zext <8 x i1> undef to <8 x i32>
   28: ;CHECK: cost of 1 {{.*}} zext
   29: %G = zext i1 undef to i32
   40: ;CHECK-AVX2: cost of 3 {{.*}} zext
   41: ;CHECK-AVX: cost of 4 {{.*}} zext
   42: %Z = zext <8 x i1> %in to <8 x i32>
   47: ;CHECK-AVX2: cost of 1 {{.*}} zext
  [all …]
/external/llvm/test/CodeGen/ARM/

vshll.ll
   34: %zext = zext <8 x i8> %tmp1 to <8 x i16>
   35: %shift = shl <8 x i16> %zext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
   43: %zext = zext <4 x i16> %tmp1 to <4 x i32>
   44: %shift = shl <4 x i32> %zext, <i32 15, i32 15, i32 15, i32 15>
   52: %zext = zext <2 x i32> %tmp1 to <2 x i64>
   53: %shift = shl <2 x i64> %zext, <i64 31, i64 31>
   72: %zext = zext <4 x i16> %tmp1 to <4 x i32>
   73: %shift = shl <4 x i32> %zext, <i32 16, i32 16, i32 16, i32 16>
   81: %zext = zext <2 x i32> %tmp1 to <2 x i64>
   82: %shift = shl <2 x i64> %zext, <i64 32, i64 32>
  [all …]
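Each match pairs a vector zero extension with a constant left shift, the pattern NEON's widening shift covers. A hypothetical standalone version of the first case:

    define <8 x i16> @widening_shift(<8 x i8> %v) {
      ; Widen each i8 lane to i16, then shift every lane left by 7.
      %zext = zext <8 x i8> %v to <8 x i16>
      %shift = shl <8 x i16> %zext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
      ret <8 x i16> %shift
    }

The test checks that this is selected as a single vshll instruction; the later matches that shift by exactly the source element width (16 or 32) exercise the special maximum-shift form of vshll.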
/external/llvm/test/CodeGen/X86/

2008-12-02-IllegalResultType.ll
   12: %2 = zext i8 %1 to i64 ; <i64> [#uses=1]
   14: %4 = zext i1 %3 to i64 ; <i64> [#uses=1]
   17: %7 = zext i1 %6 to i8 ; <i8> [#uses=1]
   19: %9 = zext i8 %8 to i64 ; <i64> [#uses=1]
   21: %11 = zext i1 %10 to i8 ; <i8> [#uses=1]
   23: %13 = zext i8 %12 to i64 ; <i64> [#uses=1]
   25: %15 = zext i1 %14 to i8 ; <i8> [#uses=1]
   27: %17 = zext i8 %16 to i64 ; <i64> [#uses=1]
   29: %19 = zext i1 %18 to i8 ; <i8> [#uses=1]
   31: %21 = zext i8 %20 to i64 ; <i64> [#uses=1]
  [all …]
shift-folding.ll
   11: %gep.upgrd.1 = zext i32 %Y to i64
   24: %gep.upgrd.2 = zext i32 %Y to i64
   62: %i.zext = zext i16 %i to i32
   63: %index = lshr i32 %i.zext, 11
   64: %index.zext = zext i32 %index to i64
   65: %val.ptr = getelementptr inbounds i32, i32* %arr, i64 %index.zext
   67: %val.zext = zext i32 %val to i64
   68: %sum = add i64 %val.zext, %index.zext
Atomics-64.ll
  312: %1 = zext i8 %0 to i32
  318: %6 = zext i8 %5 to i32
  324: %11 = zext i8 %10 to i32
  331: %17 = zext i8 %16 to i32
  338: %23 = zext i8 %22 to i32
  344: %28 = zext i8 %27 to i32
  350: %33 = zext i8 %32 to i64
  356: %38 = zext i8 %37 to i64
  362: %43 = zext i8 %42 to i64
  368: %48 = zext i8 %47 to i64
  [all …]
codegen-prepare-extload.ll
    8: ; CodeGenPrepare should move the zext into the block with the load
   16: ; OPTALL-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
   25: %s = zext i8 %t to i32
   36: ; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
   40: ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
   50: %s = zext i8 %add to i32
   95: ; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
   96: ; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32
  100: ; NONSTRESS: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
  103: ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
  [all …]
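The comment on line 8 describes the transformation under test: when a load sits in one block and the zext of its value in another, CodeGenPrepare moves the extension next to the load so instruction selection can fold the pair into a zero-extending load. A hypothetical input in that shape (names and control flow are illustrative):

    define void @ext_in_other_block(i8* %p, i32* %q) {
    entry:
      %t = load i8, i8* %p
      %cmp = icmp ne i8 %t, 0
      br i1 %cmp, label %use, label %done
    use:
      ; CodeGenPrepare is expected to hoist this zext up into %entry, next to the load.
      %s = zext i8 %t to i32
      store i32 %s, i32* %q
      br label %done
    done:
      ret void
    }

The OPTALL/OPT/STRESS/NONSTRESS/DISABLE prefixes in the matches correspond to the different RUN configurations the test compares.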
2013-01-09-DAGCombineBug.ll
   51: …4 xor (i64 zext (i1 trunc (i192 lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 tru…
   57: …zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr (i384 or (i384 shl (i384 zext (i64 ptrtoint ([2 …
   60: …zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr (i384 or (i384 shl (i384 zext (i64 ptrtoint ([2 …
fold-and-shift.ll
   39: ; To make matters worse, because of the two-phase zext of %i and their reuse in
   50: %i.zext = zext i16 %i to i32
   51: %index = lshr i32 %i.zext, 11
   54: %sum = add i32 %val, %i.zext
   69: %i.zext = zext i16 %i to i32
   70: %index = lshr i32 %i.zext, 11
   71: %index.zext = zext i32 %index to i64
   72: %val.ptr = getelementptr inbounds i32, i32* %arr, i64 %index.zext
   74: %sum.1 = add i32 %val, %i.zext
/external/llvm/test/Analysis/ScalarEvolution/

fold.ll
    4: %A = zext i8 %x to i12
    6: ; CHECK: zext i8 %x to i16
   11: %A = zext i8 %x to i16
   19: %A = zext i8 %x to i16
   47: ; CHECK-NEXT: (zext i32 ([[EXPR]]) to i34)
   48: %F = zext i16 %B to i30
   51: %G = zext i16 %B to i32
   54: %H = zext i16 %B to i34
   56: ; CHECK-NEXT: (zext i32 ([[EXPR]]) to i34)
   67: ; CHECK: --> (zext i1 (trunc i32 %i to i1) to i32)
  [all …]
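The first two matches show ScalarEvolution folding a chain of extensions into one: a value zero-extended to an odd width such as i12 and then widened again still prints as a single zext from the original type. A hypothetical input in that spirit (assuming the file is run through opt -analyze -scalar-evolution):

    define i16 @chained_zext(i8 %x) {
      %A = zext i8 %x to i12
      ; SCEV should describe %B as (zext i8 %x to i16), not as a nested zext.
      %B = zext i12 %A to i16
      ret i16 %B
    }

The later matches with [[EXPR]] check that the extension distributes over a more complex expression in the same way.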
/external/llvm/test/CodeGen/AArch64/

arm64-codegen-prepare-extload.ll
    5: ; CodeGenPrepare should move the zext into the block with the load
   10: ; OPTALL-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
   19: %s = zext i8 %t to i32
   30: ; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
   34: ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
   44: %s = zext i8 %add to i32
   89: ; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
   90: ; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32
   94: ; NONSTRESS: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
   97: ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
  [all …]
/external/llvm/test/CodeGen/NVPTX/

compare-int.ll
   16: %ret = zext i1 %cmp to i64
   25: %ret = zext i1 %cmp to i64
   34: %ret = zext i1 %cmp to i64
   43: %ret = zext i1 %cmp to i64
   52: %ret = zext i1 %cmp to i64
   61: %ret = zext i1 %cmp to i64
   70: %ret = zext i1 %cmp to i64
   79: %ret = zext i1 %cmp to i64
   88: %ret = zext i1 %cmp to i64
   97: %ret = zext i1 %cmp to i64
  [all …]
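Every match here (and in the Hexagon and BPF files below) is the same two-instruction idiom: an integer compare whose i1 result is zero-extended to the return type, repeated once per comparison predicate. A hypothetical instance:

    define i64 @cmp_eq_to_i64(i32 %a, i32 %b) {
      %cmp = icmp eq i32 %a, %b
      ; Materialize the boolean as 0 or 1 in a 64-bit register.
      %ret = zext i1 %cmp to i64
      ret i64 %ret
    }

Each backend test then checks the compare-and-select sequence it expects for that predicate (for NVPTX, presumably a setp followed by a selp).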
/external/llvm/test/CodeGen/Hexagon/

cmp_pred_reg.ll
   10: %selv = zext i1 %cmp to i32
   18: %selv = zext i1 %cmp to i32
   26: %selv = zext i1 %cmp to i32
   34: %selv = zext i1 %cmp to i32
   42: %selv = zext i1 %cmp to i32
   50: %selv = zext i1 %cmp to i32
   58: %selv = zext i1 %cmp to i32
   66: %selv = zext i1 %cmp to i32
   74: %selv = zext i1 %cmp to i32
   82: %selv = zext i1 %cmp to i32
  [all …]
cmp_pred.ll
   10: %selv = zext i1 %cmp to i32
   18: %selv = zext i1 %cmp to i32
   26: %selv = zext i1 %cmp to i32
   34: %selv = zext i1 %cmp to i32
   42: %selv = zext i1 %cmp to i32
   50: %selv = zext i1 %cmp to i32
   58: %selv = zext i1 %cmp to i32
   66: %selv = zext i1 %cmp to i32
   74: %selv = zext i1 %cmp to i32
   82: %selv = zext i1 %cmp to i32
  [all …]
/external/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/

split-gep-and-gvn.ll
  104: ; array[zext(x)][zext(y)]
  105: ; array[zext(x)][zext(y +nuw 1)]
  106: ; array[zext(x +nuw 1)][zext(y)]
  107: ; array[zext(x +nuw 1)][zext(y +nuw 1)].
  110: ; 1) extends array indices using zext instead of sext;
  111: ; 2) annotates the addition with "nuw"; otherwise, zext(x + 1) => zext(x) + 1
  115: %0 = zext i32 %y to i64
  116: %1 = zext i32 %x to i64
  122: %7 = zext i32 %6 to i64
  128: %13 = zext i32 %12 to i64
  [all …]
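The comments spell out why the test is written this way: the four accesses touch array[x..x+1][y..y+1], the indices are widened with zext, and the +1 additions carry nuw so that zext(x +nuw 1) can legally be rewritten as zext(x) + 1. That lets SeparateConstOffsetFromGEP peel the constant offsets out of the GEPs so the accesses share one base address. A hypothetical two-access sketch of the pattern (array shape and names are illustrative):

    @array = internal global [32 x [32 x float]] zeroinitializer

    define float @sum_pair(i32 %x, i32 %y) {
      %x64 = zext i32 %x to i64
      %y64 = zext i32 %y to i64
      %p0 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @array, i64 0, i64 %x64, i64 %y64
      %v0 = load float, float* %p0
      ; nuw guarantees the add cannot wrap, so zext(%y + 1) == zext(%y) + 1.
      %y1 = add nuw i32 %y, 1
      %y1.64 = zext i32 %y1 to i64
      %p1 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @array, i64 0, i64 %x64, i64 %y1.64
      %v1 = load float, float* %p1
      %sum = fadd float %v0, %v1
      ret float %sum
    }

After the split, the second GEP becomes the first GEP's address plus a constant byte offset, which GVN can then reuse.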
/external/llvm/test/CodeGen/BPF/

setcc.ll
    6: %t3 = zext i1 %t2 to i16
   15: %t3 = zext i1 %t2 to i16
   23: %t2 = zext i1 %t1 to i16
   31: %t2 = zext i1 %t1 to i16
   39: %t2 = zext i1 %t1 to i16
   47: %t2 = zext i1 %t1 to i16
   55: %t2 = zext i1 %t1 to i16
   63: %t2 = zext i1 %t1 to i16
   71: %t2 = zext i1 %t1 to i16
   79: %t2 = zext i1 %t1 to i16
  [all …]