/external/llvm/test/Transforms/InstCombine/ |
D | double-float-shrink-1.ll |
     15  %conv1 = fptrunc double %call to float
     16  ret float %conv1
     32  %conv1 = fptrunc double %call to float
     33  ret float %conv1
     49  %conv1 = fptrunc double %call to float
     50  ret float %conv1
     66  %conv1 = fptrunc double %call to float
     67  ret float %conv1
     83  %conv1 = fptrunc double %call to float
     84  ret float %conv1
     [all …]
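Every hit above is the tail of the same libcall-shrinking idiom: a float argument widened to double, passed to a double-precision libm routine, and truncated back. A minimal sketch of one such case, assuming @cos as the callee and an illustrative function name (the RUN lines and attributes are not visible in the hits):

    define float @float_cos(float %x) {
    entry:
      %conv = fpext float %x to double         ; widen the float argument
      %call = call double @cos(double %conv)   ; double-precision libm call
      %conv1 = fptrunc double %call to float   ; truncate the result back
      ret float %conv1
    }
    declare double @cos(double)

Under the options these tests exercise, InstCombine can rewrite the whole sequence as a direct call to the float variant (@cosf), eliminating both conversions.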
|
D | cos-1.ll |
     22  %conv1 = fpext float %f to double
     23  %neg = fsub double -0.000000e+00, %conv1
     32  %conv1 = fpext float %f to double
     33  %neg = fsub double -0.000000e+00, %conv1
     35  ; NO-FLOAT-SHRINK: call double @cos(double %conv1)
|
D | fold-sqrt-sqrtf.ll |
     10  ; CHECK: %conv1 = fptrunc double %call to float
     13  %conv1 = fptrunc double %call to float
     14  ret float %conv1
|
D | sqrt.ll |
     12  %conv1 = fptrunc double %call to float ; <float> [#uses=1]
     14  ret float %conv1
     27  %conv1 = fptrunc double %call to float ; <float> [#uses=1]
     29  ret float %conv1
|
/external/llvm/test/CodeGen/PowerPC/ |
D | ctrloop-i64.ll |
     12  %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
     16  %conv1 = add i64 %conv, %0
     22  ret i64 %conv1
     34  %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
     38  %conv1 = add i64 %conv, %0
     44  ret i64 %conv1
     56  %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
     60  %conv1 = add i64 %conv, %0
     66  ret i64 %conv1
     78  %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
     [all …]
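The hits outline a counted loop carrying an i64 accumulator, the shape the PowerPC CTR-loop pass matches. A minimal sketch, with the signature and the loop-body operation (the udiv) assumed, since only the phi, the add, and the return are visible in the hits:

    define i64 @ctr_loop_sketch(i64* %a, i64 %d, i32 %n) nounwind {
    entry:
      br label %for.body

    for.body:
      %i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
      %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
      %arrayidx = getelementptr inbounds i64, i64* %a, i32 %i.06
      %0 = load i64, i64* %arrayidx, align 8
      %conv = udiv i64 %x.05, %d               ; assumed body operation
      %conv1 = add i64 %conv, %0               ; matches the hits
      %inc = add nsw i32 %i.06, 1
      %exitcond = icmp eq i32 %inc, %n
      br i1 %exitcond, label %for.end, label %for.body

    for.end:
      ret i64 %conv1
    }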
|
D | fp-to-int-ext.ll |
     10  %conv1 = sitofp i64 %conv to double
     11  ret double %conv1
     23  %conv1 = sitofp i64 %conv to double
     24  ret double %conv1
     37  %conv1 = sitofp i64 %conv to double
     38  ret double %conv1
     55  %conv1 = sitofp i64 %conv to double
     56  ret double %conv1
|
D | fp-to-int-to-fp.ll |
     10  %conv1 = sitofp i64 %conv to float
     11  ret float %conv1
     29  %conv1 = sitofp i64 %conv to double
     30  ret double %conv1
     47  %conv1 = uitofp i64 %conv to float
     48  ret float %conv1
     60  %conv1 = uitofp i64 %conv to double
     61  ret double %conv1
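These hits form fp-to-int-to-fp round trips: the value is converted to an integer and immediately back, and the test checks that the backend keeps it in floating-point registers instead of bouncing through memory. A minimal sketch of the signed float case (function name assumed):

    define float @fp_int_fp(float %X) {
    entry:
      %conv = fptosi float %X to i64      ; to integer...
      %conv1 = sitofp i64 %conv to float  ; ...and straight back
      ret float %conv1
    }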
|
D | no-extra-fp-conv-ldst.ll |
     35  %conv1 = sitofp i32 %conv to float
     36  ret float %conv1
     51  %conv1 = sitofp i32 %conv to double
     52  ret double %conv1
     67  %conv1 = uitofp i32 %conv to float
     68  ret float %conv1
     83  %conv1 = uitofp i32 %conv to double
     84  ret double %conv1
|
D | ctrloop-lt.ll |
     25  %conv1 = trunc i32 %add to i8
     26  store i8 %conv1, i8* %arrayidx, align 1
     55  %conv1 = trunc i32 %add to i8
     56  store i8 %conv1, i8* %arrayidx, align 1
     85  %conv1 = trunc i32 %add to i8
     86  store i8 %conv1, i8* %arrayidx, align 1
    114  %conv1 = trunc i32 %add to i8
    115  store i8 %conv1, i8* %arrayidx, align 1
    143  %conv1 = trunc i32 %add to i8
    144  store i8 %conv1, i8* %arrayidx, align 1
     [all …]
|
D | ctrloop-le.ll |
     25  %conv1 = trunc i32 %add to i8
     26  store i8 %conv1, i8* %arrayidx, align 1
     54  %conv1 = trunc i32 %add to i8
     55  store i8 %conv1, i8* %arrayidx, align 1
     83  %conv1 = trunc i32 %add to i8
     84  store i8 %conv1, i8* %arrayidx, align 1
    112  %conv1 = trunc i32 %add to i8
    113  store i8 %conv1, i8* %arrayidx, align 1
    141  %conv1 = trunc i32 %add to i8
    142  store i8 %conv1, i8* %arrayidx, align 1
     [all …]
|
D | ctrloop-ne.ll |
     22  %conv1 = trunc i32 %add to i8
     23  store i8 %conv1, i8* %arrayidx, align 1
     52  %conv1 = trunc i32 %add to i8
     53  store i8 %conv1, i8* %arrayidx, align 1
     82  %conv1 = trunc i32 %add to i8
     83  store i8 %conv1, i8* %arrayidx, align 1
    112  %conv1 = trunc i32 %add to i8
    113  store i8 %conv1, i8* %arrayidx, align 1
    142  %conv1 = trunc i32 %add to i8
    143  store i8 %conv1, i8* %arrayidx, align 1
     [all …]
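ctrloop-lt, -le, and -ne all repeat one loop-body idiom: an i32 value truncated to a byte and stored each iteration inside a counted loop whose exit comparison gives each file its name. A minimal sketch, with the surrounding loop structure and the computed value assumed:

    define void @byte_store_loop(i8* %a, i32 %n) nounwind {
    entry:
      br label %for.body

    for.body:
      %i.04 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
      %add = add nsw i32 %i.04, 7               ; assumed payload computation
      %conv1 = trunc i32 %add to i8
      %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.04
      store i8 %conv1, i8* %arrayidx, align 1
      %inc = add nsw i32 %i.04, 1
      %cmp = icmp slt i32 %inc, %n              ; the -lt variant; -le/-ne differ here
      br i1 %cmp, label %for.body, label %for.end

    for.end:
      ret void
    }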
|
/external/llvm/test/CodeGen/Thumb2/ |
D | longMACt.ll |
      8  %conv1 = zext i32 %b to i64
      9  %mul = mul i64 %conv1, %conv
     18  %conv1 = sext i32 %b to i64
     19  %mul = mul nsw i64 %conv1, %conv
     28  %conv1 = zext i32 %a to i64
     29  %mul = mul i64 %conv, %conv1
     39  %conv1 = sext i32 %a to i64
     40  %mul = mul nsw i64 %conv, %conv1
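Each pair widens both 32-bit operands to 64 bits before the multiply, the pattern that lets the backend select a long multiply-accumulate (UMLAL for the zext pairs, SMLAL for the sext pairs). A minimal sketch of the unsigned case, with the accumulate add assumed from the file name:

    define i64 @umlal_sketch(i32 %a, i32 %b, i64 %acc) {
    entry:
      %conv = zext i32 %a to i64
      %conv1 = zext i32 %b to i64
      %mul = mul i64 %conv1, %conv   ; 32x32 -> 64 multiply
      %add = add i64 %mul, %acc      ; assumed accumulate step
      ret i64 %add
    }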
|
/external/llvm/test/CodeGen/ARM/ |
D | longMAC.ll |
     11  %conv1 = zext i32 %b to i64
     12  %mul = mul i64 %conv1, %conv
     21  %conv1 = sext i32 %b to i64
     22  %mul = mul nsw i64 %conv1, %conv
     31  %conv1 = zext i32 %a to i64
     32  %mul = mul i64 %conv, %conv1
     42  %conv1 = sext i32 %a to i64
     43  %mul = mul nsw i64 %conv, %conv1
     73  %conv1 = zext i32 %b to i64
     84  %conv1 = sext i32 %b to i64
     [all …]
|
/external/llvm/test/CodeGen/NVPTX/ |
D | sext-in-reg.ll |
     11  %conv1 = ashr exact i64 %sext, 56
     16  %add = add nsw i64 %conv4, %conv1
     29  %conv1 = ashr exact i64 %sext, 32
     34  %add = add nsw i64 %conv4, %conv1
     47  %conv1 = ashr exact i64 %sext, 48
     52  %add = add nsw i64 %conv4, %conv1
     65  %conv1 = ashr exact i32 %sext, 24
     70  %add = add nsw i32 %conv4, %conv1
     83  %conv1 = ashr exact i32 %sext, 16
     88  %add = add nsw i32 %conv4, %conv1
     [all …]
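Each ashr exact pairs with a preceding shl (not shown in the hits) to sign-extend a narrow field in place: shifting left by 56 and then arithmetically right by 56 sign-extends the low byte of an i64, which the backend can match as a single sign-extend-in-register operation. A minimal sketch of the i8-in-i64 case:

    define i64 @sext_i8_in_i64(i64 %a) {
    entry:
      %sext = shl i64 %a, 56              ; assumed producer of %sext
      %conv1 = ashr exact i64 %sext, 56   ; sign-extend the low byte in place
      ret i64 %conv1
    }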
|
/external/llvm/test/CodeGen/Hexagon/ |
D | memops.ll |
     10  %conv1 = trunc i32 %add to i8
     11  store i8 %conv1, i8* %p, align 1
     20  %conv1 = zext i8 %0 to i32
     21  %add = add nsw i32 %conv1, %conv
     32  %conv1 = zext i8 %0 to i32
     33  %sub = sub nsw i32 %conv1, %conv
     63  %conv1 = trunc i32 %and to i8
     64  store i8 %conv1, i8* %p, align 1
     74  %conv1 = trunc i32 %or to i8
     75  store i8 %conv1, i8* %p, align 1
     [all …]
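The memops hits are load/modify/store sequences against a single address, which Hexagon can fold into one read-modify-write memory operation. A minimal sketch of the add case (function name and constant assumed; the hits also show and/or/sub variants):

    define void @memop_add(i8* nocapture %p) nounwind {
    entry:
      %0 = load i8, i8* %p, align 1
      %conv = zext i8 %0 to i32
      %add = add nsw i32 %conv, 5
      %conv1 = trunc i32 %add to i8
      store i8 %conv1, i8* %p, align 1   ; same address as the load: foldable to a memop
      ret void
    }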
|
D | hwloop-lt.ll |
     20  %conv1 = trunc i32 %add to i8
     21  store i8 %conv1, i8* %arrayidx, align 1
     47  %conv1 = trunc i32 %add to i8
     48  store i8 %conv1, i8* %arrayidx, align 1
     74  %conv1 = trunc i32 %add to i8
     75  store i8 %conv1, i8* %arrayidx, align 1
    101  %conv1 = trunc i32 %add to i8
    102  store i8 %conv1, i8* %arrayidx, align 1
    128  %conv1 = trunc i32 %add to i8
    129  store i8 %conv1, i8* %arrayidx, align 1
     [all …]
|
D | hwloop-le.ll |
     21  %conv1 = trunc i32 %add to i8
     22  store i8 %conv1, i8* %arrayidx, align 1
     50  %conv1 = trunc i32 %add to i8
     51  store i8 %conv1, i8* %arrayidx, align 1
     79  %conv1 = trunc i32 %add to i8
     80  store i8 %conv1, i8* %arrayidx, align 1
    108  %conv1 = trunc i32 %add to i8
    109  store i8 %conv1, i8* %arrayidx, align 1
    137  %conv1 = trunc i32 %add to i8
    138  store i8 %conv1, i8* %arrayidx, align 1
     [all …]
|
D | hwloop-ne.ll |
     21  %conv1 = trunc i32 %add to i8
     22  store i8 %conv1, i8* %arrayidx, align 1
     50  %conv1 = trunc i32 %add to i8
     51  store i8 %conv1, i8* %arrayidx, align 1
     79  %conv1 = trunc i32 %add to i8
     80  store i8 %conv1, i8* %arrayidx, align 1
    108  %conv1 = trunc i32 %add to i8
    109  store i8 %conv1, i8* %arrayidx, align 1
    137  %conv1 = trunc i32 %add to i8
    138  store i8 %conv1, i8* %arrayidx, align 1
     [all …]
|
D | absaddr-store.ll |
     19  %conv1 = trunc i32 %mul to i8
     20  store i8 %conv1, i8* @b1, align 1
     21  ret i8 %conv1
     30  %conv1 = trunc i32 %mul to i16
     31  store i16 %conv1, i16* @c1, align 2
     32  ret i16 %conv1
|
/external/llvm/test/CodeGen/X86/ |
D | float-conv-elim.ll |
      9  %conv1 = fptosi float %conv to i32
     10  ret i32 %conv1
     18  %conv1 = fptosi float %conv to i32
     19  ret i32 %conv1
     27  %conv1 = fptoui float %conv to i8
     28  ret i8 %conv1
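Each hit is an int-to-float-to-int round trip where the intermediate float holds the value exactly, so the pair of conversions can collapse to a plain integer extension. A minimal sketch of the i8 case (names assumed):

    define i32 @conv_elim(i8 zeroext %a) {
    entry:
      %conv = uitofp i8 %a to float        ; exact: every i8 fits in a float
      %conv1 = fptosi float %conv to i32   ; round trip folds to zext i8 -> i32
      ret i32 %conv1
    }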
|
D | fp-elim-and-no-fp-elim.ll |
      9  %conv1 = fptrunc double %mul to float
     12  tail call void @foo(float %conv1, float %conv3)
     22  %conv1 = fptrunc double %mul to float
     25  tail call void @foo(float %conv1, float %conv3)
|
/external/llvm/test/CodeGen/Mips/Fast-ISel/ |
D | loadstoreconv.ll |
     58  %conv1 = zext i8 %1 to i32
     62  store i32 %conv1, i32* @j, align 4
     78  %conv1 = sext i8 %1 to i32
     79  store i32 %conv1, i32* @j, align 4
     98  %conv1 = zext i16 %1 to i32
     99  store i32 %conv1, i32* @j, align 4
    116  %conv1 = sext i16 %1 to i32
    117  store i32 %conv1, i32* @j, align 4
    150  %conv1 = zext i8 %1 to i16
    154  store i16 %conv1, i16* @ssj, align 2
     [all …]
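Every pair loads a narrow value, widens it, and stores the result to a global; the test checks that Mips Fast-ISel folds the extension into the load (lbu/lb/lhu/lh). A minimal sketch of the zext-byte case (@b is an assumed source global; @j appears in the hits):

    @b = common global i8 0, align 1
    @j = common global i32 0, align 4

    define void @load_zext_i8() {
    entry:
      %0 = load i8, i8* @b, align 1
      %conv1 = zext i8 %0 to i32           ; expected to fold into the load
      store i32 %conv1, i32* @j, align 4
      ret void
    }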
|
/external/llvm/test/CodeGen/Mips/ |
D | stchar.ll |
     11  %conv1 = sext i8 %c to i32
     12  …* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %conv, i32 %conv1) nounwind
     25  %conv1.i = sext i8 %3 to i32
     26  …telementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %conv.i, i32 %conv1.i) nounwind
     50  %conv1.i = sext i8 %3 to i32
     51  …telementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %conv.i, i32 %conv1.i) nounwind
     80  %conv1.i.i = sext i8 %4 to i32
     81  …mentptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %conv.i.i, i32 %conv1.i.i) nounwind
|
D | selectcc.ll |
     30  %conv1 = sitofp i32 %conv to float
     31  ret float %conv1
     40  %conv1 = sitofp i32 %conv to double
     41  ret double %conv1
|
D | nomips16.ll |
     12  %conv1 = fptrunc double %add to float
     13  store float %conv1, float* @x, align 4
     26  %conv1 = fptrunc double %add to float
     27  store float %conv1, float* @x, align 4
|