/external/llvm/test/Transforms/InstCombine/

zext-bool-add-sub.ll
    11  %conv3 = zext i1 %y to i32
    12  %conv3.neg = sub i32 0, %conv3
    14  %add = add i32 %sub, %conv3.neg

udivrem-change-width.ll
    10  %conv3 = trunc i32 %div to i8
    11  ret i8 %conv3
    20  %conv3 = trunc i32 %div to i8
    21  ret i8 %conv3

2012-05-28-select-hang.ll
    17  %conv3 = trunc i32 %and to i8
    18  store i8 %conv3, i8* @b, align 1
    21  %conv5 = zext i8 %conv3 to i32

2012-02-13-FCmp.ll
    22  %conv3 = fptosi double %add to i64
    32  %retval.0 = phi i64 [ %conv3, %if.then ], [ -1, %if.end ]

/external/llvm/test/CodeGen/X86/

fp-elim-and-no-fp-elim.ll
    11  %conv3 = fptrunc double %div to float
    12  tail call void @foo(float %conv1, float %conv3)
    24  %conv3 = fptrunc double %div to float
    25  tail call void @foo(float %conv1, float %conv3)

sse_partial_update.ll
    24  %conv3 = fpext float %a.addr.4.extract to double
    25  tail call void @callee(double %conv, double %conv3) nounwind
    45  %conv3 = fpext float %a.addr.4.extract to double
    46  tail call void @callee(double %conv, double %conv3) nounwind
    65  %conv3 = fpext float %a.addr.4.extract to double
    66  tail call void @callee(double %conv, double %conv3) nounwind
    85  %conv3 = fptrunc double %a1 to float
    86  tail call void @callee2(float %conv, float %conv3) nounwind

pr9127.ll
     8  %conv3 = zext i1 %cmp to i8
     9  ret i8 %conv3

coalescer-identity.ll
    30  %conv3.us = zext i16 %add.us to i32
    32  %tobool5.us = icmp eq i32 %conv3.us, %add4.us
    64  %conv3 = zext i16 %add to i32
    66  %not.tobool5 = icmp ne i32 %conv3, %add4

pr14088.ll
     8  %conv3 = trunc i32 %rem to i16
     9  store i16 %conv3, i16* %tm_year2

extract-combine.ll
     9  …%conv3.i25615 = shufflevector <4 x float> %movss.i25611, <4 x float> undef, <4 x i32> <i32 1, i32 …
    10  %sub.i25620 = fsub <4 x float> %conv3.i25615, zeroinitializer ; <<4 x float>> [#uses=1]

/external/llvm/test/CodeGen/ARM/

test-sharedidx.ll
    33  %conv3 = trunc i32 %add to i8
    35  store i8 %conv3, i8* %arrayidx4, align 1
    54  %conv3.1 = trunc i32 %add.1 to i8
    56  store i8 %conv3.1, i8* %arrayidx4.1, align 1
    69  %conv3.2 = trunc i32 %add.2 to i8
    71  store i8 %conv3.2, i8* %arrayidx4.2, align 1
    87  %conv3.3 = trunc i32 %add.3 to i8
    89  store i8 %conv3.3, i8* %arrayidx4.3, align 1

smml.ll
    12  %conv3 = trunc i64 %sub to i32
    13  ret i32 %conv3

rev.ll
   111  %conv3 = ashr exact i32 %sext, 16
   112  ret i32 %conv3
   124  %conv3 = trunc i32 %or to i16
   125  ret i16 %conv3

debug-info-s16-reg.ll
    21  %conv3 = zext i8 %c to i32, !dbg !27
    22  …([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %ptr, double %conv, i32 %conv3) nounwind optsize,…
    34  %conv3 = zext i8 %c to i32, !dbg !33
    35  …([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %ptr, double %conv, i32 %conv3) nounwind optsize,…
    55  %conv3.i = and i32 %add5, 255, !dbg !44
    56  … i8], [11 x i8]* @.str, i32 0, i32 0), i8* %add.ptr, double %conv.i, i32 %conv3.i) nounwind optsiz…

noreturn.ll
    25  %conv3 = sext i32 %conv2 to i64
    26  %cmp = icmp eq i64 %mul, %conv3

/external/llvm/test/Transforms/LoopStrengthReduce/X86/

ivchain-stress-X86.ll
    42  %conv3 = trunc i32 %add to i8
    44  store i8 %conv3, i8* %arrayidx4, align 1
    60  %conv3.1 = trunc i32 %add.1 to i8
    62  store i8 %conv3.1, i8* %arrayidx4.1, align 1
    75  %conv3.2 = trunc i32 %add.2 to i8
    77  store i8 %conv3.2, i8* %arrayidx4.2, align 1
    90  %conv3.3 = trunc i32 %add.3 to i8
    92  store i8 %conv3.3, i8* %arrayidx4.3, align 1

ivchain-X86.ll
   182  %conv3 = trunc i32 %add to i8
   184  store i8 %conv3, i8* %arrayidx4, align 1
   193  %conv3.1 = trunc i32 %add.1 to i8
   195  store i8 %conv3.1, i8* %arrayidx4.1, align 1
   204  %conv3.2 = trunc i32 %add.2 to i8
   206  store i8 %conv3.2, i8* %arrayidx4.2, align 1
   215  %conv3.3 = trunc i32 %add.3 to i8
   217  store i8 %conv3.3, i8* %arrayidx4.3, align 1

/external/llvm/test/Transforms/LoopVectorize/AArch64/

reduction-small-size.ll
    59  %conv3 = zext i8 %1 to i32
    62  %add5 = add nuw nsw i32 %add, %conv3
   120  %conv3.15 = zext i16 %1 to i32
   123  %add5 = add nuw nsw i32 %add, %conv3.15
   183  %conv3 = zext i8 %1 to i32
   186  %add5 = add nuw nsw i32 %add, %conv3

/external/llvm/test/CodeGen/Hexagon/

memops2.ll
    23  %conv3 = zext i16 %0 to i32
    24  %sub = add nsw i32 %conv3, 65535

/external/llvm/test/CodeGen/PowerPC/

ctrloop-udivti3.ll
    18  %conv3.i11 = trunc i128 %div.i to i64
    19  store i64 %conv3.i11, i64* undef, align 8

vsx-fma-mutate-trivial-copy.ll
    14  %conv3 = fpext float %W to double
    22  %div4 = fdiv fast double %conv3, %0

/external/llvm/test/CodeGen/NVPTX/

addrspacecast.ll
    29  define i32 @conv3(i32 addrspace(4)* %ptr) {
    30  ; PTX32: conv3
    33  ; PTX64: conv3

/external/llvm/test/Transforms/LoopStrengthReduce/

preserve-gep-loop-variant.ll
    28  %conv3 = sext i8 %conv to i64
    30  %add4 = add nsw i64 %add, %conv3

/external/clang/test/SemaCXX/

cxx1y-deduced-return-type.cpp
    31  } conv3;
    32  int *conv3a = conv3; // expected-error {{ambiguous}}
    33  int *conv3b = conv3.operator auto();
    34  int *conv3c = conv3.operator auto*();

/external/llvm/test/Transforms/SLPVectorizer/X86/

cast.ll
    23  %conv3 = sext i8 %1 to i32
    25  store i32 %conv3, i32* %arrayidx4, align 4