/external/llvm/test/Transforms/SLPVectorizer/X86/

D | call.ll |
     27  %mul5 = fmul double %i3, %i4
     28  %call5 = tail call double @sin(double %mul5) nounwind readnone
     48  %mul5 = fmul double %i3, %i4
     49  %call5 = tail call double @cos(double %mul5) nounwind readnone
     69  %mul5 = fmul double %i3, %i4
     70  %call5 = tail call double @pow(double %mul5,double %mul5) nounwind readnone
     91  %mul5 = fmul double %i3, %i4
     92  %call5 = tail call double @exp2(double %mul5) nounwind readnone
    113  %mul5 = fmul double %i3, %i4
    114  %call5 = tail call nnan double @sqrt(double %mul5) nounwind readnone
    [all …]

D | simplebb.ll |
     19  %mul5 = fmul double %i3, %i4
     22  store double %mul5, double* %arrayidx5, align 8
     39  %mul5 = fmul double %i3, %i4
     44  store double %mul5, double* %arrayidx5, align 8
     62  %mul5 = fmul double %i3, %i4
     65  store double %mul5, double* %arrayidx5, align 8
     82  %mul5 = fmul double %i3, %i4
     85  store volatile double %mul5, double* %arrayidx5, align 8

D | metadata.ll |
     22  %mul5 = fmul double %i3, %i4, !fpmath !0
     25  store double %mul5, double* %arrayidx5, align 8, !tbaa !4
     45  %mul5 = fmul double %i3, %i4, !fpmath !1
     50  store double %mul5, double* %arrayidx5, align 8, !tbaa !4

D | cycle_dup.ll |
     42  %g.026 = phi i32 [ %mul5, %for.body ], [ %1, %entry ]
     45  %mul5 = mul nsw i32 %g.026, 19
     55  %g.0.lcssa = phi i32 [ %1, %entry ], [ %mul5, %for.body ]

D | implicitfloat.ll |
     19  %mul5 = fmul double %i3, %i4
     22  store double %mul5, double* %arrayidx5, align 8

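All of the SLPVectorizer hits above share one shape: two adjacent scalar loads feed an fmul whose result is stored (or passed to a libm call such as @sin), and the pass is expected to fuse the pair into a single <2 x double> operation. A minimal sketch of that input pattern, using illustrative names (@two_muls, %i0, %i1) rather than the tests' own:

    ; Two independent double multiplies over adjacent elements; the SLP
    ; vectorizer can merge the loads, fmuls, and stores into <2 x double> ops.
    define void @two_muls(double* %a, double* %b, double* %c) {
    entry:
      %i0 = load double, double* %a, align 8
      %i1 = load double, double* %b, align 8
      %mul = fmul double %i0, %i1
      %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
      %i3 = load double, double* %arrayidx3, align 8
      %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
      %i4 = load double, double* %arrayidx4, align 8
      %mul5 = fmul double %i3, %i4
      store double %mul, double* %c, align 8
      %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
      store double %mul5, double* %arrayidx5, align 8
      ret void
    }
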
/external/llvm/test/Transforms/SLPVectorizer/AMDGPU/

D | simplebb.ll |
     25  %mul5 = fmul double %i3, %i4
     28  store double %mul5, double addrspace(3)* %arrayidx5, align 8
     45  %mul5 = fmul double %i3, %i4
     48  store double %mul5, double* %arrayidx5, align 8
     65  %mul5 = fmul double %i3, %i4
     68  store double %mul5, double addrspace(3)* %arrayidx5, align 8

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/

D | lea-opt-cse3.ll |
     29  %mul5 = mul nsw i32 %add1, %add4
     30  ret i32 %mul5
     57  %mul5 = mul nsw i32 %add1, %add4
     58  ret i32 %mul5
    106  %mul5 = mul nsw i32 %add1, %add4
    110  %retmul = phi i32 [%mul5 , %mid] , [0 , %entry]
    159  %mul5 = mul nsw i32 %add1, %add4
    162  %retmul = phi i32 [%mul5 , %mid] , [0 , %entry]

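The lea-opt-cse3.ll hits show two adds feeding a mul, the shape the X86 LEA optimization pass is expected to CSE into shared lea instructions. A hedged sketch of such an input; the function name and the constant offset are assumptions, not the test's actual values:

    ; Two adds over shared operands feeding a multiply; on x86 both adds
    ; can be formed as LEAs and their common subexpression reused.
    define i32 @lea_cse(i32 %a, i32 %b) {
    entry:
      %add1 = add nsw i32 %a, %b
      %add4 = add nsw i32 %add1, 4    ; offset 4 is illustrative
      %mul5 = mul nsw i32 %add1, %add4
      ret i32 %mul5
    }
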
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/

D | simplebb.ll |
     26  %mul5 = fmul double %i3, %i4
     29  store double %mul5, double* %arrayidx5, align 8
     53  %mul5 = fmul double %i3, %i4
     58  store double %mul5, double* %arrayidx5, align 8
     85  %mul5 = fmul double %i3, %i4
     88  store double %mul5, double* %arrayidx5, align 8
    115  %mul5 = fmul double %i3, %i4
    118  store volatile double %mul5, double* %arrayidx5, align 8

D | metadata.ll |
     22  %mul5 = fmul double %i3, %i4, !fpmath !0
     25  store double %mul5, double* %arrayidx5, align 8, !tbaa !4
     45  %mul5 = fmul double %i3, %i4, !fpmath !1
     50  store double %mul5, double* %arrayidx5, align 8, !tbaa !4

D | cycle_dup.ll |
     42  %g.026 = phi i32 [ %mul5, %for.body ], [ %1, %entry ]
     45  %mul5 = mul nsw i32 %g.026, 19
     55  %g.0.lcssa = phi i32 [ %1, %entry ], [ %mul5, %for.body ]

D | implicitfloat.ll |
     19  %mul5 = fmul double %i3, %i4
     22  store double %mul5, double* %arrayidx5, align 8

/external/llvm/test/Transforms/BBVectorize/

D | metadata.ll |
     14  %mul5 = fmul double %i3, %i4, !fpmath !3
     17  store double %mul5, double* %arrayidx5, align 8
     34  %mul5 = mul i64 %i3, %i4
     37  store i64 %mul5, i64* %arrayidx5, align 8

D | simple-ldstr.ll |
     15  %mul5 = fmul double %i3, %i4
     18  store double %mul5, double* %arrayidx5, align 8
     47  %mul5 = fmul double %i3, %i4
     50  store double %mul5, double* %arrayidx5, align 8
     78  %mul5 = fmul double %i3, %i4
     79  %mul5f = fptrunc double %mul5 to float
    129  %mul5 = fmul double %i3, %i4
    132  store double %mul5, double* %arrayidx5, align 8
    153  %mul5 = fmul double %i3, %i4
    155  store double %mul5, double* %arrayidx5, align 8

/external/swiftshader/third_party/LLVM/test/CodeGen/PTX/

D | 20110926-sitofp.ll |
      9  %mul5 = mul i32 %x, %y
     10  %rem = srem i32 %mul5, 1024
     17  %rem14 = srem i32 %mul5, 1024

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/AMDGPU/

D | packed-math.ll |
     21  %mul5 = fmul half %i3, %i4
     24  store half %mul5, half addrspace(3)* %arrayidx5, align 2
     42  %mul5 = fmul half %i3, %i4
     45  store half %mul5, half* %arrayidx5, align 2
     63  %mul5 = fmul half %i3, %i4
     66  store half %mul5, half addrspace(3)* %arrayidx5, align 2
    103  %mul5 = fmul half %i3, %scalar
    106  store half %mul5, half addrspace(3)* %arrayidx5, align 2

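The packed-math.ll hits repeat the same load/fmul/store shape at half precision, where on packed-f16 AMDGPU targets the SLP vectorizer can combine two scalar half operations into one <2 x half>. A minimal sketch, assuming an LDS (addrspace(3)) buffer and illustrative names:

    ; Two adjacent half-precision multiplies through LDS; candidates for
    ; a single packed <2 x half> operation on targets that support it.
    define amdgpu_kernel void @two_half_muls(half addrspace(3)* %a, half addrspace(3)* %b, half addrspace(3)* %c) {
    entry:
      %i0 = load half, half addrspace(3)* %a, align 2
      %i1 = load half, half addrspace(3)* %b, align 2
      %mul = fmul half %i0, %i1
      %arrayidx3 = getelementptr inbounds half, half addrspace(3)* %a, i32 1
      %i3 = load half, half addrspace(3)* %arrayidx3, align 2
      %arrayidx4 = getelementptr inbounds half, half addrspace(3)* %b, i32 1
      %i4 = load half, half addrspace(3)* %arrayidx4, align 2
      %mul5 = fmul half %i3, %i4
      store half %mul, half addrspace(3)* %c, align 2
      %arrayidx5 = getelementptr inbounds half, half addrspace(3)* %c, i32 1
      store half %mul5, half addrspace(3)* %arrayidx5, align 2
      ret void
    }
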
/external/swiftshader/third_party/LLVM/test/CodeGen/Thumb2/

D | thumb2-mul.ll |
     22  %mul5 = mul i32 %add, 36
     23  %add6 = add i32 %mul5, %0

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/

D | early-inline.ll |
     11  %mul5 = mul i32 %mul4, %mul3
     12  ret i32 %mul5

/external/llvm/test/Transforms/SLPVectorizer/XCore/

D | no-vector-registers.ll |
     18  %mul5 = fmul double %i3, %i4
     21  store double %mul5, double* %arrayidx5, align 8

/external/llvm/test/CodeGen/Thumb2/

D | thumb2-mul.ll |
     22  %mul5 = mul i32 %add, 36
     23  %add6 = add i32 %mul5, %0

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Thumb2/

D | thumb2-mul.ll |
     22  %mul5 = mul i32 %add, 36
     23  %add6 = add i32 %mul5, %0

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/XCore/

D | no-vector-registers.ll |
     18  %mul5 = fmul double %i3, %i4
     21  store double %mul5, double* %arrayidx5, align 8

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/

D | avoid-cpsr-rmw.ll |
     43  %mul5 = mul i32 %mul, %2
     44  %mul6 = mul i32 %mul5, %3
     76  %mul5 = mul i32 %mul, %2
     77  %mul6 = mul i32 %mul5, %3

/external/llvm/test/CodeGen/ARM/

D | avoid-cpsr-rmw.ll |
     43  %mul5 = mul i32 %mul, %2
     44  %mul6 = mul i32 %mul5, %3
     76  %mul5 = mul i32 %mul, %2
     77  %mul6 = mul i32 %mul5, %3

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/

D | aarch64-a57-fp-load-balancing.ll |
     49  %mul5 = fmul fast double %1, %2
     50  %add6 = fadd fast double %mul5, %add
    147  %mul5 = fmul fast double %1, %2
    148  %add6 = fadd fast double %mul5, %add
    242  %mul5 = fmul fast float %1, %2
    243  %add6 = fadd fast float %mul5, %add
    284  %mul5 = fmul fast double %1, %2
    285  %add6 = fadd fast double %mul5, %add
    319  %mul5 = fmul fast double %1, %2
    320  %add6 = fadd fast double %mul5, %add

/external/llvm/test/CodeGen/AArch64/

D | aarch64-a57-fp-load-balancing.ll |
     49  %mul5 = fmul fast double %1, %2
     50  %add6 = fadd fast double %mul5, %add
    147  %mul5 = fmul fast double %1, %2
    148  %add6 = fadd fast double %mul5, %add
    242  %mul5 = fmul fast float %1, %2
    243  %add6 = fadd fast float %mul5, %add
    284  %mul5 = fmul fast double %1, %2
    285  %add6 = fadd fast double %mul5, %add
    319  %mul5 = fmul fast double %1, %2
    320  %add6 = fadd fast double %mul5, %add

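The aarch64-a57-fp-load-balancing.ll hits pair each fmul fast with an fadd fast into a running sum; the test exercises how the register allocator spreads such multiply-accumulate chains across the Cortex-A57's FP pipelines. Reduced to a sketch with illustrative names (the real tests build much longer chains from loaded values):

    ; A fast-math multiply-accumulate chain: each fmul feeds the running
    ; sum via an fadd, the shape the load-balancing tests are built from.
    define double @mac_chain(double %a, double %b, double %c, double %d) {
    entry:
      %mul = fmul fast double %a, %b
      %add = fadd fast double %mul, %a
      %mul5 = fmul fast double %c, %d
      %add6 = fadd fast double %mul5, %add
      ret double %add6
    }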