/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
fmul-2-combine-multi-use.ll
   49  %mul2 = fmul fast float %x, 2.0
   50  %mad = fadd fast float %mul2, %y
   51  store volatile float %mul2, float addrspace(1)* %out
   65  %mul2 = fmul fast float %x.abs, 2.0
   66  %mad = fadd fast float %mul2, %y
   67  store volatile float %mul2, float addrspace(1)* %out
   78  %mul2 = fmul fast float %x.abs, 2.0
   79  %mad0 = fadd fast float %mul2, %y
   80  %mad1 = fadd fast float %mul2, %z
   92  %mul2 = fmul fast float %x, 2.0
   [all …]
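Note: every hit in this file has the same shape — a 2*x product that feeds an add and is also stored, so the combiner must keep %mul2 alive even when it forms a multiply-add. A rough C analogue of that shape (a sketch with invented names, not the test's code):

void mul2_multi_use(volatile float *out, float x, float y) {
    float mul2 = x * 2.0f;   /* %mul2 = fmul fast float %x, 2.0   */
    float mad  = mul2 + y;   /* %mad  = fadd fast float %mul2, %y */
    *out = mul2;             /* volatile store keeps %mul2 live   */
    *out = mad;
}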
fdot2.ll
   34  %mul2 = fmul half %src1.el2, %src2.el2
   37  %acc1 = fadd half %mul2, %acc
   75  %mul2 = fmul float %csrc1.el2, %csrc2.el2
   78  %acc1 = fadd float %mul2, %acc
  114  %mul2 = fmul float %csrc2.el2, %csrc1.el2
  117  %acc1 = fadd float %mul2, %acc
  151  %mul2 = fmul float %csrc1.el2, %csrc2.el2
  154  %acc1 = fadd float %mul2, %acc
  188  %mul2 = fmul float %csrc1.el2, %csrc1.el1
  191  %acc1 = fadd float %mul2, %acc
  [all …]
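Note: the fdot2.ll hits are all the second lane of a two-element dot product folded into an accumulator, the shape a dot-product instruction can absorb. A hedged C sketch of that computation, with invented names (the real tests operate on vector elements):

float dot2_acc(float src1_el1, float src1_el2,
               float src2_el1, float src2_el2, float acc)
{
    float mul1 = src1_el1 * src2_el1;  /* first-lane product               */
    float mul2 = src1_el2 * src2_el2;  /* second-lane product, %mul2 above */
    float acc1 = mul2 + acc;           /* fold into the accumulator        */
    return mul1 + acc1;                /* full 2-element dot product + acc */
}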
early-inline.ll
    8  %mul2 = mul i32 %mul1, %x
    9  %mul3 = mul i32 %mul1, %mul2
   10  %mul4 = mul i32 %mul3, %mul2
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/StraightLineStrengthReduce/
slsr-mul.ll
   20  %mul2 = mul i32 %b2, %s
   21  call void @foo(i32 %mul2)
   41  %mul2 = mul i32 %b2, %s
   42  call void @foo(i32 %mul2)
   63  %mul2 = mul i32 %b2, %s
   65  call void @foo(i32 %mul2)
   82  %mul2 = mul i32 %a, %b1
   87  call void @foo(i32 %mul2)
  101  ; mul1 = mul0 + bump; // GVN ensures mul1 and mul2 use the same bump.
  102  ; mul2 = mul1 + bump;
  [all …]
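Note: the comment at in-file lines 101–102 describes the rewrite this pass performs — a family of multiplications with a common stride becomes one multiply plus chained adds. A hedged C sketch of that straight-line strength reduction (foo and the function names are invented):

extern void foo(int v);

/* Before: three multiplications sharing base b and stride s. */
void slsr_before(int b, int s) {
    foo(b * s);
    foo((b + 1) * s);
    foo((b + 2) * s);
}

/* After: one multiply, the rest become adds of the common "bump". */
void slsr_after(int b, int s) {
    int mul0 = b * s;
    int mul1 = mul0 + s;   /* mul1 = mul0 + bump */
    int mul2 = mul1 + s;   /* mul2 = mul1 + bump */
    foo(mul0);
    foo(mul1);
    foo(mul2);
}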
/external/llvm/test/Transforms/StraightLineStrengthReduce/
slsr-mul.ll
   20  %mul2 = mul i32 %b2, %s
   21  call void @foo(i32 %mul2)
   41  %mul2 = mul i32 %b2, %s
   42  call void @foo(i32 %mul2)
   63  %mul2 = mul i32 %b2, %s
   65  call void @foo(i32 %mul2)
   82  %mul2 = mul i32 %a, %b1
   87  call void @foo(i32 %mul2)
  101  ; mul1 = mul0 + bump; // GVN ensures mul1 and mul2 use the same bump.
  102  ; mul2 = mul1 + bump;
  [all …]
/external/llvm/test/CodeGen/AMDGPU/
fmul-2-combine-multi-use.ll
   37  %mul2 = fmul fast float %x, 2.0
   38  %mad = fadd fast float %mul2, %y
   39  store volatile float %mul2, float addrspace(1)* %out
   53  %mul2 = fmul fast float %x.abs, 2.0
   54  %mad = fadd fast float %mul2, %y
   55  store volatile float %mul2, float addrspace(1)* %out
   66  %mul2 = fmul fast float %x.abs, 2.0
   67  %mad0 = fadd fast float %mul2, %y
   68  %mad1 = fadd fast float %mul2, %z
   80  %mul2 = fmul fast float %x, 2.0
   [all …]
/external/llvm/test/Transforms/Reassociate/
wrap-flags.ll
   12  %mul2 = add i32 %mul, 1
   13  ret i32 %mul2
   22  %mul2 = add i32 %mul, 1
   23  ret i32 %mul2
   32  %mul2 = add i32 %mul, 1
   33  ret i32 %mul2
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Reassociate/
wrap-flags.ll
   12  %mul2 = add i32 %mul, 1
   13  ret i32 %mul2
   22  %mul2 = add i32 %mul, 1
   23  ret i32 %mul2
   32  %mul2 = add i32 %mul, 1
   33  ret i32 %mul2
mixed-fast-nonfast-fp.ll
   13  %mul2 = fmul fast float %a, %b
   17  %add2 = fadd fast float %mul4, %mul2
   34  %mul2 = fmul fast float %a, %b
   38  %add2 = fadd reassoc float %mul4, %mul2
propagate-flags.ll
   10  %mul2 = fmul fast double %b, %b
   11  %mul3 = fmul fast double %mul1, %mul2
/external/llvm/test/Transforms/SLPVectorizer/X86/
crash_smallpt.ll
   44  %mul2.i256.us = fmul double %add4.i267.us, 1.400000e+02
   46  %add4.i246.us = fadd double %mul2.i256.us, 5.200000e+01
   48  %mul2.i.i.us = fmul double undef, %add4.i267.us
   52  store double %mul2.i.i.us, double* %agg.tmp101211.sroa.1.8.idx390, align 8
   83  %mul2.i738 = fmul double undef, %sub10.i773
   85  %mul2.i729 = fmul double undef, %mul2.i738
   87  %add4.i719 = fadd double undef, %mul2.i729
   91  %mul2.i.i680 = fmul double undef, %add4.i698
   95  store double %mul2.i.i680, double* %agg.tmp74663.sroa.1.8.idx943, align 8
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/
crash_smallpt.ll
   44  %mul2.i256.us = fmul double %add4.i267.us, 1.400000e+02
   46  %add4.i246.us = fadd double %mul2.i256.us, 5.200000e+01
   48  %mul2.i.i.us = fmul double undef, %add4.i267.us
   52  store double %mul2.i.i.us, double* %agg.tmp101211.sroa.1.8.idx390, align 8
   83  %mul2.i738 = fmul double undef, %sub10.i773
   85  %mul2.i729 = fmul double undef, %mul2.i738
   87  %add4.i719 = fadd double undef, %mul2.i729
   91  %mul2.i.i680 = fmul double undef, %add4.i698
   95  store double %mul2.i.i680, double* %agg.tmp74663.sroa.1.8.idx943, align 8
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/GVNHoist/
hoist-convergent.ll
   19  %mul2 = call float @convergent_func(float %sub1, float %div)
   28  %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ]
   49  %mul2 = call float @func(float %sub1, float %div) #0
   58  %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ]
   75  %mul2 = call float @func(float %sub1, float %div)
   84  %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ]
hoist.ll
   26  %mul2 = fmul float %sub1, %div
   37  %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ]
   71  %mul2 = fmul float %sub1, %div
   86  %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ]
  124  %mul2 = fmul float %sub1, %div
  138  %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ]
  171  %mul2 = fmul float %sub1, %div
  185  %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ]
  287  %mul2 = fmul float %sub1, %div
  291  %p1 = phi float [ %mul2, %if.then ], [ 0.000000e+00, %entry ]
  [all …]
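Note: the GVNHoist files here share one skeleton — the same computation in both arms of a branch, merged by a phi; hoist-convergent.ll additionally checks that a convergent call in that position is not hoisted. A hedged C analogue of the hoistable case, names invented:

float hoist_example(int cond, float sub1, float div) {
    float tmax;
    if (cond)
        tmax = sub1 * div;   /* %mul2 in if.then */
    else
        tmax = sub1 * div;   /* %mul6 in if.else */
    /* GVNHoist computes sub1 * div once, above the branch; the phi
       merging %mul2 and %mul6 then collapses to the hoisted value. */
    return tmax;
}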
hoist-newgvn.ll
   28  %mul2 = fmul float %sub1, %div
   46  %x = fadd float %p1, %mul2
   73  %mul2 = fmul float %sub1, %div
  101  %x = fadd float %p1, %mul2
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/
fma-aggr-FMF.ll
   13  %mul2 = fmul contract float %f3, %f4
   14  %add = fadd contract float %mul1, %mul2
   31  %mul2 = fmul float %f3, %f4
   32  %add = fadd contract float %mul1, %mul2
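Note: this test contrasts a pair where both the fmul and the fadd carry the contract flag with a pair where only the fadd does; contraction into a fused multiply-add generally needs the flag on both operations. A hedged C sketch of the fused form, using the standard fmaf (names invented):

#include <math.h>

float fused_mul_add(float f1, float f2, float f3, float f4) {
    float mul1 = f1 * f2;
    /* With contract on both operations the backend may emit one fused
       multiply-add; fmaf is the explicit C spelling of f3*f4 + mul1
       evaluated with a single rounding. */
    return fmaf(f3, f4, mul1);
}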
/external/epid-sdk/ext/ipp/sources/ippcp/
pcpgfpec_dblpoint.c
   81  mod_mul2 mul2= GFP_METHOD(pGFE)->mul2; /* gf mul2 */   in gfec_point_double() local
   99  mul2(S, pY, pGFE); /* S = 2*Y */                       in gfec_point_double()
  126  mul2(U, S, pGFE); /* U = 8*X*Y^2 */                    in gfec_point_double()
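Note: here mul2 is not a value but ippcp's GF(p) doubling method, fetched from the field's method table and used in the Jacobian point-doubling formulas (S = 2*Y, U = 8*X*Y^2). A minimal sketch of the usual modular-doubling semantics, assuming single-word elements — the library itself works on multi-word big numbers:

#include <stdint.h>

/* Assumes a < p and p fits in 31 bits, so a + a cannot wrap. */
uint32_t gf_mul2(uint32_t a, uint32_t p) {
    uint32_t r = a + a;            /* double the element              */
    return (r >= p) ? r - p : r;   /* one conditional reduction mod p */
}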
pcpgfpec_addaffine.c
   69  mod_mul2 mul2= GFP_METHOD(pGFE)->mul2; /* gf mul2 */   in gfec_affine_point_add() local
  115  mul2(pRx, U2, pGFE); // X3 = 2*X1*H^2                  in gfec_affine_point_add()
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/
cortex-a57-misched-vfma.ll
   37  %mul2 = fmul float %f3, %f4
   39  %add1 = fadd float %mul1, %mul2
   76  %mul2 = fmul <2 x float> %f3, %f4
   78  %add1 = fadd <2 x float> %mul1, %mul2
  114  %mul2 = fmul float %f3, %f4
  116  %sub1 = fsub float %mul1, %mul2
  153  %mul2 = fmul <2 x float> %f3, %f4
  155  %sub1 = fsub <2 x float> %mul1, %mul2
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
lea-opt-cse3.ll
   27  %mul2 = shl i32 %b, 2
   28  %add4 = add i32 %add, %mul2
   55  %mul2 = shl i32 %b, 3
   56  %add4 = add i32 %add, %mul2
  104  %mul2 = shl i32 %b, 3
  105  %add4 = add i32 %addn, %mul2
  157  %mul2 = shl i32 %b, 3
  158  %add4 = add i32 %addn, %mul2
MachineSink-SubReg.ll
   17  %mul2 = mul nuw nsw i64 %conv, 5
   25  %add7 = add i64 %mul2, %value
   29  %conv9 = trunc i64 %mul2 to i32
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
aarch64-fold-lslfast.ll
   64  %mul2 = shl i64 %b, 3
   65  %cmp = icmp slt i64 %mul1, %mul2
   71  %cmp2 = icmp sgt i64 %mul1, %mul2
   76  ret i64 %mul2
/external/llvm/test/Transforms/InstCombine/
add2.ll
  237  %mul2 = mul i16 %a, 3
  238  %add = add nsw i16 %mul1, %mul2
  247  %mul2 = mul nsw i16 %a, 7
  248  %add = add nsw i16 %mul1, %mul2
  257  %mul2 = mul nsw i16 %a, 7
  258  %add = add nsw i16 %mul1, %mul2
  267  %mul2 = mul nsw i32 %mul1, 5
  268  %add = add nsw i32 %mul1, %mul2
  287  %mul2 = mul nsw i16 %a, 16384
  288  %add = add nsw i16 %mul1, %mul2
  [all …]
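Note: these add2.ll cases exercise InstCombine folding x*m + x*n into x*(m+n) when the wrap flags permit it (the swiftshader copy below lists the same tests at shifted line numbers). A small C illustration of the identity, with invented names and constants:

short combine_muls(short a) {
    short mul1 = (short)(a * 3);
    short mul2 = (short)(a * 7);
    return (short)(mul1 + mul2);   /* InstCombine-style fold: a * 10 */
}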
fmul.ll
   68  %mul2 = fmul float %mul, %sub1
   69  ret float %mul2
  148  %mul2 = fmul double %mul1, %sqrt
  149  ret double %mul2
  152  ; CHECK-NEXT: %mul2 = fmul double %sqrt, %f
  153  ; CHECK-NEXT: ret double %mul2
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
add2.ll
  253  %mul2 = mul i16 %a, 3
  254  %add = add nsw i16 %mul1, %mul2
  264  %mul2 = mul nsw i16 %a, 7
  265  %add = add nsw i16 %mul1, %mul2
  275  %mul2 = mul nsw i16 %a, 7
  276  %add = add nsw i16 %mul1, %mul2
  287  %mul2 = mul nsw i32 %mul1, 5
  288  %add = add nsw i32 %mul1, %mul2
  308  %mul2 = mul nsw i16 %a, 16384
  309  %add = add nsw i16 %mul1, %mul2
  [all …]