/external/llvm/test/Transforms/Reassociate/ |
D | mixed-fast-nonfast-fp.ll |
     4 ; CHECK: %mul3 = fmul float %a, %b
     8 ; CHECK-NEXT: fadd fast float %tmp2, %mul3
    12 %mul3 = fmul float %a, %b
    14 %add1 = fadd fast float %mul1, %mul3
|
D | canonicalize-neg-const.ll |
    16 %mul3 = fmul double %add, %add2
    17 ret double %mul3
    32 %mul3 = fmul double %add, %add2
    33 ret double %mul3
    48 %mul3 = fmul double %add, %add2
    49 ret double %mul3
|
/external/llvm/test/Transforms/SimplifyCFG/AArch64/ |
D | prefer-fma.ll |
    55 ; CHECK: %mul3 = fmul fast double %5, 3.000000e+00
    56 ; CHECK-NEXT: %neg = fsub fast double 0.000000e+00, %mul3
    58 %mul3 = fmul fast double %6, 3.0000000e+00
    59 %neg = fsub fast double 0.0000000e+00, %mul3
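The pattern these lines test, a fast fmul whose result is immediately negated by an fsub from zero, is one AArch64 can fold with a later add into a single fused multiply-negate. A minimal C sketch of the scalar sequence and its fused equivalent (not from the test file; names are illustrative):

    #include <math.h>

    /* Illustrative only: the two-step multiply-then-negate that lines
       58-59 perform, and the single-rounding fused form it can become. */
    double separate(double x, double acc) {
        double mul3 = x * 3.0;     /* %mul3 = fmul fast double %6, 3.0 */
        double neg  = 0.0 - mul3;  /* %neg  = fsub fast double 0.0, %mul3 */
        return acc + neg;
    }

    double fused(double x, double acc) {
        return fma(-x, 3.0, acc);  /* acc - x*3.0 in one rounding step */
    }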
|
/external/llvm/test/Transforms/BBVectorize/X86/ |
D | loop1.ll |
    20 %mul3 = fmul double %0, %1
    21 %add = fadd double %mul, %mul3
    47 ; CHECK-UNRL: %mul3 = fmul <2 x double> %2, %3
    48 ; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3
|
/external/llvm/test/Transforms/BBVectorize/ |
D | loop1.ll |
    20 %mul3 = fmul double %0, %1
    21 %add = fadd double %mul, %mul3
    43 ; CHECK: %mul3 = fmul double %0, %1
    44 ; CHECK: %add = fadd double %mul, %mul3
    73 ; CHECK-UNRL: %mul3 = fmul <2 x double> %2, %3
    74 ; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3
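Both loop1.ll tests pack pairs of independent scalar fmuls into one <2 x double> operation, as the CHECK-UNRL lines show. A hypothetical C kernel with the same shape (the test's actual source is not shown here):

    /* Two independent products feeding one fadd per iteration: the
       shape BBVectorize can fuse into a single vector fmul. */
    void loop1(const double *a, const double *b, double *c, int n) {
        for (int i = 0; i < n; ++i) {
            double mul  = a[i] * a[i];
            double mul3 = a[i] * b[i]; /* %mul3 = fmul double %0, %1 */
            c[i] = mul + mul3;         /* %add  = fadd double %mul, %mul3 */
        }
    }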
|
/external/llvm/test/Analysis/ScalarEvolution/ |
D | 2012-05-29-MulAddRec.ll |
     5 ; outer loop. While reducing the recurrence at %mul3, unsigned integer overflow
    34 %mul3 = phi i8 [ undef, %entry ], [ %mul.lcssa, %for.cond.loopexit ]
    40 %mul45 = phi i8 [ %mul3, %for.cond ], [ %mul, %for.body ]
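The comment on line 5 refers to an i8 multiplicative recurrence that wraps. A hedged C sketch of that shape (identifiers invented for illustration):

    /* An 8-bit running product wraps mod 256 long before the loop
       ends; ScalarEvolution must reduce the recurrence without
       tripping over the unsigned overflow. */
    unsigned char mulrec(unsigned char x, int n) {
        unsigned char mul3 = 1;
        for (int i = 0; i < n; ++i)
            mul3 = (unsigned char)(mul3 * x);
        return mul3;
    }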
|
/external/llvm/test/Transforms/LICM/ |
D | extra-copies.ll |
    14 %mul3 = add nsw i32 %add2, %mul
    20 %a9.0.lcssa = phi i32 [ %mul3, %for.body ]
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | horizontal.ll |
    104 %mul3 = fmul float %0, %5
    109 %add8 = fadd fast float %mul3, %mul7
    186 %mul3 = fmul fast float %0, %10
    191 %add8 = fadd fast float %mul3, %mul7
    278 %mul3 = fmul fast float %0, %5
    279 %add = fadd fast float %sum.042, %mul3
    342 %mul3 = fmul fast float %1, %2
    348 %add8 = fadd fast float %mul3, %mul7
    403 %mul3 = fmul fast double %0, %3
    408 %add8 = fadd fast double %mul3, %mul7
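Every match in horizontal.ll shares one reduction shape: per-iteration products summed pairwise into a single accumulator, which the SLP vectorizer can turn into a vector multiply plus a horizontal reduce. A hedged C sketch of that shape (not the test's actual source):

    /* Pairwise products folded into one scalar sum; with fast-math
       the SLP vectorizer recognizes this as a horizontal reduction. */
    float reduce_pairs(const float *a, const float *b, int n) {
        float sum = 0.0f;
        for (int i = 0; i + 1 < n; i += 2) {
            float mul3 = a[i]     * b[i];
            float mul7 = a[i + 1] * b[i + 1];
            sum += mul3 + mul7;  /* %add8 = fadd fast float %mul3, %mul7 */
        }
        return sum;
    }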
|
D | external_user.ll |
    48 %mul3 = fmul double %add2, 4.000000e+00
    50 %add5 = fadd double %mul3, 4.000000e+00
    59 ret double %mul3
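The point of this test is visible in the matches: %mul3 feeds an in-tree fadd on line 50 but is also returned on line 59, so vectorized code must extract it back to a scalar. A hypothetical C analogue (names invented):

    extern void use(double);

    /* mul3 has an in-tree user (the add) and an external one (the
       return), forcing an extractelement after SLP vectorization. */
    double external_user(double add2) {
        double mul3 = add2 * 4.0;  /* %mul3 = fmul double %add2, 4.0 */
        use(mul3 + 4.0);           /* %add5, in-tree user */
        return mul3;               /* external user */
    }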
|
D | crash_smallpt.ll |
    75 %mul3.i.i792 = fmul double undef, undef
    76 %mul.i764 = fmul double undef, %mul3.i.i792
    80 %mul9.i772 = fmul double undef, %mul3.i.i792
|
/external/llvm/test/Transforms/LoopVectorize/PowerPC/ |
D | agg-interleave-a2.ll |
    22 %mul3 = fmul double %0, %mul
    27 %add = fadd double %mul3, %mul9
|
/external/llvm/test/CodeGen/X86/ |
D | fmul-combines.ll |
    110 %mul3 = fmul fast <4 x float> %a, %mul2
    111 ret <4 x float> %mul3
    126 %mul3 = fmul fast <4 x float> %a, %mul2
    127 ret <4 x float> %mul3
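These matches exercise fast-math fmul combining, where a chain of multiplies by constants collapses into one. A scalar C sketch of the same fold (the test itself uses <4 x float> vectors); it only fires when fast-math is enabled, e.g. with -ffast-math:

    /* With fast-math, (x * 4.0f) * 0.5f is refolded to x * 2.0f,
       leaving a single fmul where the source had two. */
    float fold_consts(float x) {
        return (x * 4.0f) * 0.5f;
    }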
|
/external/vulkan-validation-layers/libs/glm/gtx/ |
D | simd_quat.inl |
    126 __m128 mul3 = _mm_mul_ps(q1.Data, q2.Data); local
    132 __m128 add3 = _mm_dp_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f), 0xff);
    146 mul3 = _mm_mul_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f));
    147 __m128 add3 = _mm_add_ps(mul3, _mm_movehl_ps(mul3, mul3));
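Line 147 is the start of the classic SSE horizontal-add idiom: _mm_movehl_ps folds the high pair of lanes onto the low pair, so the four-lane sum finishes with one shuffle and one more add. A self-contained sketch of the full reduction (helper name invented):

    #include <xmmintrin.h>

    /* Reduce the four lanes of mul3 to one float: movehl folds lanes
       2,3 onto 0,1; the final add_ss folds lane 1 into lane 0. */
    static float hadd_ps(__m128 mul3) {
        __m128 add3 = _mm_add_ps(mul3, _mm_movehl_ps(mul3, mul3));
        add3 = _mm_add_ss(add3, _mm_shuffle_ps(add3, add3, 1));
        return _mm_cvtss_f32(add3);
    }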
|
/external/llvm/test/CodeGen/ARM/ |
D | 2011-11-14-EarlyClobber.ll |
    39 %mul3 = fmul double %mul, %sub
    42 %add = fadd double %mul3, %mul5
|
/external/llvm/test/Transforms/LoopVectorize/ |
D | version-mem-access.ll |
    43 %mul3 = mul nsw i64 %indvars.iv, %AStride
    44 %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %mul3
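The matched GEP is indexed by a runtime stride, which is why the vectorizer versions this loop: it emits a vector body guarded by a stride == 1 check alongside the scalar fallback. A hypothetical C shape for the access:

    /* The store subscript i * AStride is only unit-stride when
       AStride == 1, so the vectorizer adds that runtime check. */
    void store_strided(int *A, long AStride, const int *B, long n) {
        for (long i = 0; i < n; ++i)
            A[i * AStride] = B[i]; /* %mul3 = mul nsw i64 %indvars.iv, %AStride */
    }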
|
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | postproc_msa.c |
    591 v4i32 mul0, mul1, mul2, mul3; in vp8_mbpost_proc_across_ip_msa() local
    625 mul0, mul1, mul2, mul3); in vp8_mbpost_proc_across_ip_msa()
    641 sum_sq3[0] = sum_sq2[3] + mul3[0]; in vp8_mbpost_proc_across_ip_msa()
    644 sum_sq3[cnt + 1] = sum_sq3[cnt] + mul3[cnt + 1]; in vp8_mbpost_proc_across_ip_msa()
    710 v4i32 mul3 = { 0 }; in vp8_mbpost_proc_down_msa() local
    739 mul3 += (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)mult1); in vp8_mbpost_proc_down_msa()
    767 mul3 += add3 * sub3; in vp8_mbpost_proc_down_msa()
    782 total3 = mul3 * __msa_ldi_w(15); in vp8_mbpost_proc_down_msa()
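Lines 641-644 compute a running (prefix) sum across the lanes of mul3, seeded from the last lane of the previous vector. A plain-C scalar equivalent of that update (array types simplified for illustration):

    /* sum_sq3[k] extends the previous partial sum by the next term;
       sum_sq2_3 plays the role of sum_sq2[3] from line 641. */
    void running_sum(const int mul3[4], int sum_sq2_3, int sum_sq3[4]) {
        sum_sq3[0] = sum_sq2_3 + mul3[0];
        for (int cnt = 0; cnt < 3; ++cnt)
            sum_sq3[cnt + 1] = sum_sq3[cnt] + mul3[cnt + 1];
    }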
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | wrong-transalu-pos-fix.ll |
    13 %mul3 = mul i32 %mul, %z.i17
    34 store i32 %mul3, i32 addrspace(1)* %arrayidx, align 4
|
/external/llvm/test/CodeGen/SystemZ/ |
D | fp-mul-01.ll |
    109 %mul3 = fmul float %mul2, %val3
    110 %mul4 = fmul float %mul3, %val4
|
D | fp-mul-03.ll |
    111 %mul3 = fmul double %mul2, %val3
    112 %mul4 = fmul double %mul3, %val4
|
D | fp-move-02.ll |
    290 %mul3 = fmul double %conv3, %factor
    291 store volatile double %mul3, double *@dptr
    333 %double3 = phi double [ 1.0, %entry ], [ %mul3, %loop ]
    345 %mul3 = fmul double %double3, %factor
    368 %conv3 = bitcast double %mul3 to i64
|
/external/llvm/test/Transforms/StraightLineStrengthReduce/ |
D | slsr-mul.ll |
    83 %mul3 = mul i32 %a1, %b1
    88 call void @foo(i32 %mul3)
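Straight-line strength reduction rewrites a multiply in terms of an earlier, related one. A hedged C sketch of the candidate pattern this test feeds @foo (operand names invented):

    extern void foo(int);

    /* Since (a + 1) * b = a*b + b, SLSR replaces the second multiply
       (the analogue of %mul3) with an add off the first product. */
    void candidates(int a, int b) {
        int mul  = a * b;
        int mul3 = (a + 1) * b;  /* becomes mul + b after SLSR */
        foo(mul);
        foo(mul3);
    }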
|
/external/llvm/test/Analysis/DependenceAnalysis/ |
D | SymbolicSIV.ll |
     82 %mul3 = shl i64 %n, 1
     83 %add4 = add i64 %mul2, %mul3
    357 %mul3 = mul i64 %mul2, %i.03
    359 %add5 = add i64 %mul3, %mul4
    406 %mul3 = mul i64 %mul2, %i.03
    408 %sub = add i64 %mul3, %0
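Line 82's shl is just 2*n, so the subscripts the test builds are linear in the induction variable with symbolic coefficients: the single-index-variable (SIV) form the dependence analyzer solves exactly. A hypothetical C pair with that shape:

    /* Both subscripts have the form k*i + c with symbolic k and c, so
       the store/load dependence is decided by a symbolic SIV test. */
    void siv_pair(int *A, long n, long k) {
        for (long i = 0; i < n; ++i) {
            A[k * i + 2 * n] = 1;  /* store: k*i + 2*n */
            int t = A[k * i];      /* load:  k*i */
            (void)t;
        }
    }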
|
/external/llvm/test/Transforms/LoopVectorize/ARM/ |
D | gather-cost.ll |
    40 %mul3 = fmul fast float %0, %1
    43 %mul5 = fmul fast float %mul3, %2
|
/external/llvm/test/Transforms/LoopVectorize/X86/ |
D | gather-cost.ll |
    38 %mul3 = fmul fast float %0, %1
    41 %mul5 = fmul fast float %mul3, %2
|
/external/llvm/test/Transforms/LoopVectorize/AArch64/ |
D | gather-cost.ll |
    37 %mul3 = fmul fast float %0, %1
    40 %mul5 = fmul fast float %mul3, %2
|