Searched refs:mul16 (Results 1 – 8 of 8) sorted by relevance
/external/valgrind/none/tests/shortpush.c
    4   UShort mul16 ( UShort a, UShort b );
    8   UShort x = mul16 ( 10, 20 );                  in main()
   12   UShort mul16 ( UShort a, UShort b )           in mul16() function
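The shortpush.c hits only show the declaration, the call site in main(), and the start of the definition. A minimal sketch of such a test, assuming mul16 simply returns the truncated 16-bit product (the actual function body is not part of the hits):

    #include <stdio.h>

    typedef unsigned short UShort;

    UShort mul16 ( UShort a, UShort b );

    int main ( void )
    {
       UShort x = mul16 ( 10, 20 );
       printf("%u\n", (unsigned)x);   /* expected: 200 */
       return 0;
    }

    /* Hypothetical body: the hits show only the signature, so the
       plain truncating multiply below is an assumption. */
    UShort mul16 ( UShort a, UShort b )
    {
       return (UShort)(a * b);
    }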
/external/llvm/test/Transforms/BBVectorize/ld1.ll
   23   %mul16 = fmul double %add11, %add15
   24   ret double %mul16
   38   ; CHECK: %mul16 = fmul double %add11.v.r1, %add11.v.r2
   39   ; CHECK: ret double %mul16
/external/llvm/test/Transforms/LoopVectorize/i8-induction.ll
   16   %mul16 = phi i8 [ 0, %scalar.ph ], [ %mul, %for.body ]   ; <------- i8 induction var.
   21   %mul = mul i8 %mul16, %.sink
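The LoopVectorize hit marks %mul16 as an i8 induction variable: an 8-bit product carried across loop iterations. Roughly the same pattern written in C, with illustrative names that are not taken from the test:

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative only: an 8-bit multiply accumulator carried across
       iterations, loosely mirroring %mul = mul i8 %mul16, %.sink above. */
    int8_t mul_reduce_i8(const int8_t *p, size_t n)
    {
        int8_t mul = 0;                    /* starting value, as in the phi */
        for (size_t i = 0; i < n; i++)
            mul = (int8_t)(mul * p[i]);
        return mul;
    }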
/external/llvm/test/Transforms/SLPVectorizer/X86/cse.ll
   78   %mul16 = fmul double %3, 7.400000e+00
   79   %mul18 = fmul double %conv, %mul16
  170   %mul16 = fmul double %3, 7.900000e+00
  171   %mul18 = fmul double %conv, %mul16
/external/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
   57   %mul16 = fmul fast double %0, %5
   58   %add17 = fadd fast double %mul16, %mul15
  110   %mul16 = fmul fast double %2, %3
  111   %add17 = fadd fast double %mul16, %sub
  205   %mul16 = fmul fast float %2, %3
  206   %add17 = fadd fast float %mul16, %sub
/external/llvm/test/Transforms/BBVectorize/X86/wr-aliases.ll
   83   %mul16.i = fmul double 5.000000e-01, %add15.i
   85   store double %mul16.i, double* %x317.i, align 16
/external/valgrind/VEX/priv/host_generic_simd64.c
  225   static inline Short mul16 ( Short xx, Short yy )       in mul16() function
  658   mul16( sel16x4_3(xx), sel16x4_3(yy) ),                 in h_generic_calc_Mul16x4()
  659   mul16( sel16x4_2(xx), sel16x4_2(yy) ),                 in h_generic_calc_Mul16x4()
  660   mul16( sel16x4_1(xx), sel16x4_1(yy) ),                 in h_generic_calc_Mul16x4()
  661   mul16( sel16x4_0(xx), sel16x4_0(yy) )                  in h_generic_calc_Mul16x4()
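The VEX hits show a scalar 16-bit multiply helper reused lane by lane inside h_generic_calc_Mul16x4. A sketch of that pattern, assuming sel16x4_N extracts 16-bit lane N of a 64-bit value and mk16x4 repacks four lanes; those helpers are inferred from the call sites and are not shown in the hits:

    #include <stdint.h>

    typedef int16_t  Short;
    typedef uint16_t UShort;
    typedef uint64_t ULong;

    /* Truncating 16x16 multiply, as in the mul16() helper above. */
    static inline Short mul16 ( Short xx, Short yy )
    {
       int t = ((int)xx) * ((int)yy);
       return (Short)t;
    }

    /* Assumed lane helpers: sel16x4_N picks 16-bit lane N, mk16x4 repacks. */
    static inline UShort sel16x4_0 ( ULong w ) { return (UShort)(w      ); }
    static inline UShort sel16x4_1 ( ULong w ) { return (UShort)(w >> 16); }
    static inline UShort sel16x4_2 ( ULong w ) { return (UShort)(w >> 32); }
    static inline UShort sel16x4_3 ( ULong w ) { return (UShort)(w >> 48); }

    static inline ULong mk16x4 ( UShort w3, UShort w2, UShort w1, UShort w0 )
    {
       return ((ULong)w3 << 48) | ((ULong)w2 << 32)
            | ((ULong)w1 << 16) | ((ULong)w0);
    }

    /* Lane-wise 16x4 multiply built from the scalar helper, mirroring
       the h_generic_calc_Mul16x4() call sites listed above. */
    ULong h_generic_calc_Mul16x4 ( ULong xx, ULong yy )
    {
       return mk16x4(
                 (UShort)mul16( (Short)sel16x4_3(xx), (Short)sel16x4_3(yy) ),
                 (UShort)mul16( (Short)sel16x4_2(xx), (Short)sel16x4_2(yy) ),
                 (UShort)mul16( (Short)sel16x4_1(xx), (Short)sel16x4_1(yy) ),
                 (UShort)mul16( (Short)sel16x4_0(xx), (Short)sel16x4_0(yy) )
              );
    }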
/external/llvm/test/CodeGen/Thumb2/constant-islands.ll
  305   %mul16 = fmul float 0x3FA99999A0000000, %12
  306   …s.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %10, float %mul15, float %mul16)