Searched for refs:f64 (results 1–25 of 2152), sorted by relevance

/external/valgrind/VEX/useful/
fp_80_64.c
44 static void convert_f80le_to_f64le_HW ( /*IN*/UChar* f80, /*OUT*/UChar* f64 ) in convert_f80le_to_f64le_HW() argument
48 : "r" (&f80[0]), "r" (&f64[0]) in convert_f80le_to_f64le_HW()
52 static void convert_f64le_to_f80le_HW ( /*IN*/UChar* f64, /*OUT*/UChar* f80 ) in convert_f64le_to_f80le_HW() argument
56 : "r" (&f64[0]), "r" (&f80[0]) in convert_f64le_to_f80le_HW()
103 static void convert_f64le_to_f80le ( /*IN*/UChar* f64, /*OUT*/UChar* f80 ) in convert_f64le_to_f80le() argument
109 sign = toUChar( (f64[7] >> 7) & 1 ); in convert_f64le_to_f80le()
110 bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F); in convert_f64le_to_f80le()
120 (f64[6] & 0x0F) == 0 in convert_f64le_to_f80le()
121 && f64[5] == 0 && f64[4] == 0 && f64[3] == 0 in convert_f64le_to_f80le()
122 && f64[2] == 0 && f64[1] == 0 && f64[0] == 0 in convert_f64le_to_f80le()
[all …]
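
The valgrind excerpt above reads the sign bit, the 11-bit biased exponent, and the zero-mantissa test directly out of the little-endian byte image of a double. A minimal standalone C sketch of that same bit extraction (illustrative only, assuming a little-endian host; not the valgrind code itself):

#include <stdio.h>
#include <string.h>

/* Read the fields the valgrind converter uses from the 8 little-endian
   bytes of an IEEE-754 double: sign, 11-bit biased exponent, and whether
   the 52-bit mantissa is all zero. */
static void inspect_f64le(double d)
{
    unsigned char f64[8];
    memcpy(f64, &d, sizeof f64);                 /* assumes little-endian host */

    int sign = (f64[7] >> 7) & 1;
    int bexp = ((f64[7] & 0x7F) << 4) | ((f64[6] >> 4) & 0x0F);
    int mant_zero =
           (f64[6] & 0x0F) == 0
        && f64[5] == 0 && f64[4] == 0 && f64[3] == 0
        && f64[2] == 0 && f64[1] == 0 && f64[0] == 0;

    printf("sign=%d bexp=0x%03X mantissa_zero=%d\n", sign, bexp, mant_zero);
}

int main(void)
{
    inspect_f64le(1.0);     /* sign=0 bexp=0x3FF mantissa_zero=1 */
    inspect_f64le(-0.0);    /* sign=1 bexp=0x000 mantissa_zero=1 */
    return 0;
}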
/external/llvm/test/MC/ARM/
single-precision-fp.s
5 vadd.f64 d0, d1, d2
6 vsub.f64 d2, d3, d4
7 vdiv.f64 d4, d5, d6
8 vmul.f64 d6, d7, d8
9 vnmul.f64 d8, d9, d10
11 @ CHECK-ERRORS-NEXT: vadd.f64 d0, d1, d2
13 @ CHECK-ERRORS-NEXT: vsub.f64 d2, d3, d4
15 @ CHECK-ERRORS-NEXT: vdiv.f64 d4, d5, d6
17 @ CHECK-ERRORS-NEXT: vmul.f64 d6, d7, d8
19 @ CHECK-ERRORS-NEXT: vnmul.f64 d8, d9, d10
[all …]
thumb-fp-armv8.s
5 vcvtt.f64.f16 d3, s1
6 @ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0xe0,0x3b]
7 vcvtt.f16.f64 s5, d12
8 @ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xf3,0xee,0xcc,0x2b]
10 vcvtb.f64.f16 d3, s1
11 @ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0x60,0x3b]
12 vcvtb.f16.f64 s4, d1
13 @ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0xb3,0xee,0x41,0x2b]
16 vcvttge.f64.f16 d3, s1
17 @ CHECK: vcvttge.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0xe0,0x3b]
[all …]
fp-armv8.s
5 vcvtt.f64.f16 d3, s1
6 @ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee]
7 vcvtt.f16.f64 s5, d12
8 @ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee]
10 vcvtb.f64.f16 d3, s1
11 @ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0x60,0x3b,0xb2,0xee]
12 vcvtb.f16.f64 s4, d1
13 @ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0x41,0x2b,0xb3,0xee]
15 vcvttge.f64.f16 d3, s1
16 @ CHECK: vcvttge.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xae]
[all …]
directive-arch_extension-fp.s
35 vselgt.f64 d0, d0, d0
37 vselge.f64 d0, d0, d0
39 vseleq.f64 d0, d0, d0
41 vselvs.f64 d0, d0, d0
43 vmaxnm.f64 d0, d0, d0
45 vminnm.f64 d0, d0, d0
48 vcvtb.f64.f16 d0, s0
50 vcvtb.f16.f64 s0, d0
52 vcvtt.f64.f16 d0, s0
54 vcvtt.f16.f64 s0, d0
[all …]
directive-arch_extension-simd.s
24 vmaxnm.f64 d0, d0, d0
26 vminnm.f64 d0, d0, d0
33 vcvta.s32.f64 s0, d0
35 vcvta.u32.f64 s0, d0
41 vcvtn.s32.f64 s0, d0
43 vcvtn.u32.f64 s0, d0
49 vcvtp.s32.f64 s0, d0
51 vcvtp.u32.f64 s0, d0
57 vcvtm.s32.f64 s0, d0
59 vcvtm.u32.f64 s0, d0
[all …]
simple-fp-encoding.s
3 vadd.f64 d16, d17, d16
5 @ CHECK: vadd.f64 d16, d17, d16 @ encoding: [0xa0,0x0b,0x71,0xee]
8 vsub.f64 d16, d17, d16
10 @ CHECK: vsub.f64 d16, d17, d16 @ encoding: [0xe0,0x0b,0x71,0xee]
13 vdiv.f64 d16, d17, d16
16 vdiv.f64 d5, d7
18 @ CHECK: vdiv.f64 d16, d17, d16 @ encoding: [0xa0,0x0b,0xc1,0xee]
21 @ CHECK: vdiv.f64 d5, d5, d7 @ encoding: [0x07,0x5b,0x85,0xee]
24 vmul.f64 d16, d17, d16
25 vmul.f64 d20, d17
[all …]
invalid-fp-armv8.s
5 vcvtt.f64.f16 d3, s1
6 @ V7-NOT: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee]
7 vcvtt.f16.f64 s5, d12
8 @ V7-NOT: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee]
39 vselgt.f64 s3, s2, s1
43 vselgt.f64 q0, s3, q1
48 vminnm.f64 s3, s2, s1
52 vmaxnm.f64 q0, s3, q1
54 vmaxnmgt.f64 q0, s3, q1
57 vcvta.s32.f64 d3, s2
[all …]
d16.s
7 @ D16-NEXT: vadd.f64 d1, d2, d16
8 vadd.f64 d1, d2, d16
11 @ D16-NEXT: vadd.f64 d1, d17, d6
12 vadd.f64 d1, d17, d6
15 @ D16-NEXT: vadd.f64 d19, d7, d6
16 vadd.f64 d19, d7, d6
19 @ D16-NEXT: vcvt.f64.f32 d22, s4
20 vcvt.f64.f32 d22, s4
23 @ D16-NEXT: vcvt.f32.f64 s26, d30
24 vcvt.f32.f64 s26, d30
vfp4.s
7 @ ARM: vfma.f64 d16, d18, d17 @ encoding: [0xa1,0x0b,0xe2,0xee]
8 @ THUMB: vfma.f64 d16, d18, d17 @ encoding: [0xe2,0xee,0xa1,0x0b]
10 @ THUMB_V7EM-ERRORS-NEXT: vfma.f64 d16, d18, d17
11 vfma.f64 d16, d18, d17
30 @ ARM: vfnma.f64 d16, d18, d17 @ encoding: [0xe1,0x0b,0xd2,0xee]
31 @ THUMB: vfnma.f64 d16, d18, d17 @ encoding: [0xd2,0xee,0xe1,0x0b]
33 @ THUMB_V7EM-ERRORS-NEXT: vfnma.f64 d16, d18, d17
34 vfnma.f64 d16, d18, d17
41 @ ARM: vfms.f64 d16, d18, d17 @ encoding: [0xe1,0x0b,0xe2,0xee]
42 @ THUMB: vfms.f64 d16, d18, d17 @ encoding: [0xe2,0xee,0xe1,0x0b]
[all …]
/external/valgrind/VEX/priv/
guest_generic_x87.c
105 void convert_f64le_to_f80le ( /*IN*/UChar* f64, /*OUT*/UChar* f80 ) in convert_f64le_to_f80le() argument
111 sign = toUChar( (f64[7] >> 7) & 1 ); in convert_f64le_to_f80le()
112 bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F); in convert_f64le_to_f80le()
122 (f64[6] & 0x0F) == 0 in convert_f64le_to_f80le()
123 && f64[5] == 0 && f64[4] == 0 && f64[3] == 0 in convert_f64le_to_f80le()
124 && f64[2] == 0 && f64[1] == 0 && f64[0] == 0 in convert_f64le_to_f80le()
147 if (read_bit_array(f64, i)) in convert_f64le_to_f80le()
156 read_bit_array( f64, i ) ); in convert_f64le_to_f80le()
194 if (f64[6] & 8) { in convert_f64le_to_f80le()
223 f80[7] = toUChar( (1 << 7) | ((f64[6] << 3) & 0x78) in convert_f64le_to_f80le()
[all …]
/external/llvm/test/CodeGen/Thumb2/
float-intrinsics-double.ll
8 declare double @llvm.sqrt.f64(double %Val)
12 ; HARD: vsqrt.f64 d0, d0
13 %1 = call double @llvm.sqrt.f64(double %a)
17 declare double @llvm.powi.f64(double %Val, i32 %power)
22 %1 = call double @llvm.powi.f64(double %a, i32 %b)
26 declare double @llvm.sin.f64(double %Val)
31 %1 = call double @llvm.sin.f64(double %a)
35 declare double @llvm.cos.f64(double %Val)
40 %1 = call double @llvm.cos.f64(double %a)
44 declare double @llvm.pow.f64(double %Val, double %power)
[all …]
/external/llvm/test/CodeGen/WebAssembly/
comparisons_f64.ll
10 ; CHECK-NEXT: .param f64, f64{{$}}
12 ; CHECK-NEXT: f64.eq $push[[NUM0:[0-9]+]]=, $0, $0{{$}}
13 ; CHECK-NEXT: f64.eq $push[[NUM1:[0-9]+]]=, $1, $1{{$}}
23 ; CHECK-NEXT: .param f64, f64{{$}}
25 ; CHECK-NEXT: f64.ne $push[[NUM0:[0-9]+]]=, $0, $0{{$}}
26 ; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $1, $1{{$}}
36 ; CHECK-NEXT: .param f64, f64{{$}}
38 ; CHECK-NEXT: f64.eq $push[[NUM:[0-9]+]]=, $0, $1{{$}}
47 ; CHECK: f64.ne $push[[NUM:[0-9]+]]=, $0, $1{{$}}
56 ; CHECK: f64.lt $push[[NUM:[0-9]+]]=, $0, $1{{$}}
[all …]
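
The self-comparisons in the CHECK lines above (f64.eq $0, $0 and f64.eq $1, $1) are the usual NaN tests: an ordered predicate first shows that neither operand is NaN, an unordered one that at least one is. A minimal C restatement of the IEEE-754 semantics those f64.eq/f64.ne/f64.lt sequences implement (helper names are illustrative):

#include <assert.h>
#include <math.h>
#include <stdbool.h>

/* fcmp ord: neither operand is NaN (x == x is false only for NaN). */
static bool f64_ord(double a, double b) { return (a == a) && (b == b); }

/* fcmp uno: at least one operand is NaN. */
static bool f64_uno(double a, double b) { return (a != a) || (b != b); }

/* fcmp oeq / une / olt map directly onto wasm f64.eq / f64.ne / f64.lt. */
static bool f64_oeq(double a, double b) { return a == b; }
static bool f64_une(double a, double b) { return a != b; }   /* also true when unordered */
static bool f64_olt(double a, double b) { return a <  b; }

int main(void)
{
    double n = NAN;
    assert(!f64_ord(1.0, n) && f64_uno(1.0, n));
    assert(f64_oeq(2.0, 2.0) && f64_olt(1.0, 2.0));
    assert(f64_une(1.0, n));
    return 0;
}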
f64.ll
8 declare double @llvm.fabs.f64(double)
9 declare double @llvm.copysign.f64(double, double)
10 declare double @llvm.sqrt.f64(double)
11 declare double @llvm.ceil.f64(double)
12 declare double @llvm.floor.f64(double)
13 declare double @llvm.trunc.f64(double)
14 declare double @llvm.nearbyint.f64(double)
15 declare double @llvm.rint.f64(double)
16 declare double @llvm.fma.f64(double, double, double)
19 ; CHECK-NEXT: .param f64, f64{{$}}
[all …]
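
The intrinsics declared above correspond to the C99 <math.h> double-precision functions of the same names. A small C sketch exercising that set directly (link with -lm on most systems):

#include <math.h>
#include <stdio.h>

/* C99 counterparts of the llvm.*.f64 intrinsics declared in f64.ll. */
int main(void)
{
    double x = -2.5, y = 3.0;
    printf("%g\n", fabs(x));           /* llvm.fabs.f64      */
    printf("%g\n", copysign(y, x));    /* llvm.copysign.f64  */
    printf("%g\n", sqrt(y));           /* llvm.sqrt.f64      */
    printf("%g\n", ceil(x));           /* llvm.ceil.f64      */
    printf("%g\n", floor(x));          /* llvm.floor.f64     */
    printf("%g\n", trunc(x));          /* llvm.trunc.f64     */
    printf("%g\n", nearbyint(x));      /* llvm.nearbyint.f64 */
    printf("%g\n", rint(x));           /* llvm.rint.f64      */
    printf("%g\n", fma(x, y, 1.0));    /* llvm.fma.f64       */
    return 0;
}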
/external/llvm/test/MC/Disassembler/ARM/
fp-armv8.txt
4 # CHECK: vcvtt.f64.f16 d3, s1
7 # CHECK: vcvtt.f16.f64 s5, d12
10 # CHECK: vcvtb.f64.f16 d3, s1
13 # CHECK: vcvtb.f16.f64 s4, d1
16 # CHECK: vcvttge.f64.f16 d3, s1
19 # CHECK: vcvttgt.f16.f64 s5, d12
22 # CHECK: vcvtbeq.f64.f16 d3, s1
25 # CHECK: vcvtblt.f16.f64 s4, d1
32 # CHECK: vcvta.s32.f64 s2, d3
38 # CHECK: vcvtn.s32.f64 s6, d23
[all …]
thumb-fp-armv8.txt
4 # CHECK: vcvtt.f64.f16 d3, s1
7 # CHECK: vcvtt.f16.f64 s5, d12
10 # CHECK: vcvtb.f64.f16 d3, s1
13 # CHECK: vcvtb.f16.f64 s4, d1
17 # CHECK: vcvttge.f64.f16 d3, s1
21 # CHECK: vcvttgt.f16.f64 s5, d12
25 # CHECK: vcvtbeq.f64.f16 d3, s1
29 # CHECK: vcvtblt.f16.f64 s4, d1
36 # CHECK: vcvta.s32.f64 s2, d3
42 # CHECK: vcvtn.s32.f64 s6, d23
[all …]
/external/llvm/lib/Target/WebAssembly/
WebAssemblyInstrFloat.td
49 def : Pat<(frint f64:$src), (NEAREST_F64 f64:$src)>;
71 def : Pat<(seteq f64:$lhs, f64:$rhs), (EQ_F64 f64:$lhs, f64:$rhs)>;
72 def : Pat<(setne f64:$lhs, f64:$rhs), (NE_F64 f64:$lhs, f64:$rhs)>;
73 def : Pat<(setlt f64:$lhs, f64:$rhs), (LT_F64 f64:$lhs, f64:$rhs)>;
74 def : Pat<(setle f64:$lhs, f64:$rhs), (LE_F64 f64:$lhs, f64:$rhs)>;
75 def : Pat<(setgt f64:$lhs, f64:$rhs), (GT_F64 f64:$lhs, f64:$rhs)>;
76 def : Pat<(setge f64:$lhs, f64:$rhs), (GE_F64 f64:$lhs, f64:$rhs)>;
85 "f64.select\t$dst, $cond, $lhs, $rhs">;
/external/llvm/test/ExecutionEngine/Interpreter/
intrinsics.ll
5 declare double @llvm.sin.f64(double)
7 declare double @llvm.cos.f64(double)
9 declare double @llvm.floor.f64(double)
11 declare double @llvm.ceil.f64(double)
13 declare double @llvm.trunc.f64(double)
15 declare double @llvm.round.f64(double)
17 declare double @llvm.copysign.f64(double, double)
21 %sin64 = call double @llvm.sin.f64(double 0.000000e+00)
23 %cos64 = call double @llvm.cos.f64(double 0.000000e+00)
25 %floor64 = call double @llvm.floor.f64(double 0.000000e+00)
[all …]
/external/llvm/test/CodeGen/XCore/
float-intrinsics.ll
2 declare double @llvm.cos.f64(double)
3 declare double @llvm.exp.f64(double)
4 declare double @llvm.exp2.f64(double)
5 declare double @llvm.log.f64(double)
6 declare double @llvm.log10.f64(double)
7 declare double @llvm.log2.f64(double)
8 declare double @llvm.pow.f64(double, double)
9 declare double @llvm.powi.f64(double, i32)
10 declare double @llvm.sin.f64(double)
11 declare double @llvm.sqrt.f64(double)
[all …]
/external/llvm/test/Transforms/InstSimplify/
fold-builtin-fma.ll
7 declare double @llvm.fma.f64(double, double, double)
11 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0.0)
19 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 5.0)
27 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0x7FF8000000000000)
34 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0xFFF8000000000000)
42 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0x7FF0000000000000)
49 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0xFFF0000000000000)
57 %1 = call double @llvm.fma.f64(double 0x7FF8000000000000, double 8.0, double 0.0)
65 %1 = call double @llvm.fma.f64(double 7.0, double 0x7FF8000000000000, double 0.0)
73 %1 = call double @llvm.fma.f64(double 0xFFF8000000000000, double 8.0, double 0.0)
[all …]
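
These InstSimplify tests fold constant calls to llvm.fma.f64; the expected values follow straight from IEEE-754 fma semantics (0x7FF8000000000000 is a quiet NaN, 0x7FF0000000000000 is +infinity). A short C check of the same constant cases, as a sketch rather than a reproduction of the test's expectations:

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* Constant cases mirrored from the test: fma(7, 8, c). */
    printf("%g\n", fma(7.0, 8.0, 0.0));        /* 56  */
    printf("%g\n", fma(7.0, 8.0, 5.0));        /* 61  */
    printf("%g\n", fma(7.0, 8.0, NAN));        /* nan */
    printf("%g\n", fma(7.0, 8.0, INFINITY));   /* inf */
    printf("%g\n", fma(NAN, 8.0, 0.0));        /* nan */
    return 0;
}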
/external/llvm/lib/Target/ARM/
ARMCallingConv.td
26 // Handle all vector types as either f64 or v2f64.
27 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
30 // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
31 CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
37 CCIfType<[f64], CCAssignToStack<8, 4>>,
45 // Handle all vector types as either f64 or v2f64.
46 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
49 CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
59 // Handle all vector types as either f64 or v2f64.
60 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
[all …]
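
The CCCustom rules above pass an f64 in two adjacent 32-bit GPRs under APCS, spilling to the stack when registers run out. A small sketch (assuming a little-endian host; purely illustrative, not the backend code) of how a double decomposes into that pair of register-sized words:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Split a double into the two 32-bit words that would occupy a pair of
   adjacent GPRs under a soft-float ARM calling convention. */
static void split_f64(double d, uint32_t *lo, uint32_t *hi)
{
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits);   /* assumes little-endian host */
    *lo = (uint32_t)(bits & 0xFFFFFFFFu);
    *hi = (uint32_t)(bits >> 32);
}

int main(void)
{
    uint32_t lo, hi;
    split_f64(1.0, &lo, &hi);
    printf("lo=0x%08X hi=0x%08X\n", lo, hi);   /* 1.0 -> lo=0x00000000 hi=0x3FF00000 */
    return 0;
}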
/external/llvm/test/CodeGen/SystemZ/
fp-round-01.ll
15 ; Test rint for f64.
16 declare double @llvm.rint.f64(double %f)
21 %res = call double @llvm.rint.f64(double %f)
47 ; Test nearbyint for f64.
48 declare double @llvm.nearbyint.f64(double %f)
53 %res = call double @llvm.nearbyint.f64(double %f)
70 ; Test floor for f64.
71 declare double @llvm.floor.f64(double %f)
76 %res = call double @llvm.floor.f64(double %f)
93 ; Test ceil for f64.
[all …]
fp-sqrt-02.ll
7 declare double @llvm.sqrt.f64(double %f)
15 %res = call double @llvm.sqrt.f64(double %val)
25 %res = call double @llvm.sqrt.f64(double %val)
36 %res = call double @llvm.sqrt.f64(double %val)
49 %res = call double @llvm.sqrt.f64(double %val)
61 %res = call double @llvm.sqrt.f64(double %val)
74 %res = call double @llvm.sqrt.f64(double %val)
102 %sqrt0 = call double @llvm.sqrt.f64(double %val0)
103 %sqrt1 = call double @llvm.sqrt.f64(double %val1)
104 %sqrt2 = call double @llvm.sqrt.f64(double %val2)
[all …]
/external/valgrind/none/tests/amd64/
nan80and64.c
61 static void rev64 ( UChar* f64 ) in rev64() argument
63 SWAPC( f64[0], f64[7] ); in rev64()
64 SWAPC( f64[1], f64[6] ); in rev64()
65 SWAPC( f64[2], f64[5] ); in rev64()
66 SWAPC( f64[3], f64[4] ); in rev64()
/external/llvm/test/Transforms/BBVectorize/X86/
simple-int.ll
1 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v…
4 declare double @llvm.fma.f64(double, double, double)
5 declare double @llvm.fmuladd.f64(double, double, double)
6 declare double @llvm.cos.f64(double)
7 declare double @llvm.powi.f64(double, i32)
13 %Y1 = call double @llvm.fma.f64(double %X1, double %A1, double %C1)
14 %Y2 = call double @llvm.fma.f64(double %X2, double %A2, double %C2)
27 %Y1 = call double @llvm.fmuladd.f64(double %X1, double %A1, double %C1)
28 %Y2 = call double @llvm.fmuladd.f64(double %X2, double %A2, double %C2)
41 %Y1 = call double @llvm.cos.f64(double %X1)
[all …]
