; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s

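; GCN has no v_sub_f64 instruction, so f64 fsub is selected as v_add_f64 with
; the source negation modifier on the subtrahend (combined with the |fabs|
; modifier where the operand comes from an llvm.fabs call).
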
declare double @llvm.fabs.f64(double) #0

; SI-LABEL: {{^}}fsub_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                      double addrspace(1)* %in2) {
  %r0 = load double, double addrspace(1)* %in1
  %r1 = load double, double addrspace(1)* %in2
  %r2 = fsub double %r0, %r1
  store double %r2, double addrspace(1)* %out
  ret void
}

; SI-LABEL: {{^}}fsub_fabs_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -\|v\[[0-9]+:[0-9]+\]\|}}
define amdgpu_kernel void @fsub_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                           double addrspace(1)* %in2) {
  %r0 = load double, double addrspace(1)* %in1
  %r1 = load double, double addrspace(1)* %in2
  %r1.fabs = call double @llvm.fabs.f64(double %r1) #0
  %r2 = fsub double %r0, %r1.fabs
  store double %r2, double addrspace(1)* %out
  ret void
}

; SI-LABEL: {{^}}fsub_fabs_inv_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], \|v\[[0-9]+:[0-9]+\]\|, -v\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @fsub_fabs_inv_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                               double addrspace(1)* %in2) {
  %r0 = load double, double addrspace(1)* %in1
  %r1 = load double, double addrspace(1)* %in2
  %r0.fabs = call double @llvm.fabs.f64(double %r0) #0
  %r2 = fsub double %r0.fabs, %r1
  store double %r2, double addrspace(1)* %out
  ret void
}

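; When both operands are SGPRs, a VALU instruction may only read one of them
; (constant bus limit), so the negated source is first copied to VGPRs.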
; SI-LABEL: {{^}}s_fsub_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @s_fsub_f64(double addrspace(1)* %out, double %a, double %b) {
  %sub = fsub double %a, %b
  store double %sub, double addrspace(1)* %out
  ret void
}

; SI-LABEL: {{^}}s_fsub_imm_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], -s\[[0-9]+:[0-9]+\]}}, 4.0
define amdgpu_kernel void @s_fsub_imm_f64(double addrspace(1)* %out, double %a, double %b) {
  %sub = fsub double 4.0, %a
  store double %sub, double addrspace(1)* %out
  ret void
}

; SI-LABEL: {{^}}s_fsub_imm_inv_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\]}}, -4.0
define amdgpu_kernel void @s_fsub_imm_inv_f64(double addrspace(1)* %out, double %a, double %b) {
  %sub = fsub double %a, 4.0
  store double %sub, double addrspace(1)* %out
  ret void
}

; SI-LABEL: {{^}}s_fsub_self_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -s\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @s_fsub_self_f64(double addrspace(1)* %out, double %a) {
  %sub = fsub double %a, %a
  store double %sub, double addrspace(1)* %out
  ret void
}

; SI-LABEL: {{^}}fsub_v2f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) {
  %sub = fsub <2 x double> %a, %b
  store <2 x double> %sub, <2 x double> addrspace(1)* %out
  ret void
}

; SI-LABEL: {{^}}fsub_v4f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @fsub_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x double>, <4 x double> addrspace(1)* %in, i32 1
  %a = load <4 x double>, <4 x double> addrspace(1)* %in
  %b = load <4 x double>, <4 x double> addrspace(1)* %b_ptr
  %result = fsub <4 x double> %a, %b
  store <4 x double> %result, <4 x double> addrspace(1)* %out
  ret void
}

; SI-LABEL: {{^}}s_fsub_v4f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @s_fsub_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) {
  %result = fsub <4 x double> %a, %b
  store <4 x double> %result, <4 x double> addrspace(1)* %out, align 16
  ret void
}

attributes #0 = { nounwind readnone }