; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=FASTF64,FASTF16,ALL %s
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=SLOWF64,SLOWF16,ALL %s
; RUN: opt -cost-model -analyze -cost-kind=code-size -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=SIZEALL,FASTF16,ALL %s
; RUN: opt -cost-model -analyze -cost-kind=code-size -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=SIZEALL,SLOWF16,ALL %s
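; Cost model tests for fsub on AMDGPU: scalar and vector f32, f64, and f16,
; comparing targets with and without half-rate-64-ops and packed-f16 support
; (gfx900 vs. the default CPU), for both throughput and code-size cost kinds.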

; ALL-LABEL: 'fsub_f32'
; ALL: estimated cost of 1 for {{.*}} fsub float
define amdgpu_kernel void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
  %vec = load float, float addrspace(1)* %vaddr
  %add = fsub float %vec, %b
  store float %add, float addrspace(1)* %out
  ret void
}

; ALL-LABEL: 'fsub_v2f32'
; ALL: estimated cost of 2 for {{.*}} fsub <2 x float>
define amdgpu_kernel void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
  %add = fsub <2 x float> %vec, %b
  store <2 x float> %add, <2 x float> addrspace(1)* %out
  ret void
}

; ALL-LABEL: 'fsub_v3f32'
; ALL: estimated cost of 3 for {{.*}} fsub <3 x float>
define amdgpu_kernel void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
  %add = fsub <3 x float> %vec, %b
  store <3 x float> %add, <3 x float> addrspace(1)* %out
  ret void
}

; ALL-LABEL: 'fsub_v5f32'
; ALL: estimated cost of 5 for {{.*}} fsub <5 x float>
define amdgpu_kernel void @fsub_v5f32(<5 x float> addrspace(1)* %out, <5 x float> addrspace(1)* %vaddr, <5 x float> %b) #0 {
  %vec = load <5 x float>, <5 x float> addrspace(1)* %vaddr
  %add = fsub <5 x float> %vec, %b
  store <5 x float> %add, <5 x float> addrspace(1)* %out
  ret void
}

; ALL-LABEL: 'fsub_f64'
; FASTF64: estimated cost of 2 for {{.*}} fsub double
; SLOWF64: estimated cost of 4 for {{.*}} fsub double
; SIZEALL: estimated cost of 2 for {{.*}} fsub double
define amdgpu_kernel void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
  %vec = load double, double addrspace(1)* %vaddr
  %add = fsub double %vec, %b
  store double %add, double addrspace(1)* %out
  ret void
}

; ALL-LABEL: 'fsub_v2f64'
; FASTF64: estimated cost of 4 for {{.*}} fsub <2 x double>
; SLOWF64: estimated cost of 8 for {{.*}} fsub <2 x double>
; SIZEALL: estimated cost of 4 for {{.*}} fsub <2 x double>
define amdgpu_kernel void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
  %add = fsub <2 x double> %vec, %b
  store <2 x double> %add, <2 x double> addrspace(1)* %out
  ret void
}

; ALL-LABEL: 'fsub_v3f64'
; FASTF64: estimated cost of 6 for {{.*}} fsub <3 x double>
; SLOWF64: estimated cost of 12 for {{.*}} fsub <3 x double>
; SIZEALL: estimated cost of 6 for {{.*}} fsub <3 x double>
define amdgpu_kernel void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
  %add = fsub <3 x double> %vec, %b
  store <3 x double> %add, <3 x double> addrspace(1)* %out
  ret void
}

; ALL-LABEL: 'fsub_f16'
; ALL: estimated cost of 1 for {{.*}} fsub half
define amdgpu_kernel void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
  %vec = load half, half addrspace(1)* %vaddr
  %add = fsub half %vec, %b
  store half %add, half addrspace(1)* %out
  ret void
}

; ALL-LABEL: 'fsub_v2f16'
; SLOWF16: estimated cost of 2 for {{.*}} fsub <2 x half>
; FASTF16: estimated cost of 1 for {{.*}} fsub <2 x half>
define amdgpu_kernel void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
  %add = fsub <2 x half> %vec, %b
  store <2 x half> %add, <2 x half> addrspace(1)* %out
  ret void
}

; ALL-LABEL: 'fsub_v3f16'
; SLOWF16: estimated cost of 4 for {{.*}} fsub <3 x half>
; FASTF16: estimated cost of 2 for {{.*}} fsub <3 x half>
define amdgpu_kernel void @fsub_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %vaddr, <3 x half> %b) #0 {
  %vec = load <3 x half>, <3 x half> addrspace(1)* %vaddr
  %add = fsub <3 x half> %vec, %b
  store <3 x half> %add, <3 x half> addrspace(1)* %out
  ret void
}

; ALL-LABEL: 'fsub_v4f16'
; SLOWF16: estimated cost of 4 for {{.*}} fsub <4 x half>
; FASTF16: estimated cost of 2 for {{.*}} fsub <4 x half>
define amdgpu_kernel void @fsub_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
  %add = fsub <4 x half> %vec, %b
  store <4 x half> %add, <4 x half> addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind }