; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s

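; frem is lowered as x - trunc(x / y) * y. With the default (IEEE-safe) FP math
; the f32 division uses the full v_div_scale / v_rcp / v_fma / v_div_fmas /
; v_div_fixup expansion before the trunc and the negated mad.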
; FUNC-LABEL: {{^}}frem_f32:
; GCN-DAG: buffer_load_dword [[X:v[0-9]+]], {{.*$}}
; GCN-DAG: buffer_load_dword [[Y:v[0-9]+]], {{.*}} offset:16
; GCN: v_div_scale_f32

; GCN: v_rcp_f32_e32
; GCN: v_fma_f32
; GCN: v_mul_f32_e32
; GCN: v_div_fmas_f32
; GCN: v_div_fixup_f32
; GCN: v_trunc_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: s_endpgm
define amdgpu_kernel void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
                                    float addrspace(1)* %in2) #0 {
  %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
  %r0 = load float, float addrspace(1)* %in1, align 4
  %r1 = load float, float addrspace(1)* %gep2, align 4
  %r2 = frem float %r0, %r1
  store float %r2, float addrspace(1)* %out, align 4
  ret void
}

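; With "unsafe-fp-math"="true" (attribute #1) the f32 divide collapses to a
; plain v_rcp_f32 + v_mul_f32 before the trunc and the negated mad.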
; FUNC-LABEL: {{^}}unsafe_frem_f32:
; GCN: buffer_load_dword [[Y:v[0-9]+]], {{.*}} offset:16
; GCN: buffer_load_dword [[X:v[0-9]+]], {{.*}}
; GCN: v_rcp_f32_e32 [[INVY:v[0-9]+]], [[Y]]
; GCN: v_mul_f32_e32 [[DIV:v[0-9]+]], [[X]], [[INVY]]
; GCN: v_trunc_f32_e32 [[TRUNC:v[0-9]+]], [[DIV]]
; GCN: v_mad_f32 [[RESULT:v[0-9]+]], -[[TRUNC]], [[Y]], [[X]]
; GCN: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
                                           float addrspace(1)* %in2) #1 {
  %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
  %r0 = load float, float addrspace(1)* %in1, align 4
  %r1 = load float, float addrspace(1)* %gep2, align 4
  %r2 = frem float %r0, %r1
  store float %r2, float addrspace(1)* %out, align 4
  ret void
}

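; f64 version of the safe expansion. Only CI and newer targets have a
; v_trunc_f64 instruction, so the trunc is only checked under the CI prefix.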
; FUNC-LABEL: {{^}}frem_f64:
; GCN: buffer_load_dwordx2 [[Y:v\[[0-9]+:[0-9]+\]]], {{.*}}, 0
; GCN: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]], {{.*}}, 0
; GCN-DAG: v_div_fmas_f64
; GCN-DAG: v_div_scale_f64
; GCN-DAG: v_mul_f64
; CI: v_trunc_f64_e32
; CI: v_mul_f64
; GCN: v_add_f64
; GCN: buffer_store_dwordx2
; GCN: s_endpgm
define amdgpu_kernel void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                                    double addrspace(1)* %in2) #0 {
  %r0 = load double, double addrspace(1)* %in1, align 8
  %r1 = load double, double addrspace(1)* %in2, align 8
  %r2 = frem double %r0, %r1
  store double %r2, double addrspace(1)* %out, align 8
  ret void
}

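; Unsafe f64: the divide is just v_rcp_f64 + v_mul_f64. SI has no v_trunc_f64,
; so the trunc is expanded manually (hence the v_bfe_u32 extracting the
; exponent), while CI uses the single instruction.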
; FUNC-LABEL: {{^}}unsafe_frem_f64:
; GCN: v_rcp_f64_e32
; GCN: v_mul_f64
; SI: v_bfe_u32
; CI: v_trunc_f64_e32
; GCN: v_fma_f64
; GCN: s_endpgm
define amdgpu_kernel void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                                           double addrspace(1)* %in2) #1 {
  %r0 = load double, double addrspace(1)* %in1, align 8
  %r1 = load double, double addrspace(1)* %in2, align 8
  %r2 = frem double %r0, %r1
  store double %r2, double addrspace(1)* %out, align 8
  ret void
}

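; The vector cases below have no CHECK lines; they only verify that vector frem
; is legalized (expanded per element) and compiles cleanly under
; -verify-machineinstrs. This one covers <2 x float>.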
define amdgpu_kernel void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
                                      <2 x float> addrspace(1)* %in2) #0 {
  %gep2 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in2, i32 4
  %r0 = load <2 x float>, <2 x float> addrspace(1)* %in1, align 8
  %r1 = load <2 x float>, <2 x float> addrspace(1)* %gep2, align 8
  %r2 = frem <2 x float> %r0, %r1
  store <2 x float> %r2, <2 x float> addrspace(1)* %out, align 8
  ret void
}

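; Compile-only test: <4 x float> frem, expanded to four scalar f32 expansions.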
define amdgpu_kernel void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
                                      <4 x float> addrspace(1)* %in2) #0 {
  %gep2 = getelementptr <4 x float>, <4 x float> addrspace(1)* %in2, i32 4
  %r0 = load <4 x float>, <4 x float> addrspace(1)* %in1, align 16
  %r1 = load <4 x float>, <4 x float> addrspace(1)* %gep2, align 16
  %r2 = frem <4 x float> %r0, %r1
  store <4 x float> %r2, <4 x float> addrspace(1)* %out, align 16
  ret void
}

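; Compile-only test: <2 x double> frem, expanded to two scalar f64 expansions.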
define amdgpu_kernel void @frem_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
                                      <2 x double> addrspace(1)* %in2) #0 {
  %gep2 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in2, i32 4
  %r0 = load <2 x double>, <2 x double> addrspace(1)* %in1, align 16
  %r1 = load <2 x double>, <2 x double> addrspace(1)* %gep2, align 16
  %r2 = frem <2 x double> %r0, %r1
  store <2 x double> %r2, <2 x double> addrspace(1)* %out, align 16
  ret void
}

attributes #0 = { nounwind "unsafe-fp-math"="false" }
attributes #1 = { nounwind "unsafe-fp-math"="true" }