1; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
2; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
3; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
4; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
5
6; The code generated by sdiv is long and complex and may frequently change.
7; The goal of this test is to make sure the ISel doesn't fail.
8;
9; This program was previously failing to compile when one of the selectcc
10; opcodes generated by the sdiv lowering was being legalized and optimized to:
11; selectcc Remainder -1, 0, -1, SETGT
12; This was fixed by adding an additional pattern in R600Instructions.td to
13; match this pattern with a CNDGE_INT.
14
15; FUNC-LABEL: {{^}}sdiv_i32:
16; EG: CF_END
define amdgpu_kernel void @sdiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  ; Numerator is in[0], denominator is in[1]; store the signed quotient.
  %gep.den = getelementptr i32, i32 addrspace(1)* %in, i32 1
  %x = load i32, i32 addrspace(1)* %in
  %y = load i32, i32 addrspace(1)* %gep.den
  %quot = sdiv i32 %x, %y
  store i32 %quot, i32 addrspace(1)* %out
  ret void
}
25
26; FUNC-LABEL: {{^}}sdiv_i32_4:
define amdgpu_kernel void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  ; Signed divide by the power-of-two constant 4.
  %x = load i32, i32 addrspace(1)* %in
  %quot = sdiv i32 %x, 4
  store i32 %quot, i32 addrspace(1)* %out
  ret void
}
33
34; Multiply by a weird constant to make sure setIntDivIsCheap is
35; working.
36
37; FUNC-LABEL: {{^}}slow_sdiv_i32_3435:
38; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]],
39; SI-DAG: v_mov_b32_e32 [[MAGIC:v[0-9]+]], 0x98a1930b
40; SI: v_mul_hi_i32 [[TMP:v[0-9]+]], [[VAL]], [[MAGIC]]
41; SI: v_add_{{[iu]}}32
42; SI: v_lshrrev_b32
43; SI: v_ashrrev_i32
44; SI: v_add_{{[iu]}}32
45; SI: buffer_store_dword
46; SI: s_endpgm
define amdgpu_kernel void @slow_sdiv_i32_3435(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  ; Signed divide by a non-power-of-two constant; expected to lower to a
  ; magic-number multiply-high sequence (see CHECK lines above).
  %x = load i32, i32 addrspace(1)* %in
  %quot = sdiv i32 %x, 3435
  store i32 %quot, i32 addrspace(1)* %out
  ret void
}
53
define amdgpu_kernel void @sdiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  ; Element-wise signed divide of two <2 x i32> vectors loaded from in[0] / in[1].
  %gep.den = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %x = load <2 x i32>, <2 x i32> addrspace(1)* %in
  %y = load <2 x i32>, <2 x i32> addrspace(1)* %gep.den
  %quot = sdiv <2 x i32> %x, %y
  store <2 x i32> %quot, <2 x i32> addrspace(1)* %out
  ret void
}
62
define amdgpu_kernel void @sdiv_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  ; Element-wise signed divide of a <2 x i32> vector by the splat constant 4.
  %x = load <2 x i32>, <2 x i32> addrspace(1)* %in
  %quot = sdiv <2 x i32> %x, <i32 4, i32 4>
  store <2 x i32> %quot, <2 x i32> addrspace(1)* %out
  ret void
}
69
define amdgpu_kernel void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  ; Element-wise signed divide of two <4 x i32> vectors loaded from in[0] / in[1].
  %gep.den = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %x = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %y = load <4 x i32>, <4 x i32> addrspace(1)* %gep.den
  %quot = sdiv <4 x i32> %x, %y
  store <4 x i32> %quot, <4 x i32> addrspace(1)* %out
  ret void
}
78
define amdgpu_kernel void @sdiv_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  ; Element-wise signed divide of a <4 x i32> vector by the splat constant 4.
  %x = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %quot = sdiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
  store <4 x i32> %quot, <4 x i32> addrspace(1)* %out
  ret void
}
85
86; FUNC-LABEL: {{^}}v_sdiv_i8:
87; SI: v_rcp_iflag_f32
88; SI: v_bfe_i32 [[BFE:v[0-9]+]], v{{[0-9]+}}, 0, 8
89; SI: buffer_store_dword [[BFE]]
define amdgpu_kernel void @v_sdiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
  ; i8 sdiv: narrow enough for the f32-reciprocal fast path; the result is
  ; sign-extended to i32 before the store (v_bfe_i32 ..., 0, 8 in the output).
  %gep.den = getelementptr i8, i8 addrspace(1)* %in, i8 1
  %x = load i8, i8 addrspace(1)* %in
  %y = load i8, i8 addrspace(1)* %gep.den
  %quot = sdiv i8 %x, %y
  %quot.ext = sext i8 %quot to i32
  store i32 %quot.ext, i32 addrspace(1)* %out
  ret void
}
99
100; FUNC-LABEL: {{^}}v_sdiv_i23:
101; SI: v_rcp_iflag_f32
102; SI: v_bfe_i32 [[BFE:v[0-9]+]], v{{[0-9]+}}, 0, 23
103; SI: buffer_store_dword [[BFE]]
define amdgpu_kernel void @v_sdiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)* %in) {
  ; i23 sdiv: still within the 24-bit fast-path width (see v_rcp_iflag_f32 CHECK).
  %gep.den = getelementptr i23, i23 addrspace(1)* %in, i23 1
  %x = load i23, i23 addrspace(1)* %in
  %y = load i23, i23 addrspace(1)* %gep.den
  %quot = sdiv i23 %x, %y
  %quot.ext = sext i23 %quot to i32
  store i32 %quot.ext, i32 addrspace(1)* %out
  ret void
}
113
114; FUNC-LABEL: {{^}}v_sdiv_i24:
115; SI: v_rcp_iflag_f32
116; SI: v_bfe_i32 [[BFE:v[0-9]+]], v{{[0-9]+}}, 0, 24
117; SI: buffer_store_dword [[BFE]]
define amdgpu_kernel void @v_sdiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) {
  ; i24 sdiv: the widest type the f32-reciprocal fast path handles here.
  %gep.den = getelementptr i24, i24 addrspace(1)* %in, i24 1
  %x = load i24, i24 addrspace(1)* %in
  %y = load i24, i24 addrspace(1)* %gep.den
  %quot = sdiv i24 %x, %y
  %quot.ext = sext i24 %quot to i32
  store i32 %quot.ext, i32 addrspace(1)* %out
  ret void
}
127
128; FUNC-LABEL: {{^}}v_sdiv_i25:
129; SI-NOT: v_rcp_f32
define amdgpu_kernel void @v_sdiv_i25(i32 addrspace(1)* %out, i25 addrspace(1)* %in) {
  ; i25 sdiv: one bit past the 24-bit fast path, so no v_rcp_f32 is expected
  ; (SI-NOT CHECK above).
  %gep.den = getelementptr i25, i25 addrspace(1)* %in, i25 1
  %x = load i25, i25 addrspace(1)* %in
  %y = load i25, i25 addrspace(1)* %gep.den
  %quot = sdiv i25 %x, %y
  %quot.ext = sext i25 %quot to i32
  store i32 %quot.ext, i32 addrspace(1)* %out
  ret void
}
139
140; Tests for 64-bit divide bypass.
141; define amdgpu_kernel void @test_get_quotient(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
142;   %result = sdiv i64 %a, %b
143;   store i64 %result, i64 addrspace(1)* %out, align 8
144;   ret void
145; }
146
147; define amdgpu_kernel void @test_get_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
148;   %result = srem i64 %a, %b
149;   store i64 %result, i64 addrspace(1)* %out, align 8
150;   ret void
151; }
152
153; define amdgpu_kernel void @test_get_quotient_and_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
154;   %resultdiv = sdiv i64 %a, %b
155;   %resultrem = srem i64 %a, %b
156;   %result = add i64 %resultdiv, %resultrem
157;   store i64 %result, i64 addrspace(1)* %out, align 8
158;   ret void
159; }
160
161; FUNC-LABEL: @scalarize_mulhs_4xi32
162; SI: v_mul_hi_i32
163; SI: v_mul_hi_i32
164; SI: v_mul_hi_i32
165; SI: v_mul_hi_i32
166
define amdgpu_kernel void @scalarize_mulhs_4xi32(<4 x i32> addrspace(1)* nocapture readonly %in, <4 x i32> addrspace(1)* nocapture %out) {
  ; Constant vector sdiv should scalarize into four v_mul_hi_i32 ops.
  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16
  %quot = sdiv <4 x i32> %vec, <i32 53668, i32 53668, i32 53668, i32 53668>
  store <4 x i32> %quot, <4 x i32> addrspace(1)* %out, align 16
  ret void
}
173