1; RUN: llc -march=amdgcn -mcpu=verde < %s | FileCheck %s
2
3; Test a simple uniform loop that lives inside non-uniform control flow.
4
5; CHECK-LABEL: {{^}}test1:
6; CHECK: v_cmp_ne_u32_e32 vcc, 0
7; CHECK: s_and_saveexec_b64
8; CHECK-NEXT: s_cbranch_execz BB{{[0-9]+_[0-9]+}}
9
10; CHECK: [[LOOP_BODY_LABEL:BB[0-9]+_[0-9]+]]: ; %loop_body
11; CHECK: s_cbranch_scc1 [[LOOP_BODY_LABEL]]
12
13; CHECK: s_endpgm
; A divergent (per-lane) branch on %p guards a uniform counted loop.
; The CHECK lines above verify the divergent guard lowers to
; s_and_saveexec_b64 and the uniform back-edge to s_cbranch_scc1.
; Block labels are load-bearing: FileCheck matches "%loop_body" in the
; emitted assembly, so only local value names differ from the original.
define amdgpu_ps void @test1(<8 x i32> inreg %rsrc, <2 x i32> %addr.base, i32 %y, i32 %p) {
main_body:
  %p.is.zero = icmp eq i32 %p, 0
  br i1 %p.is.zero, label %out, label %loop_body

loop_body:
  %i = phi i32 [ 0, %main_body ], [ %i.next, %loop_body ]

  ; Empty side-effecting inline asm keeps the loop from being deleted as dead.
  call void asm sideeffect "", "" ()

  %i.next = add i32 %i, 1
  %exit.cond = icmp sge i32 %i.next, 1000
  br i1 %exit.cond, label %out, label %loop_body

out:
  ret void
}
32
33; CHECK-LABEL: {{^}}test2:
34; CHECK: s_and_saveexec_b64
35; CHECK-NEXT: s_cbranch_execz
; Same shape as @test1 but with two uniform entries into the loop:
; the divergent workitem-id branch selects between early exit and %if,
; and the loop can be reached from either %if or %done0 (both feed 0
; into the counter phi). CFG/labels are unchanged from the original so
; the checked s_and_saveexec_b64 / s_cbranch_execz lowering is identical;
; only local value names and phi-operand whitespace differ.
define amdgpu_kernel void @test2(i32 addrspace(1)* %out, i32 %a, i32 %b) {
main_body:
  %id.x = call i32 @llvm.amdgcn.workitem.id.x() #1
  %tid.is.zero = icmp eq i32 %id.x, 0
  br i1 %tid.is.zero, label %done1, label %if

if:
  %a.is.zero = icmp eq i32 %a, 0
  br i1 %a.is.zero, label %done0, label %loop_body

loop_body:
  ; Counter starts at 0 whether the loop is entered from %if or %done0.
  %i = phi i32 [ 0, %if ], [ 0, %done0 ], [ %i.next, %loop_body ]

  ; Empty side-effecting inline asm keeps the loop from being deleted as dead.
  call void asm sideeffect "", "" ()

  %i.next = add i32 %i, 1
  %exit.cond = icmp sge i32 %i.next, 1000
  br i1 %exit.cond, label %done1, label %loop_body

done0:
  %b.is.zero = icmp eq i32 %b, 0
  br i1 %b.is.zero, label %done1, label %loop_body

done1:
  ret void
}
63
64declare i32 @llvm.amdgcn.workitem.id.x() #1
65
66attributes #1 = { nounwind readonly }
67