; RUN: …-nodes -verify -structurizecfg -verify -si-annotate-control-flow %s | FileCheck -check-prefix=IR %s

; Add extra verifier runs. There were some cases where invalid IR was
; produced but happened to be fixed by the later passes.
; IR-LABEL: @multi_divergent_region_exit_ret_ret(
; IR: %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %Pivot.inv)
; IR: %1 = extractvalue { i1, i64 } %0, 0
; IR: %2 = extractvalue { i1, i64 } %0, 1
; IR: br i1 %1, label %LeafBlock1, label %Flow

; IR: Flow:
; IR: %3 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
; IR: %4 = phi i1 [ %SwitchLeaf2.inv, %LeafBlock1 ], [ false, %entry ]
; IR: %5 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %2)
; IR: %6 = extractvalue { i1, i64 } %5, 0
; IR: %7 = extractvalue { i1, i64 } %5, 1
; IR: br i1 %6, label %LeafBlock, label %Flow1

; IR: LeafBlock:
; IR: br label %Flow1

; IR: LeafBlock1:
; IR: br label %Flow{{$}}

; IR: Flow2:
; IR: %8 = phi i1 [ false, %exit1 ], [ %12, %Flow1 ]
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %16)
; IR: %9 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %8)
; IR: %10 = extractvalue { i1, i64 } %9, 0
; IR: %11 = extractvalue { i1, i64 } %9, 1
; IR: br i1 %10, label %exit0, label %UnifiedReturnBlock

; IR: exit0:
; IR: store volatile i32 9, i32 addrspace(1)* undef
; IR: br label %UnifiedReturnBlock

; IR: Flow1:
; IR: %12 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %3, %Flow ]
; IR: %13 = phi i1 [ %SwitchLeaf.inv, %LeafBlock ], [ %4, %Flow ]
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %7)
; IR: %14 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %13)
; IR: %15 = extractvalue { i1, i64 } %14, 0
; IR: %16 = extractvalue { i1, i64 } %14, 1
; IR: br i1 %15, label %exit1, label %Flow2

; IR: exit1:
; IR: store volatile i32 17, i32 addrspace(3)* undef
; IR: br label %Flow2

; IR: UnifiedReturnBlock:
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %11)
; IR: ret void
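
; The checks above follow SIAnnotateControlFlow's if/else/end.cf protocol.
; A hedged reading of the skeleton (value names here are illustrative, and
; the i64 payload is assumed to be the saved exec mask for a wave64 target):
;
;   %r = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %cond)
;   %take.then = extractvalue { i1, i64 } %r, 0   ; branch condition for active lanes
;   %mask = extractvalue { i1, i64 } %r, 1        ; mask token handed to else/end.cf
;   br i1 %take.then, label %then, label %flow
;   ...
;   call void @llvm.amdgcn.end.cf.i64(i64 %mask)  ; rejoin: reactivate masked-off lanes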

; IR-LABEL: @multi_divergent_region_exit_unreachable_unreachable(
; IR: %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %Pivot.inv)

; IR: %5 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %2)

; IR: %8 = phi i1 [ false, %exit1 ], [ %12, %Flow1 ]
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %16)
; IR: %9 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %8)
; IR: br i1 %10, label %exit0, label %UnifiedUnreachableBlock

; IR: UnifiedUnreachableBlock:
; IR-NEXT: unreachable

; IR-LABEL: @multi_exit_region_divergent_ret_uniform_ret(
; IR: %divergent.cond0 = icmp slt i32 %tmp16, 2
; IR: llvm.amdgcn.if
; IR: br i1

; IR: {{^}}Flow:
; IR: %3 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
; IR: %4 = phi i1 [ %uniform.cond0.inv, %LeafBlock1 ], [ false, %entry ]
; IR: %5 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %2)
; IR: br i1 %6, label %LeafBlock, label %Flow1

; IR: {{^}}LeafBlock:
; IR: %divergent.cond1 = icmp eq i32 %tmp16, 1
; IR: %divergent.cond1.inv = xor i1 %divergent.cond1, true
; IR: br label %Flow1

; IR: LeafBlock1:
; IR: %uniform.cond0 = icmp eq i32 %arg3, 2
; IR: %uniform.cond0.inv = xor i1 %uniform.cond0, true
; IR: br label %Flow

; IR: Flow2:
; IR: %8 = phi i1 [ false, %exit1 ], [ %12, %Flow1 ]
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %16)
; IR: %9 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %8)
; IR: br i1 %10, label %exit0, label %UnifiedReturnBlock

; IR: exit0:
; IR: store volatile i32 9, i32 addrspace(1)* undef
; IR: br label %UnifiedReturnBlock

; IR: {{^}}Flow1:
; IR: %12 = phi i1 [ %divergent.cond1, %LeafBlock ], [ %3, %Flow ]
; IR: %13 = phi i1 [ %divergent.cond1.inv, %LeafBlock ], [ %4, %Flow ]
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %7)
; IR: %14 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %13)
; IR: %15 = extractvalue { i1, i64 } %14, 0
; IR: %16 = extractvalue { i1, i64 } %14, 1
; IR: br i1 %15, label %exit1, label %Flow2

; IR: exit1:
; IR: store volatile i32 17, i32 addrspace(3)* undef
; IR: br label %Flow2

; IR: UnifiedReturnBlock:
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %11)
; IR: ret void

; IR-LABEL: @multi_exit_region_uniform_ret_divergent_ret(
; IR: %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %Pivot.inv)
; IR: br i1 %1, label %LeafBlock1, label %Flow

; IR: Flow:
; IR: %3 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
; IR: %4 = phi i1 [ %SwitchLeaf2.inv, %LeafBlock1 ], [ false, %entry ]
; IR: %5 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %2)

; IR: %8 = phi i1 [ false, %exit1 ], [ %12, %Flow1 ]
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %16)
; IR: %9 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %8)

; IR-LABEL: @multi_divergent_region_exit_ret_ret_return_value(
; IR: Flow2:
; IR: %8 = phi i1 [ false, %exit1 ], [ %12, %Flow1 ]
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %16)

; IR: UnifiedReturnBlock:
; IR: %UnifiedRetVal = phi float [ 2.000000e+00, %Flow2 ], [ 1.000000e+00, %exit0 ]
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %11)
; IR: ret float %UnifiedRetVal
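
; When the divergent exits return values, exit unification also merges the
; returned value through a phi, as checked above. A hedged sketch of the
; corresponding source shape (hypothetical, not this test's input):
;
;   exit0:
;     ret float 1.000000e+00
;   exit1:
;     ret float 2.000000e+00
;
; which unification rewrites (before structurization, so the incoming blocks
; are still the original exits) to:
;
;   UnifiedReturnBlock:
;     %UnifiedRetVal = phi float [ 2.000000e+00, %exit1 ], [ 1.000000e+00, %exit0 ]
;     ret float %UnifiedRetVal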

; IR-LABEL: @uniform_branch_to_multi_divergent_region_exit_ret_ret_return_value(

; IR-LABEL: @multi_divergent_region_exit_ret_unreachable(
; IR: %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %Pivot.inv)

; IR: Flow:
; IR: %3 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
; IR: %4 = phi i1 [ %SwitchLeaf2.inv, %LeafBlock1 ], [ false, %entry ]
; IR: %5 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %2)

; IR: Flow2:
; IR: %8 = phi i1 [ false, %exit1 ], [ %12, %Flow1 ]
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %16)
; IR: %9 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %8)
; IR: br i1 %10, label %exit0, label %UnifiedReturnBlock

; IR: exit0:
; IR-NEXT: store volatile i32 17, i32 addrspace(3)* undef
; IR-NEXT: br label %UnifiedReturnBlock

; IR: Flow1:
; IR: %12 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %3, %Flow ]
; IR: %13 = phi i1 [ %SwitchLeaf.inv, %LeafBlock ], [ %4, %Flow ]
; IR: call void @llvm.amdgcn.end.cf.i64(i64 %7)
; IR: %14 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %13)
; IR: %15 = extractvalue { i1, i64 } %14, 0
; IR: %16 = extractvalue { i1, i64 } %14, 1
; IR: br i1 %15, label %exit1, label %Flow2

; IR: exit1:
; IR-NEXT: store volatile i32 9, i32 addrspace(1)* undef
; IR-NEXT: call void @llvm.amdgcn.unreachable()
; IR-NEXT: br label %Flow2

; IR: UnifiedReturnBlock:
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %11)
; IR-NEXT: ret void

; IR-LABEL: @indirect_multi_divergent_region_exit_ret_unreachable(

; IR: exit0: ; preds = %Flow2
; IR-NEXT: store volatile i32 17, i32 addrspace(3)* undef
; IR-NEXT: br label %UnifiedReturnBlock

; IR: indirect.exit1:
; IR: %load = load volatile i32, i32 addrspace(1)* undef
; IR: store volatile i32 %load, i32 addrspace(1)* undef
; IR: store volatile i32 9, i32 addrspace(1)* undef
; IR: call void @llvm.amdgcn.unreachable()
; IR-NEXT: br label %Flow2

; IR: UnifiedReturnBlock: ; preds = %exit0, %Flow2
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %11)
; IR-NEXT: ret void

; IR-LABEL: @multi_divergent_region_exit_ret_switch(

; IR-LABEL: @divergent_multi_ret_nest_in_uniform_triangle(

; IR-LABEL: @divergent_complex_multi_ret_nest_in_uniform_triangle(

; IR-LABEL: @uniform_complex_multi_ret_nest_in_divergent_triangle(
; IR: Flow1: ; preds = %uniform.ret1, %uniform.multi.exit.region
; IR: %6 = phi i1 [ false, %uniform.ret1 ], [ true, %uniform.multi.exit.region ]
; IR: br i1 %6, label %uniform.if, label %Flow2

; IR: Flow: ; preds = %uniform.then, %uniform.if
; IR: %7 = phi i1 [ %uniform.cond2.inv, %uniform.then ], [ %uniform.cond1.inv, %uniform.if ]
; IR: br i1 %7, label %uniform.endif, label %uniform.ret0

; IR: UnifiedReturnBlock: ; preds = %Flow3, %Flow2
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %5)
; IR-NEXT: ret void

; IR-LABEL: @multi_divergent_unreachable_exit(
; IR: UnifiedUnreachableBlock:
; IR-NEXT: call void @llvm.amdgcn.unreachable()
; IR-NEXT: br label %UnifiedReturnBlock

; IR: UnifiedReturnBlock:
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64
; IR-NEXT: ret void
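
; Note on the pattern above: a divergent unreachable cannot simply terminate
; the block, because only a subset of lanes reaches it. As the checks show,
; it is instead marked with the @llvm.amdgcn.unreachable() intrinsic and
; branches on to the unified return, keeping the function single-exit:
;
;   UnifiedUnreachableBlock:
;     call void @llvm.amdgcn.unreachable()
;     br label %UnifiedReturnBlock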

; IR-LABEL: @uniformly_reached_export
; IR-NEXT: .entry:
; IR: br i1 [[CND:%.*]], label %[[EXP:.*]], label %[[FLOW:.*]]

; IR: [[FLOW]]:
; IR-NEXT: phi
; IR-NEXT: br i1 [[CND2:%.*]], label %[[LOOP:.*]], label %UnifiedReturnBlock

; IR: [[LOOP]]:
; IR-NEXT: br i1 false, label %[[FLOW1:.*]], label %[[LOOP]]

; IR: [[EXP]]:
; IR-NEXT: call void @llvm.amdgcn.exp.compr.v2f16(i32 immarg 0, i32 immarg 15, <2 x half> <half 0xH…
; IR-NEXT: br label %[[FLOW]]

; IR: [[FLOW1]]:
; IR-NEXT: br label %UnifiedReturnBlock

; IR: UnifiedReturnBlock:
; IR-NEXT: call void @llvm.amdgcn.exp.f32(i32 9, i32 0, float undef, float undef, float undef, floa…
; IR-NEXT: ret void
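
; The export in the unified return block is the shader's final "done" export,
; moved there so it executes exactly once on the single exit path. A hedged
; sketch of that call (target 9 is the null export target and en=0 disables
; all components; the trailing i1 done/vm flags are assumed here, since the
; checked line above is cut off):
;
;   UnifiedReturnBlock:
;     call void @llvm.amdgcn.exp.f32(i32 9, i32 0, float undef, float undef,
;                                    float undef, float undef, i1 true, i1 true)
;     ret void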