
… -mtriple=amdgcn-- -S -structurizecfg -si-annotate-control-flow %s | FileCheck -check-prefix=OPT %s
; OPT-LABEL: @break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP2:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[LSR_IV_NEXT:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp slt i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: [[TMP0:%.*]] = xor i1 [[CMP1]], true
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[TMP1:%.*]] = phi i1 [ [[TMP0]], [[BB4]] ], [ true, [[BB1]] ]
; OPT-NEXT: [[TMP2]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP1]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP2]])
; OPT-NEXT: br i1 [[TMP3]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP2]])
; OPT-NEXT: ret void
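For context, here is a minimal sketch of the kind of input IR that would yield the @break_loop checks above. It is reconstructed from the check lines themselves (block names, the %lsr.iv induction phi, and the break condition), not copied from the test source, so the exact function body, calling convention, and attributes are assumptions. After -structurizecfg plus -si-annotate-control-flow, the divergent exit out of bb4 is expected to be funneled through the Flow block with llvm.amdgcn.if.break / llvm.amdgcn.loop / llvm.amdgcn.end.cf, which is what the checks verify.

; Hypothetical pre-annotation input (reconstructed, not the verbatim test body):
; the break in bb4 depends on %my.tmp, which is derived from the divergent
; workitem id, so the loop exit must be handled per-lane.
declare i32 @llvm.amdgcn.workitem.id.x()

define amdgpu_kernel void @break_loop(i32 %arg) {
bb:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %my.tmp = sub i32 %id, %arg
  br label %bb1

bb1:                                              ; loop header
  %lsr.iv = phi i32 [ undef, %bb ], [ %lsr.iv.next, %bb4 ]
  %lsr.iv.next = add i32 %lsr.iv, 1
  %cmp0 = icmp slt i32 %lsr.iv.next, 0
  br i1 %cmp0, label %bb4, label %bb9

bb4:                                              ; divergent break when %cmp1 is false
  %load = load volatile i32, i32 addrspace(1)* undef, align 4
  %cmp1 = icmp slt i32 %my.tmp, %load
  br i1 %cmp1, label %bb1, label %bb9

bb9:
  ret void
}

Note how the break condition in the checked output is the negation of %cmp1 (the xor with true) because the original branch continues the loop on %cmp1 and leaves it otherwise; the remaining functions below are variants that feed constant, undef, constant-expression, or pre-inverted conditions into the same break phi.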

; OPT-LABEL: @undef_phi_cond_break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP0:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[LSR_IV_NEXT:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[MY_TMP3:%.*]] = phi i1 [ [[CMP1]], [[BB4]] ], [ undef, [[BB1]] ]
; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[MY_TMP3]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP0]])
; OPT-NEXT: br i1 [[TMP1]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP0]])
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void

; OPT-LABEL: @constexpr_phi_cond_break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP0:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[LSR_IV_NEXT:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[MY_TMP3:%.*]] = phi i1 [ [[CMP1]], [[BB4]] ], [ icmp ne (i32 addrspace(3)* inttopt…
; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[MY_TMP3]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP0]])
; OPT-NEXT: br i1 [[TMP1]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP0]])
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void

; OPT-LABEL: @true_phi_cond_break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP0:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[LSR_IV_NEXT:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[MY_TMP3:%.*]] = phi i1 [ [[CMP1]], [[BB4]] ], [ true, [[BB1]] ]
; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[MY_TMP3]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP0]])
; OPT-NEXT: br i1 [[TMP1]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP0]])
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void

; OPT-LABEL: @false_phi_cond_break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP0:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[LSR_IV_NEXT:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[MY_TMP3:%.*]] = phi i1 [ [[CMP1]], [[BB4]] ], [ false, [[BB1]] ]
; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[MY_TMP3]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP0]])
; OPT-NEXT: br i1 [[TMP1]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP0]])
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void

; OPT-LABEL: @invert_true_phi_cond_break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP1:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[LSR_IV_NEXT:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[MY_TMP3:%.*]] = phi i1 [ [[CMP1]], [[BB4]] ], [ true, [[BB1]] ]
; OPT-NEXT: [[TMP0:%.*]] = xor i1 [[MY_TMP3]], true
; OPT-NEXT: [[TMP1]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP0]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP2:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP1]])
; OPT-NEXT: br i1 [[TMP2]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP1]])
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void