/external/llvm/test/Transforms/EarlyCSE/ |
D | guards.ll
      49  ; CHECK-NEXT: %cond0 = icmp slt i32 %val, 40
      50  ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
      53  %cond0 = icmp slt i32 %val, 40
      54  call void(i1,...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
      68  ; CHECK-NEXT: %cond0 = icmp slt i32 %val, 40
      69  ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
      76  %cond0 = icmp slt i32 %val, 40
      77  call void(i1,...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
      88  ; CHECK-NEXT: %cond0 = icmp slt i32 %val, 40
      89  ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond0 [all …]
|
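For context on the guards.ll hits above: they exercise how EarlyCSE handles calls to @llvm.experimental.guard. A minimal hypothetical sketch of the redundant-guard shape those CHECK lines describe (the function name is illustrative and the body is not taken from the test file):

    declare void @llvm.experimental.guard(i1, ...)

    define void @redundant_guard_sketch(i32 %val) {
    entry:
      ; Guard on %cond0; the CHECK-NEXT lines above expect this call to survive.
      %cond0 = icmp slt i32 %val, 40
      call void (i1, ...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
      ; A second guard on the same %cond0 is redundant once the first has executed,
      ; so a pass like EarlyCSE can drop it.
      call void (i1, ...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
      ret void
    }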
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | sminmax.v2i16.ll
     163  %cond0 = icmp sgt <2 x i16> %val0, %val1
     164  %sel0 = select <2 x i1> %cond0, <2 x i16> %val0, <2 x i16> %val1
     165  %sel1 = select <2 x i1> %cond0, <2 x i16> %val1, <2 x i16> %val0
     179  %cond0 = icmp sgt <2 x i16> %val0, %val1
     180  %sel0 = select <2 x i1> %cond0, <2 x i16> %val0, <2 x i16> %val1
     181  %sel1 = select <2 x i1> %cond0, <2 x i16> %val1, <2 x i16> %val0
     194  %cond0 = icmp sgt <4 x i16> %val0, %val1
     195  %sel0 = select <4 x i1> %cond0, <4 x i16> %val0, <4 x i16> %val1
     196  %sel1 = select <4 x i1> %cond0, <4 x i16> %val1, <4 x i16> %val0
     208  %cond0 = icmp sgt <2 x i16> %val0, %val1 [all …]
|
D | sminmax.ll
     197  %cond0 = icmp sgt i32 %val0, %val1
     198  %sel0 = select i1 %cond0, i32 %val0, i32 %val1
     199  %sel1 = select i1 %cond0, i32 %val1, i32 %val0
     216  %cond0 = icmp sgt i32 %val0, %val1
     217  %sel0 = select i1 %cond0, i32 %val0, i32 %val1
     218  %sel1 = select i1 %cond0, i32 %val1, i32 %val0
     235  %cond0 = icmp sgt <4 x i32> %val0, %val1
     236  %sel0 = select <4 x i1> %cond0, <4 x i32> %val0, <4 x i32> %val1
     237  %sel1 = select <4 x i1> %cond0, <4 x i32> %val1, <4 x i32> %val0
     253  %cond0 = icmp sgt i32 %val0, %val1 [all …]
|
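The sminmax hits above (and the matching entries under /external/llvm below) all follow one idiom: a single signed-greater-than compare feeding two selects, which the AMDGPU sminmax tests expect to lower to max/min instructions. A minimal sketch, assuming the scalar i32 case (the function and output parameter names are illustrative, not from the tests):

    define void @smax_smin_sketch(i32 %val0, i32 %val1, i32* %out0, i32* %out1) {
      %cond0 = icmp sgt i32 %val0, %val1
      %sel0 = select i1 %cond0, i32 %val0, i32 %val1   ; smax(%val0, %val1)
      %sel1 = select i1 %cond0, i32 %val1, i32 %val0   ; smin(%val0, %val1)
      store i32 %sel0, i32* %out0
      store i32 %sel1, i32* %out1
      ret void
    }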
D | multi-divergent-exit-region.ll
     182  ; IR: %divergent.cond0 = icmp slt i32 %tmp16, 2
     198  ; IR: %uniform.cond0 = icmp eq i32 %arg3, 2
     199  ; IR: %10 = xor i1 %uniform.cond0, true
     244  %divergent.cond0 = icmp slt i32 %tmp16, 2
     245  br i1 %divergent.cond0, label %LeafBlock, label %LeafBlock1
     252  %uniform.cond0 = icmp eq i32 %arg3, 2
     253  br i1 %uniform.cond0, label %exit0, label %exit1
     372  %divergent.cond0 = icmp eq i32 %vgpr, 3
     373  br i1 %divergent.cond0, label %exit0, label %exit1
     562  %uniform.cond0 = icmp eq i32 %arg0, 4 [all …]
|
D | structurize1.ll | 7 ; if (cond0) {
|
D | uniform-cfg.ll
     261  define amdgpu_kernel void @icmp_users_different_blocks(i32 %cond0, i32 %cond1, i32 addrspace(1)* %o…
     264  %cmp0 = icmp sgt i32 %cond0, 0
|
D | branch-relaxation.ll
     354  define amdgpu_kernel void @expand_requires_expand(i32 %cond0) #0 {
     357  %cmp0 = icmp slt i32 %cond0, 0
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | sminmax.ll
     160  %cond0 = icmp sgt i32 %val0, %val1
     161  %sel0 = select i1 %cond0, i32 %val0, i32 %val1
     162  %sel1 = select i1 %cond0, i32 %val1, i32 %val0
     179  %cond0 = icmp sgt i32 %val0, %val1
     180  %sel0 = select i1 %cond0, i32 %val0, i32 %val1
     181  %sel1 = select i1 %cond0, i32 %val1, i32 %val0
     198  %cond0 = icmp sgt <4 x i32> %val0, %val1
     199  %sel0 = select <4 x i1> %cond0, <4 x i32> %val0, <4 x i32> %val1
     200  %sel1 = select <4 x i1> %cond0, <4 x i32> %val1, <4 x i32> %val0
     216  %cond0 = icmp sgt i32 %val0, %val1 [all …]
|
D | structurize1.ll | 7 ; if (cond0) {
|
D | uniform-cfg.ll
     262  define void @icmp_users_different_blocks(i32 %cond0, i32 %cond1, i32 addrspace(1)* %out) {
     265  %cmp0 = icmp sgt i32 %cond0, 0
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/EarlyCSE/ |
D | guards.ll
      52  ; CHECK-NEXT: %cond0 = icmp slt i32 %val, 40
      53  ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
      56  %cond0 = icmp slt i32 %val, 40
      57  call void(i1,...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
      71  ; CHECK-NEXT: %cond0 = icmp slt i32 %val, 40
      72  ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
      79  %cond0 = icmp slt i32 %val, 40
      80  call void(i1,...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
      91  ; CHECK-NEXT: %cond0 = icmp slt i32 %val, 40
      92  ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond0 [all …]
|
/external/llvm/test/Transforms/Util/ |
D | flattencfg.ll
      12  %cond0 = and i1 %cmp0, %cmp1
      13  br i1 %cond0, label %b0, label %b1
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Util/ |
D | flattencfg.ll
      12  %cond0 = and i1 %cmp0, %cmp1
      13  br i1 %cond0, label %b0, label %b1
|
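Both flattencfg.ll hits show the same shape: two i1 values combined with an 'and' that feeds a single branch. A minimal hypothetical sketch of that pattern (the surrounding compares and block bodies are assumptions, not taken from the test file):

    define i32 @and_branch_sketch(i32 %a, i32 %b) {
    entry:
      %cmp0 = icmp sgt i32 %a, 0
      %cmp1 = icmp sgt i32 %b, 0
      ; Both conditions are merged into one i1 and branched on once.
      %cond0 = and i1 %cmp0, %cmp1
      br i1 %cond0, label %b0, label %b1

    b0:
      ret i32 1

    b1:
      ret i32 0
    }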
/external/llvm/test/Analysis/ScalarEvolution/ |
D | pr27315.ll
      21  %cond0 = call i1 @use(i64 %iv.inc.maywrap.sext)
      22  br i1 %cond0, label %be, label %leave
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/ScalarEvolution/ |
D | pr27315.ll
      21  %cond0 = call i1 @use(i64 %iv.inc.maywrap.sext)
      22  br i1 %cond0, label %be, label %leave
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | peephole-recurrence.mir
      10  %cond0 = icmp eq i32 %a, 0
      11  br i1 %cond0, label %bb4, label %bb3
      41  %cond0 = icmp eq i32 %a, 0
      42  br i1 %cond0, label %bb4, label %bb3
|
D | sink-blockfreq.ll
      24  %cond0 = icmp slt i32 %xx, 0
      25  br i1 %cond0, label %F, label %exit, !prof !0
|
D | licm-dominance.ll
      28  %cond0 = icmp eq i8 %0, 0
      29  br i1 %cond0, label %for.inc.i, label %if.then26.i
|
/external/llvm/test/CodeGen/ARM/ |
D | 2011-12-14-machine-sink.ll
      17  %cond0 = icmp ne i32 %arg1, 42
      18  %v.5 = select i1 %cond0, i32 undef, i32 0
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/ |
D | 2011-12-14-machine-sink.ll
      17  %cond0 = icmp ne i32 %arg1, 42
      18  %v.5 = select i1 %cond0, i32 undef, i32 0
|
/external/llvm/test/CodeGen/X86/ |
D | sink-blockfreq.ll
      24  %cond0 = icmp slt i32 %xx, 0
      25  br i1 %cond0, label %F, label %exit, !prof !0
|
D | licm-dominance.ll
      28  %cond0 = icmp eq i8 %0, 0
      29  br i1 %cond0, label %for.inc.i, label %if.then26.i
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopVectorize/AArch64/ |
D | aarch64-predication.ll
      61  %cond0 = icmp sgt i64 %tmp2, 0
      62  br i1 %cond0, label %if.then, label %for.inc
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | buffer_assignment_test.cc
    1933  auto cond0 =  in TEST_F() local
    1941  HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));  in TEST_F()
    2166  auto cond0 = module->AddEmbeddedComputation(build_cond());  in TEST_F() local
    2169  HloInstruction::CreateWhile(r0s32, cond0, body0, infeed_data));  in TEST_F()
    2252  auto cond0 =  in TEST_F() local
    2260  HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));  in TEST_F()
    2472  auto cond0 =  in TEST_F() local
    2480  HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));  in TEST_F()
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/JumpThreading/ |
D | thread-loads.ll
     454  ; CHECK: br i1 %c0, label %cond3, label %cond0
     467  br i1 %c0, label %cond2, label %cond0
     469  cond0:
|