/external/llvm-project/llvm/test/Transforms/InstSimplify/ |
D | cmp_ext.ll |
      6  ; CHECK-NEXT: [[SEXT:%.*]] = sext i32 [[X:%.*]] to i64
      8  ; CHECK-NEXT: [[CMP:%.*]] = icmp uge i64 [[ZEXT]], [[SEXT]]
     29  ; CHECK-NEXT: [[SEXT:%.*]] = sext i32 [[X:%.*]] to i64
     31  ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[ZEXT]], [[SEXT]]
     62  ; CHECK-NEXT: [[SEXT:%.*]] = sext i32 [[X:%.*]] to i64
     64  ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[ZEXT]], [[SEXT]]
     85  ; CHECK-NEXT: [[SEXT:%.*]] = sext i32 [[X:%.*]] to i64
     87  ; CHECK-NEXT: [[CMP:%.*]] = icmp sle i64 [[ZEXT]], [[SEXT]]
    108  ; CHECK-NEXT: [[SEXT:%.*]] = sext i32 [[X:%.*]] to i64
    110  ; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[SEXT]], [[ZEXT]]
    [all …]
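A minimal sketch (hypothetical function and value names) of the input shape these checks pin down: an icmp between a zero-extended and a sign-extended value of the same 32-bit source, which InstSimplify leaves in place in the cases the CHECK-NEXT lines verify.

    ; Sketch only; not the test file's actual contents.
    define i1 @zext_uge_sext(i32 %x, i32 %y) {
      %zext = zext i32 %y to i64
      %sext = sext i32 %x to i64
      %cmp = icmp uge i64 %zext, %sext
      ret i1 %cmp
    }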
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/ |
D | legalize-sextload-flat.mir |
     76  ; SI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
     77  ; SI: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
     83  ; VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXT_INREG]](s32)
     84  ; VI: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
     98  ; SI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
     99  ; SI: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
    105  ; VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXT_INREG]](s32)
    106  ; VI: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
    120  ; SI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD]](s32)
    121  ; SI: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
    [all …]
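This file and the -global/-local/-private variants below legalize the same shape: a sub-64-bit sign-extending load widened to a 32-bit (sext)load followed by a G_SEXT to s64. An IR-level sketch of that shape (hypothetical names; the tests themselves are written directly in MIR):

    ; Sketch only: an i8 load sign-extended to i64; legalization produces the
    ; G_SEXTLOAD/G_SEXT pair the checks above match.
    define i64 @sextload_i8_to_i64(i8* %p) {
      %v = load i8, i8* %p
      %e = sext i8 %v to i64
      ret i64 %e
    }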
|
D | regbankselect-sext.mir |
     14  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s64) = G_SEXT [[COPY]](s32)
     29  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s64) = G_SEXT [[TRUNC]](s16)
     64  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s16) = G_SEXT [[TRUNC]](s1)
     83  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s32) = G_SEXT [[TRUNC]](s1)
    102  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s64) = G_SEXT [[TRUNC]](s1)
    182  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s16) = G_SEXT [[TRUNC]](s1)
    198  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s32) = G_SEXT [[TRUNC]](s1)
    214  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s64) = G_SEXT [[TRUNC]](s1)
    230  ; CHECK: [[SEXT:%[0-9]+]]:vgpr(s16) = G_SEXT [[TRUNC]](s1)
    246  ; CHECK: [[SEXT:%[0-9]+]]:vgpr(s32) = G_SEXT [[TRUNC]](s1)
    [all …]
|
D | legalize-sextload-global.mir |
     82  ; GFX8: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
     83  ; GFX8: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
     87  ; GFX6: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
     88  ; GFX6: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
    102  ; GFX8: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
    103  ; GFX8: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
    107  ; GFX6: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
    108  ; GFX6: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
    122  ; GFX8: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD]](s32)
    123  ; GFX8: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
    [all …]
|
D | legalize-sextload-local.mir |
     57  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
     58  ; CHECK: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
     72  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
     73  ; CHECK: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
     87  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD]](s32)
     88  ; CHECK: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
|
D | legalize-sextload-private.mir |
     59  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
     60  ; CHECK: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
     74  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SEXTLOAD]](s32)
     75  ; CHECK: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
     89  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD]](s32)
     90  ; CHECK: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
|
D | legalize-sextload-constant-32bit.mir |
     15  ; CI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD]](s32)
     16  ; CI: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
     44  ; CI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[OR]](s32)
     45  ; CI: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
     89  ; CI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[OR2]](s32)
     90  ; CI: $vgpr0_vgpr1 = COPY [[SEXT]](s64)
    158  ; CI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[OR]](s16)
    159  ; CI: $vgpr0 = COPY [[SEXT]](s32)
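The [[OR]]/[[OR2]] operands above come from assembling an under-aligned load byte-by-byte before the final extension. A hedged IR-level sketch (addrspace(6) assumed here to be AMDGPU's 32-bit constant address space):

    ; Sketch only: an align-1 load from a 32-bit constant pointer; legalization
    ; builds the value from narrower loads OR'd together, then sign-extends.
    define i64 @sextload_constant32(i32 addrspace(6)* %p) {
      %v = load i32, i32 addrspace(6)* %p, align 1
      %e = sext i32 %v to i64
      ret i64 %e
    }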
|
D | legalize-icmp.mir |
    441  ; GFX7: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1)
    442  ; GFX7: $vgpr0 = COPY [[SEXT]](s32)
    447  ; GFX8: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1)
    448  ; GFX8: $vgpr0 = COPY [[SEXT]](s32)
    453  ; GFX9: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1)
    454  ; GFX9: $vgpr0 = COPY [[SEXT]](s32)
    471  ; GFX7: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1)
    472  ; GFX7: $vgpr0 = COPY [[SEXT]](s32)
    477  ; GFX8: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1)
    478  ; GFX8: $vgpr0 = COPY [[SEXT]](s32)
    [all …]
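The pattern here is a 1-bit compare result sign-extended to a full register (0 or -1). An IR-level sketch (hypothetical names):

    ; Sketch only: legalization keeps this as a G_ICMP producing s1 followed by
    ; a G_SEXT to s32, as the GFX7/GFX8/GFX9 checks all require.
    define i32 @icmp_sext(i32 %a, i32 %b) {
      %c = icmp eq i32 %a, %b
      %s = sext i1 %c to i32
      ret i32 %s
    }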
|
D | regbankselect-smin.mir |
    118  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s32) = G_SEXT [[TRUNC]](s16)
    121  ; CHECK: [[ICMP:%[0-9]+]]:sgpr(s32) = G_ICMP intpred(slt), [[SEXT]](s32), [[SEXT1]]
    122  ; CHECK: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[ICMP]](s32), [[SEXT]], [[SEXT1]]
    148  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s32) = G_SEXT [[TRUNC]](s16)
    151  ; CHECK: [[ICMP:%[0-9]+]]:sgpr(s32) = G_ICMP intpred(slt), [[SEXT]](s32), [[SEXT1]]
    152  ; CHECK: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[ICMP]](s32), [[SEXT]], [[SEXT1]]
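For a 16-bit signed min on SGPRs, both operands are sign-extended to 32 bits, compared, and selected; regbankselect-smax.mir below is the same pattern with intpred(sgt). An IR-level sketch (hypothetical names):

    ; Sketch only: the icmp slt + select idiom corresponding to the
    ; G_ICMP/G_SELECT sequence in the checks.
    define i16 @smin_i16(i16 %a, i16 %b) {
      %cmp = icmp slt i16 %a, %b
      %min = select i1 %cmp, i16 %a, i16 %b
      ret i16 %min
    }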
|
D | regbankselect-smax.mir |
    115  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s32) = G_SEXT [[TRUNC]](s16)
    118  ; CHECK: [[ICMP:%[0-9]+]]:sgpr(s32) = G_ICMP intpred(sgt), [[SEXT]](s32), [[SEXT1]]
    119  ; CHECK: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[ICMP]](s32), [[SEXT]], [[SEXT1]]
    145  ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s32) = G_SEXT [[TRUNC]](s16)
    148  ; CHECK: [[ICMP:%[0-9]+]]:sgpr(s32) = G_ICMP intpred(sgt), [[SEXT]](s32), [[SEXT1]]
    149  ; CHECK: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[ICMP]](s32), [[SEXT]], [[SEXT1]]
|
D | legalize-fptosi.mir |
    391  ; SI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
    405  ; SI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
    407  ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
    426  ; VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
    440  ; VI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
    442  ; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
    473  ; SI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
    487  ; SI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
    489  ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
    533  ; VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
    [all …]
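These checks cover the expansion of a float-to-signed-i64 conversion: an arithmetic shift recovers the sign word, G_SEXT widens it to a 64-bit mask, and G_XOR applies it. The source-level operation, sketched in IR:

    ; Sketch only: fptosi to i64 has no single hardware instruction here, so
    ; legalization expands it with the ashr/sext/xor sequence above.
    define i64 @fptosi_f32_to_i64(float %x) {
      %r = fptosi float %x to i64
      ret i64 %r
    }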
|
D | irtranslator-struct-return-intrinsics.ll |
     15  ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[INT1]](s1)
     17  …; CHECK: G_STORE [[SEXT]](s32), [[COPY2]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrsp…
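The checks describe an i1 field of a struct-returning intrinsic being sign-extended and stored. A sketch using a stand-in intrinsic (the test itself uses an AMDGPU intrinsic; llvm.sadd.with.overflow is only a convenient struct-returning example):

    ; Sketch only: extract the i1 field, sext it to i32, store it; the
    ; IRTranslator emits the G_SEXT + G_STORE the checks match.
    define void @store_bit(i32 %a, i32 %b, i32 addrspace(1)* %out) {
      %pair = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
      %bit = extractvalue { i32, i1 } %pair, 1
      %ext = sext i1 %bit to i32
      store i32 %ext, i32 addrspace(1)* %out
      ret void
    }
    declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)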
|
/external/llvm-project/mlir/test/Conversion/SPIRVToLLVM/ |
D | shift-ops-to-llvm.mlir |
     15  // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : !llvm.i16 to !llvm.i32
     16  // CHECK: llvm.ashr %{{.*}}, %[[SEXT]] : !llvm.i32
     33  // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : !llvm.vec<4 x i32> to !llvm.vec<4 x i64>
     34  // CHECK: llvm.ashr %{{.*}}, %[[SEXT]] : !llvm.vec<4 x i64>
     55  // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : !llvm.i16 to !llvm.i32
     56  // CHECK: llvm.lshr %{{.*}}, %[[SEXT]] : !llvm.i32
     73  // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : !llvm.vec<4 x i32> to !llvm.vec<4 x i64>
     74  // CHECK: llvm.lshr %{{.*}}, %[[SEXT]] : !llvm.vec<4 x i64>
     95  // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : !llvm.i16 to !llvm.i32
     96  // CHECK: llvm.shl %{{.*}}, %[[SEXT]] : !llvm.i32
    [all …]
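SPIR-V shift ops may take a shift amount narrower than the base operand; the conversion sign-extends the amount to the base type first, as the llvm.sext + shift pairs above show. The resulting semantics in plain LLVM IR (hypothetical names):

    ; Sketch only: a 16-bit amount extended to the 32-bit operand width
    ; before the arithmetic shift.
    define i32 @ashr_narrow_amount(i32 %base, i16 %amount) {
      %ext = sext i16 %amount to i32
      %r = ashr i32 %base, %ext
      ret i32 %r
    }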
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | sve-fixed-length-int-compares.ll |
     53  ; CHECK-NEXT: mov [[SEXT:z[0-9]+]].b, [[CMP]]/z, #-1
     54  ; CHECK-NEXT: st1b { [[SEXT]].b }, [[PG]], [x0]
     70  ; VBITS_GE_512-NEXT: mov [[SEXT:z[0-9]+]].b, [[CMP]]/z, #-1
     71  ; VBITS_GE_512-NEXT: st1b { [[SEXT]].b }, [[PG]], [x0]
    102  ; VBITS_GE_1024-NEXT: mov [[SEXT:z[0-9]+]].b, [[CMP]]/z, #-1
    103  ; VBITS_GE_1024-NEXT: st1b { [[SEXT]].b }, [[PG]], [x0]
    119  ; VBITS_GE_2048-NEXT: mov [[SEXT:z[0-9]+]].b, [[CMP]]/z, #-1
    120  ; VBITS_GE_2048-NEXT: st1b { [[SEXT]].b }, [[PG]], [x0]
    156  ; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
    157  ; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x0]
    [all …]
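The `mov ... /z, #-1` materializes compare results as all-ones or all-zero lanes, i.e. a sign-extended vector of i1. A sketch of the IR shape behind these asm checks (hypothetical vector length and names):

    ; Sketch only: compare, sign-extend the i1 lanes, store.
    define void @icmp_eq_store(<32 x i8>* %a, <32 x i8>* %b) {
      %va = load <32 x i8>, <32 x i8>* %a
      %vb = load <32 x i8>, <32 x i8>* %b
      %cmp = icmp eq <32 x i8> %va, %vb
      %ext = sext <32 x i1> %cmp to <32 x i8>
      store <32 x i8> %ext, <32 x i8>* %a
      ret void
    }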
|
D | aarch64-codegen-prepare-atp.ll |
     50  ; CHECK: %[[SEXT:.*]] = sext i32 %i to i64
     51  ; CHECK: %add = add nsw i64 %[[SEXT]], 1
     52  ; CHECK: %add2 = add nsw i64 %[[SEXT]], 2
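The address-type-promotion logic this file tests hoists the extension: instead of sign-extending each add separately, one sext of %i feeds both 64-bit adds. A sketch (hypothetical) of the pre-promotion input:

    ; Sketch only: after promotion this becomes one sext plus two i64 adds,
    ; matching %add and %add2 in the checks.
    define i64 @two_extended_adds(i32 %i) {
      %a = add nsw i32 %i, 1
      %ea = sext i32 %a to i64
      %b = add nsw i32 %i, 2
      %eb = sext i32 %b to i64
      %sum = add i64 %ea, %eb
      ret i64 %sum
    }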
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | signext.ll |
     10  ; CHECK-NEXT: [[SEXT:%.*]] = shl i32 [[X:%.*]], 16
     11  ; CHECK-NEXT: [[T3:%.*]] = ashr exact i32 [[SEXT]], 16
     37  ; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i32> [[X:%.*]], <i32 16, i32 16>
     38  ; CHECK-NEXT: [[T3:%.*]] = ashr exact <2 x i32> [[SEXT]], <i32 16, i32 16>
     49  ; CHECK-NEXT: [[SEXT:%.*]] = shl i32 [[X:%.*]], 16
     50  ; CHECK-NEXT: [[T3:%.*]] = ashr exact i32 [[SEXT]], 16
     61  ; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i32> [[X:%.*]], <i32 16, i32 16>
     62  ; CHECK-NEXT: [[T3:%.*]] = ashr exact <2 x i32> [[SEXT]], <i32 16, i32 16>
    110  ; CHECK-NEXT: [[SEXT:%.*]] = shl i32 [[X:%.*]], 24
    111  ; CHECK-NEXT: [[T3:%.*]] = ashr exact i32 [[SEXT]], 24
    [all …]
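The shl + ashr pair is InstCombine's canonical form for sign-extending the low bits of a register in place. A worked sketch of the 16-bit case:

    ; Sketch only: shl 16 places the low half in the top half, ashr 16 then
    ; smears its sign bit back down, e.g. 0x0000FFFF becomes 0xFFFFFFFF (-1).
    ; 'exact' is valid because the shl guarantees the shifted-out bits are 0.
    define i32 @sext_low16(i32 %x) {
      %sext = shl i32 %x, 16
      %t3 = ashr exact i32 %sext, 16
      ret i32 %t3
    }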
|
D | sext.ll |
    208  ; CHECK-NEXT: [[SEXT:%.*]] = sext <2 x i1> [[CMP]] to <2 x i16>
    209  ; CHECK-NEXT: store <2 x i16> [[SEXT]], <2 x i16>* [[DST:%.*]], align 4
    236  ; CHECK-NEXT: [[SEXT:%.*]] = add nsw i32 [[TMP1]], -1
    237  ; CHECK-NEXT: ret i32 [[SEXT]]
    249  ; CHECK-NEXT: [[SEXT:%.*]] = add nsw i16 [[TMP1]], -1
    250  ; CHECK-NEXT: [[EXT:%.*]] = sext i16 [[SEXT]] to i32
    262  ; CHECK-NEXT: [[SEXT:%.*]] = ashr i32 [[AND]], 31
    263  ; CHECK-NEXT: ret i32 [[SEXT]]
    274  ; CHECK-NEXT: [[SEXT:%.*]] = ashr i16 [[AND]], 15
    275  ; CHECK-NEXT: [[EXT:%.*]] = sext i16 [[SEXT]] to i32
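The ashr-by-31 (or by-15 for i16) form above is an arithmetic replacement for sign-extending a sign-bit test. A worked sketch:

    ; Sketch only: ashr by bit-width minus one broadcasts the sign bit,
    ; yielding 0 for non-negative %v and -1 for negative %v, the same value
    ; as sext(icmp slt i32 %v, 0) to i32.
    define i32 @sign_broadcast(i32 %v) {
      %sext = ashr i32 %v, 31
      ret i32 %sext
    }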
|
/external/llvm-project/llvm/test/CodeGen/AArch64/GlobalISel/ |
D | combine-ext.mir |
     64  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s16)
     65  ; CHECK: $x0 = COPY [[SEXT]](s64)
     78  ; CHECK: [[SEXT:%[0-9]+]]:_(<2 x s64>) = G_SEXT [[COPY]](<2 x s16>)
     79  ; CHECK: $q0 = COPY [[SEXT]](<2 x s64>)
    120  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s16)
    121  ; CHECK: $x0 = COPY [[SEXT]](s64)
    134  ; CHECK: [[SEXT:%[0-9]+]]:_(<2 x s64>) = G_SEXT [[COPY]](<2 x s16>)
    135  ; CHECK: $q0 = COPY [[SEXT]](<2 x s64>)
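The file name suggests these checks verify ext-combining: a chain of extensions collapsing into a single wider G_SEXT straight from the source register. The IR-level analogue of that fold (hypothetical names):

    ; Sketch only: back-to-back sign extensions fold to one sext to the
    ; final width (here, sext i16 %x to i64).
    define i64 @sext_of_sext(i16 %x) {
      %mid = sext i16 %x to i32
      %out = sext i32 %mid to i64
      ret i64 %out
    }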
|
D | arm64-irtranslator-gep.ll |
     13  ; O0: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
     15  ; O0: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
     19  ; O0: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
     32  ; O3: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
     34  ; O3: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
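The translated shape is a GEP with a 32-bit index: the IRTranslator sign-extends the index to 64 bits and scales it by the element size with G_MUL; at O0 the checks show the single [[SEXT]] feeding two G_MULs. A sketch with hypothetical types:

    ; Sketch only: the i32 index %i is sign-extended to i64 and multiplied
    ; by 4 (the i32 element size) during IR translation.
    define i32 @gep_sext_index([4 x i32]* %arr, i32 %i) {
      %p = getelementptr [4 x i32], [4 x i32]* %arr, i32 0, i32 %i
      %v = load i32, i32* %p
      ret i32 %v
    }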
|
D | legalize-sext-copy.mir |
     13  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
     14  ; CHECK: $x0 = COPY [[SEXT]](s64)
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | sint_to_fp.f64.ll |
     75  ; SI: s_sext_i32_i8 [[SEXT:s[0-9]+]], [[VAL]]
     78  ; VI: s_sext_i32_i16 [[SEXT:s[0-9]+]], [[BFE]]
     80  ; GCN: v_cvt_f64_i32_e32 v{{\[[0-9]+:[0-9]+\]}}, [[SEXT]]
     88  ; GCN: v_bfe_i32 [[SEXT:v[0-9]+]]
     89  ; GCN: v_cvt_f64_i32_e32 v{{\[[0-9]+:[0-9]+\]}}, [[SEXT]]
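There is no direct sub-32-bit-to-f64 convert here, so the backend sign-extends to 32 bits first (s_sext_i32_i8 / s_sext_i32_i16 / v_bfe_i32) and then uses the i32-to-f64 convert the checks match. The source operation, sketched in IR:

    ; Sketch only: a small signed integer converted to double.
    define double @sitofp_i8_to_f64(i8 %v) {
      %r = sitofp i8 %v to double
      ret double %r
    }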
|
/external/llvm-project/llvm/test/CodeGen/X86/GlobalISel/ |
D | legalize-ptr-add.mir |
     37  ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[C]](s8)
     38  ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[DEF]], [[SEXT]](s32)
     59  ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[C]](s16)
     60  ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[DEF]], [[SEXT]](s32)
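Offsets narrower than the pointer width are sign-extended before G_PTR_ADD. An IR-level sketch of the i8-offset case (hypothetical names):

    ; Sketch only: a GEP whose index type is i8; on 32-bit x86 the offset is
    ; sign-extended to s32 before the pointer add, as the checks require.
    define i8* @ptr_add_narrow(i8* %p, i8 %off) {
      %q = getelementptr i8, i8* %p, i8 %off
      ret i8* %q
    }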
|
D | legalize-ext.mir |
    371  ; X32: [[SEXT:%[0-9]+]]:_(s16) = G_SEXT [[COPY]](s8)
    372  ; X32: $ax = COPY [[SEXT]](s16)
    376  ; X64: [[SEXT:%[0-9]+]]:_(s16) = G_SEXT [[COPY]](s8)
    377  ; X64: $ax = COPY [[SEXT]](s16)
    399  ; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s8)
    400  ; X32: $eax = COPY [[SEXT]](s32)
    404  ; X64: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s8)
    405  ; X64: $eax = COPY [[SEXT]](s32)
    427  ; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s16)
    428  ; X32: $eax = COPY [[SEXT]](s32)
    [all …]
|
D | legalize-ext-x86-64.mir |
    108  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s8)
    109  ; CHECK: $rax = COPY [[SEXT]](s64)
    131  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s16)
    132  ; CHECK: $rax = COPY [[SEXT]](s64)
    154  ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s32)
    155  ; CHECK: $rax = COPY [[SEXT]](s64)
|
/external/llvm-project/llvm/test/Instrumentation/InstrProfiling/ |
D | icall.ll |
     10  …le=mips-unknown-linux -instrprof -vp-static-alloc=true -S | FileCheck %s --check-prefix=STATIC-SEXT
     11  …=mips64-unknown-linux -instrprof -vp-static-alloc=true -S | FileCheck %s --check-prefix=STATIC-SEXT
     55  ; STATIC-SEXT: call void @__llvm_profile_instrument_target(i64 %3, i8* bitcast ({ i64, i64, i64*, i…
     59  ; STATIC-SEXT: declare void @__llvm_profile_instrument_target(i64, i8*, i32 signext)
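The STATIC-SEXT prefix exists because the MIPS calling conventions pass 32-bit integer arguments sign-extended, so the profiling runtime hook is declared with a signext parameter there, as the declare line shows. A sketch (hypothetical) of the kind of site indirect-call value profiling instruments:

    ; Sketch only: an indirect call; instrumentation records the target
    ; pointer via __llvm_profile_instrument_target before the call.
    define i32 @dispatch(i32 ()* %fp) {
      %r = call i32 %fp()
      ret i32 %r
    }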
|