; RUN: opt -lower-constant-intrinsics -S < %s | FileCheck %s

;; Ensure that an unfoldable is.constant gets lowered reasonably in
;; optimized codegen, in particular, that the "true" branch is
;; eliminated.

;; Also ensure that any unfoldable objectsize is resolved in order.

;; CHECK-NOT: tail call i32 @subfun_1()
;; CHECK: tail call i32 @subfun_2()
;; CHECK-NOT: tail call i32 @subfun_1()

declare i1 @llvm.is.constant.i32(i32 %a) nounwind readnone
declare i1 @llvm.is.constant.i64(i64 %a) nounwind readnone
declare i1 @llvm.is.constant.i256(i256 %a) nounwind readnone
declare i1 @llvm.is.constant.v2i64(<2 x i64> %a) nounwind readnone
declare i1 @llvm.is.constant.f32(float %a) nounwind readnone
declare i1 @llvm.is.constant.sl_i32i32s({i32, i32} %a) nounwind readnone
declare i1 @llvm.is.constant.a2i64([2 x i64] %a) nounwind readnone
declare i1 @llvm.is.constant.p0i64(i64* %a) nounwind readnone

declare i64 @llvm.objectsize.i64.p0i8(i8*, i1, i1, i1) nounwind readnone

declare i32 @subfun_1()
declare i32 @subfun_2()

define i32 @test_branch(i32 %in) nounwind {
  %v = call i1 @llvm.is.constant.i32(i32 %in)
  br i1 %v, label %True, label %False

True:
  %call1 = tail call i32 @subfun_1()
  ret i32 %call1

False:
  %call2 = tail call i32 @subfun_2()
  ret i32 %call2
}

;; llvm.objectsize is another tricky case which gets folded to -1 very
;; late in the game. We'd like to ensure that llvm.is.constant of
;; llvm.objectsize is true.
define i1 @test_objectsize(i8* %obj) nounwind {
;; CHECK-LABEL: test_objectsize
;; CHECK-NOT: llvm.objectsize
;; CHECK-NOT: llvm.is.constant
;; CHECK: ret i1 true
  %os = call i64 @llvm.objectsize.i64.p0i8(i8* %obj, i1 false, i1 false, i1 false)
  %os1 = add i64 %os, 1
  %v = call i1 @llvm.is.constant.i64(i64 %os1)
  ret i1 %v
}

@test_phi_a = dso_local global i32 0, align 4
declare dso_local i32 @test_phi_b(...)
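
;; is.constant of a value loaded at run time should fold to false, so the
;; branch below should collapse onto its %cond.false successor; this
;; exercises the pass's updating of phi nodes whose incoming edges are
;; removed when a conditional branch on the folded result goes away.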
; Function Attrs: nounwind uwtable
define dso_local i32 @test_phi() {
entry:
  %0 = load i32, i32* @test_phi_a, align 4
  %1 = tail call i1 @llvm.is.constant.i32(i32 %0)
  br i1 %1, label %cond.end, label %cond.false

cond.false:                                       ; preds = %entry
  %call = tail call i32 bitcast (i32 (...)* @test_phi_b to i32 ()*)() #3
  %.pre = load i32, i32* @test_phi_a, align 4
  br label %cond.end

cond.end:                                         ; preds = %entry, %cond.false
  %2 = phi i32 [ %.pre, %cond.false ], [ %0, %entry ]
  %cond = phi i32 [ %call, %cond.false ], [ 1, %entry ]
  %cmp = icmp eq i32 %cond, %2
  br i1 %cmp, label %cond.true1, label %cond.end4

cond.true1:                                       ; preds = %cond.end
  %call2 = tail call i32 bitcast (i32 (...)* @test_phi_b to i32 ()*)() #3
  br label %cond.end4

cond.end4:                                        ; preds = %cond.end, %cond.true1
  ret i32 undef
}

define i1 @test_various_types(i256 %int, float %float, <2 x i64> %vec, {i32, i32} %struct, [2 x i64] %arr, i64* %ptr) #0 {
; CHECK-LABEL: @test_various_types(
; CHECK-NOT: llvm.is.constant
  %v1 = call i1 @llvm.is.constant.i256(i256 %int)
  %v2 = call i1 @llvm.is.constant.f32(float %float)
  %v3 = call i1 @llvm.is.constant.v2i64(<2 x i64> %vec)
  %v4 = call i1 @llvm.is.constant.sl_i32i32s({i32, i32} %struct)
  %v5 = call i1 @llvm.is.constant.a2i64([2 x i64] %arr)
  %v6 = call i1 @llvm.is.constant.p0i64(i64* %ptr)

  %c1 = call i1 @llvm.is.constant.i256(i256 -1)
  %c2 = call i1 @llvm.is.constant.f32(float 17.0)
  %c3 = call i1 @llvm.is.constant.v2i64(<2 x i64> <i64 -1, i64 44>)
  %c4 = call i1 @llvm.is.constant.sl_i32i32s({i32, i32} {i32 -1, i32 32})
  %c5 = call i1 @llvm.is.constant.a2i64([2 x i64] [i64 -1, i64 32])
  %c6 = call i1 @llvm.is.constant.p0i64(i64* inttoptr (i32 42 to i64*))

  %x1 = add i1 %v1, %c1
  %x2 = add i1 %v2, %c2
  %x3 = add i1 %v3, %c3
  %x4 = add i1 %v4, %c4
  %x5 = add i1 %v5, %c5
  %x6 = add i1 %v6, %c6

  %res2 = add i1 %x1, %x2
  %res3 = add i1 %res2, %x3
  %res4 = add i1 %res3, %x4
  %res5 = add i1 %res4, %x5
  %res6 = add i1 %res5, %x6

  ret i1 %res6
}
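
;; In @test_various_types, each %v* should fold to false (runtime arguments)
;; and each %c* to true (manifest constants); the i1 adds merely combine all
;; twelve results into a single return value so that every call has a use.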