; RUN: opt -instnamer -O1 -mtriple=amdgcn-- -S -o - %s | FileCheck -check-prefixes=GCN,O1 %s
; RUN: opt -instnamer -O2 -mtriple=amdgcn-- -S -o - %s | FileCheck -check-prefixes=GCN,O2 %s
; RUN: opt -instnamer -O3 -mtriple=amdgcn-- -S -o - %s | FileCheck -check-prefixes=GCN,O3 %s

target datalayout = "A5"

; Verify that the temporary array in private memory (addrspace(5)) is optimized
; away (no alloca remains at -O1/-O2/-O3) and that the sum loop is fully
; unrolled into 27 loads and 26 adds.

; GCN-LABEL: t0
; O1-NOT: alloca
; O2-NOT: alloca
; O3-NOT: alloca
; GCN-COUNT-27: = load
; GCN-COUNT-26: = add
define protected amdgpu_kernel void @t0(i32 addrspace(1)* %p.coerce) {
entry:
  %p = alloca i32*, align 8, addrspace(5)
  %p.ascast = addrspacecast i32* addrspace(5)* %p to i32**
  %p.addr = alloca i32*, align 8, addrspace(5)
  %p.addr.ascast = addrspacecast i32* addrspace(5)* %p.addr to i32**
  %t = alloca [27 x i32], align 16, addrspace(5)
  %t.ascast = addrspacecast [27 x i32] addrspace(5)* %t to [27 x i32]*
  %sum = alloca i32, align 4, addrspace(5)
  %sum.ascast = addrspacecast i32 addrspace(5)* %sum to i32*
  %i = alloca i32, align 4, addrspace(5)
  %i.ascast = addrspacecast i32 addrspace(5)* %i to i32*
  %cleanup.dest.slot = alloca i32, align 4, addrspace(5)
  %0 = addrspacecast i32 addrspace(1)* %p.coerce to i32*
  store i32* %0, i32** %p.ascast, align 8
  %p1 = load i32*, i32** %p.ascast, align 8
  store i32* %p1, i32** %p.addr.ascast, align 8
  %1 = bitcast [27 x i32] addrspace(5)* %t to i8 addrspace(5)*
  call void @llvm.lifetime.start.p5i8(i64 108, i8 addrspace(5)* %1)
  %arraydecay = getelementptr inbounds [27 x i32], [27 x i32]* %t.ascast, i64 0, i64 0
  %2 = load i32*, i32** %p.addr.ascast, align 8
  call void @copy(i32* %arraydecay, i32* %2, i32 27)
  %3 = bitcast i32 addrspace(5)* %sum to i8 addrspace(5)*
  call void @llvm.lifetime.start.p5i8(i64 4, i8 addrspace(5)* %3)
  store i32 0, i32* %sum.ascast, align 4
  %4 = bitcast i32 addrspace(5)* %i to i8 addrspace(5)*
  call void @llvm.lifetime.start.p5i8(i64 4, i8 addrspace(5)* %4)
  store i32 0, i32* %i.ascast, align 4
  br label %for.cond

for.cond:                                         ; preds = %for.inc, %entry
  %5 = load i32, i32* %i.ascast, align 4
  %cmp = icmp slt i32 %5, 27
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond
  %6 = bitcast i32 addrspace(5)* %i to i8 addrspace(5)*
  call void @llvm.lifetime.end.p5i8(i64 4, i8 addrspace(5)* %6)
  br label %for.end

for.body:                                         ; preds = %for.cond
  %7 = load i32, i32* %i.ascast, align 4
  %idxprom = sext i32 %7 to i64
  %arrayidx = getelementptr inbounds [27 x i32], [27 x i32]* %t.ascast, i64 0, i64 %idxprom
  %8 = load i32, i32* %arrayidx, align 4
  %9 = load i32, i32* %sum.ascast, align 4
  %add = add nsw i32 %9, %8
  store i32 %add, i32* %sum.ascast, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %10 = load i32, i32* %i.ascast, align 4
  %inc = add nsw i32 %10, 1
  store i32 %inc, i32* %i.ascast, align 4
  br label %for.cond

for.end:                                          ; preds = %for.cond.cleanup
  %11 = load i32, i32* %sum.ascast, align 4
  %12 = load i32*, i32** %p.addr.ascast, align 8
  store i32 %11, i32* %12, align 4
  %13 = bitcast i32 addrspace(5)* %sum to i8 addrspace(5)*
  call void @llvm.lifetime.end.p5i8(i64 4, i8 addrspace(5)* %13)
  %14 = bitcast [27 x i32] addrspace(5)* %t to i8 addrspace(5)*
  call void @llvm.lifetime.end.p5i8(i64 108, i8 addrspace(5)* %14)
  ret void
}

define internal void @copy(i32* %d, i32* %s, i32 %N) {
entry:
  %d8 = bitcast i32* %d to i8*
  %s8 = bitcast i32* %s to i8*
  %N8 = mul i32 %N, 4
  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d8, i8* %s8, i32 %N8, i1 false)
  ret void
}

declare void @llvm.lifetime.start.p5i8(i64 immarg, i8 addrspace(5)* nocapture)
declare void @llvm.lifetime.end.p5i8(i64 immarg, i8 addrspace(5)* nocapture)
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1)