; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s

; In this test both the pointer and the offset operands to the
; BUFFER_LOAD instructions end up being stored in vgprs. This
; requires us to add the pointer and offset together, store the
; result in the offset operand (vaddr), and then store 0 in an
; sgpr register pair and use that for the pointer operand
; (low 64-bits of srsrc).

; CHECK-LABEL: {{^}}mubuf:

; Make sure we aren't using VGPRs for the source operand of s_mov_b64
; CHECK-NOT: s_mov_b64 s[{{[0-9]+:[0-9]+}}], v

; Make sure we aren't using VGPRs for the srsrc operand of BUFFER_LOAD_*
; instructions
; CHECK: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
; CHECK: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
define void @mubuf(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
  %0 = call i32 @llvm.r600.read.tidig.x() #1
  %1 = call i32 @llvm.r600.read.tidig.y() #1
  %2 = sext i32 %0 to i64
  %3 = sext i32 %1 to i64
  br label %loop

loop:
  %4 = phi i64 [0, %entry], [%5, %loop]
  %5 = add i64 %2, %4
  %6 = getelementptr i8, i8 addrspace(1)* %in, i64 %5
  %7 = load i8, i8 addrspace(1)* %6, align 1
  %8 = or i64 %5, 1
  %9 = getelementptr i8, i8 addrspace(1)* %in, i64 %8
  %10 = load i8, i8 addrspace(1)* %9, align 1
  %11 = add i8 %7, %10
  %12 = sext i8 %11 to i32
  store i32 %12, i32 addrspace(1)* %out
  %13 = icmp slt i64 %5, 10
  br i1 %13, label %loop, label %done

done:
  ret void
}

declare i32 @llvm.r600.read.tidig.x() #1
declare i32 @llvm.r600.read.tidig.y() #1

attributes #1 = { nounwind readnone }

; Test moving an SMRD instruction to the VALU

; CHECK-LABEL: {{^}}smrd_valu:
; CHECK: buffer_load_dword [[OUT:v[0-9]+]]
; CHECK: buffer_store_dword [[OUT]]

define void @smrd_valu(i32 addrspace(2)* addrspace(1)* %in, i32 %a, i32 addrspace(1)* %out) {
entry:
  %0 = icmp ne i32 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
  br label %endif

else:
  %2 = getelementptr i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
  %3 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %2
  br label %endif

endif:
  %4 = phi i32 addrspace(2)* [%1, %if], [%3, %else]
  %5 = getelementptr i32, i32 addrspace(2)* %4, i32 3000
  %6 = load i32, i32 addrspace(2)* %5
  store i32 %6, i32 addrspace(1)* %out
  ret void
}

; Test moving an SMRD with an immediate offset to the VALU

; CHECK-LABEL: {{^}}smrd_valu2:
; CHECK: buffer_load_dword
define void @smrd_valu2(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in) {
entry:
  %0 = call i32 @llvm.r600.read.tidig.x() nounwind readnone
  %1 = add i32 %0, 4
  %2 = getelementptr [8 x i32], [8 x i32] addrspace(2)* %in, i32 %0, i32 4
  %3 = load i32, i32 addrspace(2)* %2
  store i32 %3, i32 addrspace(1)* %out
  ret void
}
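
; Test moving wide SMRD loads to the VALU. buffer_load_dwordx4 is the
; widest MUBUF load, so when the scalar loads below are moved to the VALU
; they must be split into multiple dwordx4 loads: two for the 256-bit
; v8i32 load and four for the 512-bit v16i32 load, as the CHECK lines
; in the next two tests expect.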

; CHECK-LABEL: {{^}}s_load_imm_v8i32:
; CHECK: buffer_load_dwordx4
; CHECK: buffer_load_dwordx4
define void @s_load_imm_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) {
entry:
  %tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
  %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <8 x i32> addrspace(2)*
  %tmp3 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp2, align 4
  store <8 x i32> %tmp3, <8 x i32> addrspace(1)* %out, align 32
  ret void
}

; CHECK-LABEL: {{^}}s_load_imm_v16i32:
; CHECK: buffer_load_dwordx4
; CHECK: buffer_load_dwordx4
; CHECK: buffer_load_dwordx4
; CHECK: buffer_load_dwordx4
define void @s_load_imm_v16i32(<16 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) {
entry:
  %tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
  %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <16 x i32> addrspace(2)*
  %tmp3 = load <16 x i32>, <16 x i32> addrspace(2)* %tmp2, align 4
  store <16 x i32> %tmp3, <16 x i32> addrspace(1)* %out, align 32
  ret void
}