; RUN: llc -march=amdgcn -mcpu=hawaii -enable-amdgpu-aa=0 -verify-machineinstrs -mattr=-promote-alloca,-load-store-opt < %s | FileCheck -check-prefix=GCN %s

; Checks that DS (local/LDS) memory operations on a split <4 x i64> vector are
; emitted with the expected ds_write_b64/ds_read_b64 offsets and ordering, and
; that the final results reach global memory via buffer_store_dwordx2.

@sPrivateStorage = internal addrspace(3) global [256 x [8 x <4 x i64>]] undef

; GCN-LABEL: {{^}}ds_reorder_vector_split:

; Write zeroinitializer
; GCN-DAG: ds_write_b64 [[PTR:v[0-9]+]], [[VAL:v\[[0-9]+:[0-9]+\]]] offset:24
; GCN-DAG: ds_write_b64 [[PTR]], [[VAL]] offset:16
; GCN-DAG: ds_write_b64 [[PTR]], [[VAL]] offset:8
; GCN-DAG: ds_write_b64 [[PTR]], [[VAL]]{{$}}

; GCN: s_waitcnt vmcnt

; GCN-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:24
; GCN-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:16
; GCN-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:8
; Appears to be dead store of vector component.
; GCN-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]$}}


; GCN-DAG: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:8
; GCN-DAG: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:16
; GCN-DAG: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:24

; GCN-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8
; GCN-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16
; GCN-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:24

; GCN: s_endpgm
define amdgpu_kernel void @ds_reorder_vector_split(<4 x i64> addrspace(1)* nocapture readonly %srcValues, i32 addrspace(1)* nocapture readonly %offsets, <4 x i64> addrspace(1)* nocapture %destBuffer, i32 %alignmentOffset) #0 {
entry:
  ; Compute a flattened workitem index (%tmp9) from the x/y/z workitem ids and
  ; the local sizes, and a global element index (%add.i) from the workgroup id.
  %tmp = tail call i32 @llvm.r600.read.local.size.y()
  %tmp1 = tail call i32 @llvm.r600.read.local.size.z()
  %tmp2 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = tail call i32 @llvm.amdgcn.workitem.id.y()
  %tmp4 = tail call i32 @llvm.amdgcn.workitem.id.z()
  %tmp6 = mul i32 %tmp2, %tmp
  %tmp10 = add i32 %tmp3, %tmp6
  %tmp11 = mul i32 %tmp10, %tmp1
  %tmp9 = add i32 %tmp11, %tmp4
  %x.i.i = tail call i32 @llvm.amdgcn.workgroup.id.x() #1
  %x.i.12.i = tail call i32 @llvm.r600.read.local.size.x() #1
  %mul.26.i = mul i32 %x.i.12.i, %x.i.i
  %add.i = add i32 %tmp2, %mul.26.i
  ; Zero-initialize this workitem's LDS slot (first group of ds_write_b64s).
  %arrayidx = getelementptr [256 x [8 x <4 x i64>]], [256 x [8 x <4 x i64>]] addrspace(3)* @sPrivateStorage, i32 0, i32 %tmp9, i32 %add.i
  store <4 x i64> zeroinitializer, <4 x i64> addrspace(3)* %arrayidx
  ; Load the source vector and an offset from global memory (the vmcnt wait).
  %tmp12 = sext i32 %add.i to i64
  %arrayidx1 = getelementptr inbounds <4 x i64>, <4 x i64> addrspace(1)* %srcValues, i64 %tmp12
  %tmp13 = load <4 x i64>, <4 x i64> addrspace(1)* %arrayidx1
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %offsets, i64 %tmp12
  %tmp14 = load i32, i32 addrspace(1)* %arrayidx2
  ; Store the loaded vector back into LDS at a runtime-computed i64 offset
  ; (second group of ds_write_b64s).
  %add.ptr = getelementptr [256 x [8 x <4 x i64>]], [256 x [8 x <4 x i64>]] addrspace(3)* @sPrivateStorage, i32 0, i32 %tmp9, i32 0, i32 %alignmentOffset
  %mul.i = shl i32 %tmp14, 2
  %arrayidx.i = getelementptr inbounds i64, i64 addrspace(3)* %add.ptr, i32 %mul.i
  %tmp15 = bitcast i64 addrspace(3)* %arrayidx.i to <4 x i64> addrspace(3)*
  store <4 x i64> %tmp13, <4 x i64> addrspace(3)* %tmp15
  %add.ptr6 = getelementptr [256 x [8 x <4 x i64>]], [256 x [8 x <4 x i64>]] addrspace(3)* @sPrivateStorage, i32 0, i32 %tmp9, i32 %tmp14, i32 %alignmentOffset
  %tmp16 = sext i32 %tmp14 to i64
  %tmp17 = sext i32 %alignmentOffset to i64
  %add.ptr9 = getelementptr inbounds <4 x i64>, <4 x i64> addrspace(1)* %destBuffer, i64 %tmp16, i64 %tmp17
  ; Element 0 comes from the in-register vector (bitcast+trunc); elements 1-3
  ; are re-read from LDS (ds_read_b64 offset:8/16/24) and stored to global
  ; memory (buffer_store_dwordx2).
  %tmp18 = bitcast <4 x i64> %tmp13 to i256
  %trunc = trunc i256 %tmp18 to i64
  store i64 %trunc, i64 addrspace(1)* %add.ptr9
  %arrayidx10.1 = getelementptr inbounds i64, i64 addrspace(3)* %add.ptr6, i32 1
  %tmp19 = load i64, i64 addrspace(3)* %arrayidx10.1
  %arrayidx11.1 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr9, i64 1
  store i64 %tmp19, i64 addrspace(1)* %arrayidx11.1
  %arrayidx10.2 = getelementptr inbounds i64, i64 addrspace(3)* %add.ptr6, i32 2
  %tmp20 = load i64, i64 addrspace(3)* %arrayidx10.2
  %arrayidx11.2 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr9, i64 2
  store i64 %tmp20, i64 addrspace(1)* %arrayidx11.2
  %arrayidx10.3 = getelementptr inbounds i64, i64 addrspace(3)* %add.ptr6, i32 3
  %tmp21 = load i64, i64 addrspace(3)* %arrayidx10.3
  %arrayidx11.3 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr9, i64 3
  store i64 %tmp21, i64 addrspace(1)* %arrayidx11.3
  ret void
}

; Function Attrs: nounwind readnone
declare i32 @llvm.amdgcn.workgroup.id.x() #1

; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.local.size.x() #1

; Function Attrs: nounwind readnone
declare i32 @llvm.amdgcn.workitem.id.x() #1

; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.local.size.y() #1

; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.local.size.z() #1

; Function Attrs: nounwind readnone
declare i32 @llvm.amdgcn.workitem.id.y() #1

; Function Attrs: nounwind readnone
declare i32 @llvm.amdgcn.workitem.id.z() #1

attributes #0 = { norecurse nounwind }
attributes #1 = { nounwind readnone }