; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Two adjacent i16 loads from the (uniform, invariant) dispatch pointer should
; be combined into a single scalar s_load_dword, with no global/flat loads
; emitted for them.
; GCN-LABEL: {{^}}load_idx_idy:
; GCN-NOT: global_load
; GCN: s_load_dword [[ID_XY:s[0-9]+]], s[4:5], 0x4
; GCN-NOT: global_load
; GCN: s_lshr_b32 [[ID_Y:s[0-9]+]], [[ID_XY]], 16
; GCN: s_add_i32 [[ID_SUM:s[0-9]+]], [[ID_Y]], [[ID_XY]]
; GCN: s_and_b32 s{{[0-9]+}}, [[ID_SUM]], 0xffff
define protected amdgpu_kernel void @load_idx_idy(i32 addrspace(1)* %out) {
entry:
  %disp = tail call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
  %gep_x = getelementptr i8, i8 addrspace(4)* %disp, i64 4
  %gep_x.cast = bitcast i8 addrspace(4)* %gep_x to i16 addrspace(4)*
  %id_x = load i16, i16 addrspace(4)* %gep_x.cast, align 4, !invariant.load !0 ; load workgroup size x
  %gep_y = getelementptr i8, i8 addrspace(4)* %disp, i64 6
  %gep_y.cast = bitcast i8 addrspace(4)* %gep_y to i16 addrspace(4)*
  %id_y = load i16, i16 addrspace(4)* %gep_y.cast, align 2, !invariant.load !0 ; load workgroup size y
  %add = add nuw nsw i16 %id_y, %id_x
  %conv = zext i16 %add to i32
  store i32 %conv, i32 addrspace(1)* %out, align 4
  ret void
}

; A little more complicated case where more sub-dword loads could be coalesced
; if they are not widening earlier.
; Four adjacent i16 loads (offsets 4,6,8,10 from the dispatch pointer) should
; be coalesced into one s_load_dwordx2 rather than separate s_load_dword ops.
; GCN-LABEL: {{^}}load_4i16:
; GCN: s_load_dwordx2 s{{\[}}[[D0:[0-9]+]]:[[D1:[0-9]+]]{{\]}}, s[4:5], 0x4
; GCN-NOT: s_load_dword {{s[0-9]+}}, s[4:5], 0x4
; GCN-DAG: s_lshr_b32 s{{[0-9]+}}, s[[D0]], 16
; GCN-DAG: s_lshr_b32 s{{[0-9]+}}, s[[D1]], 16
; GCN: s_endpgm
define protected amdgpu_kernel void @load_4i16(i32 addrspace(1)* %out) {
entry:
  %disp = tail call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
  %gep_x = getelementptr i8, i8 addrspace(4)* %disp, i64 4
  %gep_x.cast = bitcast i8 addrspace(4)* %gep_x to i16 addrspace(4)*
  %id_x = load i16, i16 addrspace(4)* %gep_x.cast, align 4, !invariant.load !0 ; load workgroup size x
  %gep_y = getelementptr i8, i8 addrspace(4)* %disp, i64 6
  %gep_y.cast = bitcast i8 addrspace(4)* %gep_y to i16 addrspace(4)*
  %id_y = load i16, i16 addrspace(4)* %gep_y.cast, align 2, !invariant.load !0 ; load workgroup size y
  %gep_z = getelementptr i8, i8 addrspace(4)* %disp, i64 8
  %gep_z.cast = bitcast i8 addrspace(4)* %gep_z to i16 addrspace(4)*
  %id_z = load i16, i16 addrspace(4)* %gep_z.cast, align 4, !invariant.load !0 ; load workgroup size z
  %gep_w = getelementptr i8, i8 addrspace(4)* %disp, i64 10
  %gep_w.cast = bitcast i8 addrspace(4)* %gep_w to i16 addrspace(4)*
  %id_w = load i16, i16 addrspace(4)* %gep_w.cast, align 2, !invariant.load !0 ; presumably a fourth packed field at offset 10 -- TODO confirm against hsa_kernel_dispatch_packet_t layout
  %add = add nuw nsw i16 %id_y, %id_x
  %add2 = add nuw nsw i16 %id_z, %id_w
  %add3 = add nuw nsw i16 %add, %add2
  %conv = zext i16 %add3 to i32
  store i32 %conv, i32 addrspace(1)* %out, align 4
  ret void
}

declare i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()

; Self-referential metadata node used as the !invariant.load marker above.
!0 = !{!0}