; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=verde -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
; XUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mtriple=r600---amdgiz -mcpu=redwood -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=EG -check-prefix=FUNC %s

declare i32 @llvm.r600.read.tidig.x() #0

declare i32 @llvm.r600.read.tgid.x() #0


;EG: {{^}}shl_v2i32:
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

;SI: {{^}}shl_v2i32:
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

;VI: {{^}}shl_v2i32:
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
  %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
  %result = shl <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

;EG: {{^}}shl_v4i32:
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

;SI: {{^}}shl_v4i32:
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

;VI: {{^}}shl_v4i32:
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
  %result = shl <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i16:
; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @shl_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
  %b_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
  %a = load i16, i16 addrspace(1)* %in
  %b = load i16, i16 addrspace(1)* %b_ptr
  %result = shl i16 %a, %b
  store i16 %result, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i16_v_s:
; VI: v_lshlrev_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}

; VI: v_lshlrev_b16_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
define amdgpu_kernel void @shl_i16_v_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
  %a = load i16, i16 addrspace(1)* %in
  %result = shl i16 %a, %b
  store i16 %result, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i16_v_compute_s:
; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}

; VI: v_lshlrev_b16_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
define amdgpu_kernel void @shl_i16_v_compute_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
  %a = load i16, i16 addrspace(1)* %in
  %b.add = add i16 %b, 3
  %result = shl i16 %a, %b.add
  store i16 %result, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i16_computed_amount:
; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 3, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, [[ADD]], v{{[0-9]+}}
define amdgpu_kernel void @shl_i16_computed_amount(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
  %b_ptr = getelementptr i16, i16 addrspace(1)* %gep, i16 1
  %a = load volatile i16, i16 addrspace(1)* %in
  %b = load volatile i16, i16 addrspace(1)* %b_ptr
  %b.add = add i16 %b, 3
  %result = shl i16 %a, %b.add
  store i16 %result, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i16_i_s:
; GCN: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 12
define amdgpu_kernel void @shl_i16_i_s(i16 addrspace(1)* %out, i16 zeroext %a) {
  %result = shl i16 %a, 12
  store i16 %result, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_v2i16:
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
  %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %gep, i16 1
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %in
  %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
  %result = shl <2 x i16> %a, %b
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_v4i16:
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i32 %tid
  %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %gep, i16 1
  %a = load <4 x i16>, <4 x i16> addrspace(1)* %gep
  %b = load <4 x i16>, <4 x i16> addrspace(1)* %b_ptr
  %result = shl <4 x i16> %a, %b
  store <4 x i16> %result, <4 x i16> addrspace(1)* %gep.out
  ret void
}

;EG-LABEL: {{^}}shl_i64:
;EG: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
;EG: LSHR {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
;EG-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
;EG-DAG: LSHR {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
;EG-DAG: LSHL {{\*? *}}[[HISMTMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], [[SHIFT]]
;EG-DAG: OR_INT {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], {{[[HISMTMP]]|PV.[XYZW]|PS}}, {{[[OVERF]]|PV.[XYZW]}}
;EG-DAG: LSHL {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], [[OPLO]], {{PS|[[SHIFT]]|PV.[XYZW]}}
;EG-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
;EG-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
;EG-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0

; GCN-LABEL: {{^}}shl_i64:
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
; VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
  %a = load i64, i64 addrspace(1)* %in
  %b = load i64, i64 addrspace(1)* %b_ptr
  %result = shl i64 %a, %b
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

;EG-LABEL: {{^}}shl_v2i64:
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHA]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHB]]
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: LSHL {{.*}}, [[SHA]]
;EG-DAG: LSHL {{.*}}, [[SHB]]
;EG-DAG: LSHL {{.*}}, [[SHA]]
;EG-DAG: LSHL {{.*}}, [[SHB]]
;EG-DAG: LSHL
;EG-DAG: LSHL
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT
;EG-DAG: CNDE_INT

;SI: {{^}}shl_v2i64:
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}

;VI: {{^}}shl_v2i64:
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}

define amdgpu_kernel void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
  %a = load <2 x i64>, <2 x i64> addrspace(1)* %in
  %b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
  %result = shl <2 x i64> %a, %b
  store <2 x i64> %result, <2 x i64> addrspace(1)* %out
  ret void
}

;EG: {{^}}shl_v4i64:
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHA]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHB]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHC]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHD]]
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: LSHL {{.*}}, [[SHA]]
;EG-DAG: LSHL {{.*}}, [[SHB]]
;EG-DAG: LSHL {{.*}}, [[SHC]]
;EG-DAG: LSHL {{.*}}, [[SHD]]
;EG-DAG: LSHL {{.*}}, [[SHA]]
;EG-DAG: LSHL {{.*}}, [[SHB]]
;EG-DAG: LSHL {{.*}}, [[SHC]]
;EG-DAG: LSHL {{.*}}, [[SHD]]
;EG-DAG: LSHL
;EG-DAG: LSHL
;EG-DAG: LSHL
;EG-DAG: LSHL
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT
;EG-DAG: CNDE_INT
;EG-DAG: CNDE_INT
;EG-DAG: CNDE_INT

;SI: {{^}}shl_v4i64:
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}

;VI: {{^}}shl_v4i64:
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}

define amdgpu_kernel void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
  %a = load <4 x i64>, <4 x i64> addrspace(1)* %in
  %b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
  %result = shl <4 x i64> %a, %b
  store <4 x i64> %result, <4 x i64> addrspace(1)* %out
  ret void
}

; Make sure load width gets reduced to i32 load.
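; Shifting left by 32 moves the low dword of %a into the high dword of the
; result and zeroes the low dword, so only the low 32 bits of the i64
; argument are needed and the argument load can be narrowed to s_load_dword.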
; GCN-LABEL: {{^}}s_shl_32_i64:
; GCN-DAG: s_load_dword [[LO_A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x13{{$}}
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[LO_A]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
define amdgpu_kernel void @s_shl_32_i64(i64 addrspace(1)* %out, [8 x i32], i64 %a) {
  %result = shl i64 %a, 32
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}v_shl_32_i64:
; GCN-DAG: buffer_load_dword v[[LO_A:[0-9]+]],
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[LO_A]]{{\]}}
define amdgpu_kernel void @v_shl_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %tid = call i32 @llvm.r600.read.tgid.x() #0
  %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
  %a = load i64, i64 addrspace(1)* %gep.in
  %result = shl i64 %a, 32
  store i64 %result, i64 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}s_shl_constant_i64
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_constant_i64(i64 addrspace(1)* %out, i64 %a) {
  %shl = shl i64 281474976710655, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_shl_constant_i64:
; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]]
; SI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0xab19b207
; SI-DAG: s_movk_i32 s[[KHI:[0-9]+]], 0x11e{{$}}
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}, [[VAL]]
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_shl_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %shl = shl i64 1231231234567, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_shl_i64_32_bit_constant:
; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]]
; SI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0x12d687{{$}}
; SI-DAG: s_mov_b32 s[[KHI:[0-9]+]], 0{{$}}
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}, [[VAL]]
define amdgpu_kernel void @v_shl_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %shl = shl i64 1234567, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_shl_inline_imm_64_i64:
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, 64, {{v[0-9]+}}
define amdgpu_kernel void @v_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %shl = shl i64 64, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_64_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 64, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 64, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_1_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 1, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 1, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_1.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 1.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 4607182418800017408, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_1.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -1.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 13830554455654793216, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_0.5_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 0.5, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 4602678819172646912, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_0.5_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -0.5, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 13826050856027422720, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_2.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 2.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 4611686018427387904, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_2.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -2.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 13835058055282163712, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_4.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 4.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 4616189618054758400, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_4.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -4.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 13839561654909534208, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}


; Test with the 64-bit integer bitpattern for a 32-bit float in the
; low 32-bits, which is not a valid 64-bit inline immediate.
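; 1082130432 is 0x40800000, the IEEE-754 bit pattern of f32 4.0; as an i64
; value it is not one of the hardware inline constants (small integers and
; the f64 bit patterns of +/-0.5, +/-1.0, +/-2.0, +/-4.0), so it has to be
; materialized in an SGPR pair before the shift.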

; FUNC-LABEL: {{^}}s_shl_inline_imm_f32_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 4.0
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0{{$}}
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 1082130432, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Copy of -1 register
; FUNC-LABEL: {{^}}s_shl_inline_imm_f32_neg_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], -4.0
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -1{{$}}
; SI-DAG: s_mov_b32 s[[K_HI_COPY:[0-9]+]], s[[K_HI]]
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI_COPY]]{{\]}}, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 -1065353216, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; Shift into upper 32-bits
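; 4647714815446351872 is 0x4080000000000000, i.e. the f32 4.0 bit pattern
; (0x40800000) placed in the upper 32 bits, with the low 32 bits zero.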
; FUNC-LABEL: {{^}}s_shl_inline_high_imm_f32_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 4.0
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 4647714815446351872, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_high_imm_f32_neg_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -4.0
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 13871086852301127680, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}test_mul2:
; GCN: s_lshl_b32 s{{[0-9]}}, s{{[0-9]}}, 1
define amdgpu_kernel void @test_mul2(i32 %p) {
  %i = mul i32 %p, 2
  store volatile i32 %i, i32 addrspace(1)* undef
  ret void
}

; FUNC-LABEL: {{^}}shl_or_k:
; SI: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 2, v{{[0-9]+}}
; SI: v_or_b32_e32 [[OR:v[0-9]+]], 4, [[SHL]]
; SI: buffer_store_dword [[OR]]
define void @shl_or_k(i32 addrspace(1)* %out, i32 %in) {
  %tmp0 = or i32 %in, 1
  %tmp2 = shl i32 %tmp0, 2
  store i32 %tmp2, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}shl_or_k_two_uses:
; SI: v_or_b32_e32 [[OR:v[0-9]+]], 1, v{{[0-9]+}}
; SI: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 2, [[OR]]
; SI-DAG: buffer_store_dword [[OR]]
; SI-DAG: buffer_store_dword [[SHL]]
define void @shl_or_k_two_uses(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %in) {
  %tmp0 = or i32 %in, 1
  %tmp2 = shl i32 %tmp0, 2
  store i32 %tmp2, i32 addrspace(1)* %out0
  store i32 %tmp0, i32 addrspace(1)* %out1
  ret void
}

attributes #0 = { nounwind readnone }