; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=SI %s

declare i32 @llvm.amdgcn.workitem.id.x() #1

; Test with inline immediate

; SI-LABEL: {{^}}shl_2_add_9_i32:
; SI: v_lshlrev_b32_e32 [[REG:v[0-9]+]], 2, {{v[0-9]+}}
; SI: v_add_i32_e32 [[RESULT:v[0-9]+]], vcc, 36, [[REG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define amdgpu_kernel void @shl_2_add_9_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
  %val = load i32, i32 addrspace(1)* %ptr, align 4
  %add = add i32 %val, 9
  %result = shl i32 %add, 2
  store i32 %result, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}shl_2_add_9_i32_2_add_uses:
; SI-DAG: v_add_i32_e32 [[ADDREG:v[0-9]+]], vcc, 9, {{v[0-9]+}}
; SI-DAG: v_lshlrev_b32_e32 [[SHLREG:v[0-9]+]], 2, {{v[0-9]+}}
; SI-DAG: buffer_store_dword [[ADDREG]]
; SI-DAG: buffer_store_dword [[SHLREG]]
; SI: s_endpgm
define amdgpu_kernel void @shl_2_add_9_i32_2_add_uses(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
  %val = load i32, i32 addrspace(1)* %ptr, align 4
  %add = add i32 %val, 9
  %result = shl i32 %add, 2
  store i32 %result, i32 addrspace(1)* %out0, align 4
  store i32 %add, i32 addrspace(1)* %out1, align 4
  ret void
}

; Test with add literal constant

; SI-LABEL: {{^}}shl_2_add_999_i32:
; SI: v_lshlrev_b32_e32 [[REG:v[0-9]+]], 2, {{v[0-9]+}}
; SI: v_add_i32_e32 [[RESULT:v[0-9]+]], vcc, 0xf9c, [[REG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define amdgpu_kernel void @shl_2_add_999_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
  %val = load i32, i32 addrspace(1)* %ptr, align 4
  %shl = add i32 %val, 999
  %result = shl i32 %shl, 2
  store i32 %result, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_add_shl_add_constant:
; SI-DAG: s_load_dwordx2 s{{\[}}[[X:[0-9]+]]:[[Y:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x13
; SI-DAG: s_lshl_b32 [[SHL3:s[0-9]+]], s[[X]], 3
; SI: s_add_i32 [[RESULT:s[0-9]+]], [[SHL3]], s[[Y]]
; SI: s_addk_i32 [[RESULT]], 0x3d8
; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[RESULT]]
; SI: buffer_store_dword [[VRESULT]]
define amdgpu_kernel void @test_add_shl_add_constant(i32 addrspace(1)* %out, [8 x i32], i32 %x, i32 %y) #0 {
  %add.0 = add i32 %x, 123
  %shl = shl i32 %add.0, 3
  %add.1 = add i32 %shl, %y
  store i32 %add.1, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_add_shl_add_constant_inv:
; SI-DAG: s_load_dwordx2 s{{\[}}[[X:[0-9]+]]:[[Y:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x13
; SI: s_lshl_b32 [[SHL3:s[0-9]+]], s[[X]], 3
; SI: s_add_i32 [[TMP:s[0-9]+]], [[SHL3]], s[[Y]]
; SI: s_addk_i32 [[TMP]], 0x3d8
; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[TMP]]
; SI: buffer_store_dword [[VRESULT]]

define amdgpu_kernel void @test_add_shl_add_constant_inv(i32 addrspace(1)* %out, [8 x i32], i32 %x, i32 %y) #0 {
  %add.0 = add i32 %x, 123
  %shl = shl i32 %add.0, 3
  %add.1 = add i32 %y, %shl
  store i32 %add.1, i32 addrspace(1)* %out, align 4
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
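
; Note (editorial, derived from the checks above): the immediates matched by
; the CHECK lines are the constants produced by rewriting (add x, C1) << C2
; as (x << C2) + (C1 << C2):
;   9   << 2 = 36
;   999 << 2 = 3996 = 0xf9c
;   123 << 3 = 984  = 0x3d8
; In @shl_2_add_9_i32_2_add_uses the add has a second use, so the checks
; expect the original immediate (9) to be kept rather than folded into the
; shifted value.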