; RUN: llc < %s -march=amdgcn -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=GCN %s
; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=GCN %s

; GCN-LABEL: {{^}}inline_reg_constraints:
; GCN: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
; GCN: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
; GCN: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
; GCN: flat_load_dwordx4 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
; GCN: flat_load_dwordx4 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
; GCN: s_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}]
; GCN: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
; GCN: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
; GCN: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
; GCN: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
; GCN: s_load_dwordx8 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]

define amdgpu_kernel void @inline_reg_constraints(i32 addrspace(1)* %ptr) {
entry:
  %v32 = tail call i32 asm sideeffect "flat_load_dword $0, $1", "=v,v"(i32 addrspace(1)* %ptr)
  %v2_32 = tail call <2 x i32> asm sideeffect "flat_load_dwordx2 $0, $1", "=v,v"(i32 addrspace(1)* %ptr)
  %v64 = tail call i64 asm sideeffect "flat_load_dwordx2 $0, $1", "=v,v"(i32 addrspace(1)* %ptr)
  %v4_32 = tail call <4 x i32> asm sideeffect "flat_load_dwordx4 $0, $1", "=v,v"(i32 addrspace(1)* %ptr)
  %v128 = tail call i128 asm sideeffect "flat_load_dwordx4 $0, $1", "=v,v"(i32 addrspace(1)* %ptr)
  %s32 = tail call i32 asm sideeffect "s_load_dword $0, $1", "=s,s"(i32 addrspace(1)* %ptr)
  %s32_2 = tail call <2 x i32> asm sideeffect "s_load_dwordx2 $0, $1", "=s,s"(i32 addrspace(1)* %ptr)
  %s64 = tail call i64 asm sideeffect "s_load_dwordx2 $0, $1", "=s,s"(i32 addrspace(1)* %ptr)
  %s4_32 = tail call <4 x i32> asm sideeffect "s_load_dwordx4 $0, $1", "=s,s"(i32 addrspace(1)* %ptr)
  %s128 = tail call i128 asm sideeffect "s_load_dwordx4 $0, $1", "=s,s"(i32 addrspace(1)* %ptr)
  %s256 = tail call <8 x i32> asm sideeffect "s_load_dwordx8 $0, $1", "=s,s"(i32 addrspace(1)* %ptr)
  ret void
}

; FIXME: Should be able to avoid copy
; GCN-LABEL: {{^}}inline_sreg_constraint_m0:
; GCN: s_mov_b32 m0, -1
; GCN: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
; GCN: ; use [[COPY_M0]]
define amdgpu_kernel void @inline_sreg_constraint_m0() {
  %m0 = tail call i32 asm sideeffect "s_mov_b32 m0, -1", "={M0}"()
  tail call void asm sideeffect "; use $0", "s"(i32 %m0)
  ret void
}

; GCN-LABEL: {{^}}inline_sreg_constraint_imm_i32:
; GCN: s_mov_b32 [[REG:s[0-9]+]], 32
; GCN: ; use [[REG]]
define amdgpu_kernel void @inline_sreg_constraint_imm_i32() {
  tail call void asm sideeffect "; use $0", "s"(i32 32)
  ret void
}

; GCN-LABEL: {{^}}inline_sreg_constraint_imm_f32:
; GCN: s_mov_b32 [[REG:s[0-9]+]], 1.0
; GCN: ; use [[REG]]
define amdgpu_kernel void @inline_sreg_constraint_imm_f32() {
  tail call void asm sideeffect "; use $0", "s"(float 1.0)
  ret void
}

; FIXME: Should be able to use s_mov_b64
; GCN-LABEL: {{^}}inline_sreg_constraint_imm_i64:
; GCN-DAG: s_mov_b32 s[[REG_LO:[0-9]+]], -4{{$}}
; GCN-DAG: s_mov_b32 s[[REG_HI:[0-9]+]], -1{{$}}
; GCN: ; use s{{\[}}[[REG_LO]]:[[REG_HI]]{{\]}}
define amdgpu_kernel void @inline_sreg_constraint_imm_i64() {
  tail call void asm sideeffect "; use $0", "s"(i64 -4)
  ret void
}

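; double 1.0 has the IEEE-754 bit pattern 0x3FF0000000000000, so the constant
; is materialized with two 32-bit moves: 0 into the low half and 0x3ff00000
; into the high half, as checked below.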
; GCN-LABEL: {{^}}inline_sreg_constraint_imm_f64:
; GCN-DAG: s_mov_b32 s[[REG_LO:[0-9]+]], 0{{$}}
; GCN-DAG: s_mov_b32 s[[REG_HI:[0-9]+]], 0x3ff00000{{$}}
; GCN: ; use s{{\[}}[[REG_LO]]:[[REG_HI]]{{\]}}
define amdgpu_kernel void @inline_sreg_constraint_imm_f64() {
  tail call void asm sideeffect "; use $0", "s"(double 1.0)
  ret void
}