; Tests for lowering of the llvm.fma.* intrinsics on AMDGPU (SI, GFX906) and
; R600 (Evergreen). Pre-Cypress Evergreen parts have no FMA instruction, so
; llc is expected to fail ("not llc") for those -mcpu values.
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX906 -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: not llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cedar -verify-machineinstrs < %s
; RUN: not llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=juniper -verify-machineinstrs < %s
; RUN: not llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s
; RUN: not llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=sumo -verify-machineinstrs < %s
; RUN: not llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=barts -verify-machineinstrs < %s
; RUN: not llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=caicos -verify-machineinstrs < %s
; RUN: not llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=turks -verify-machineinstrs < %s

declare float @llvm.fma.f32(float, float, float) nounwind readnone
declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) nounwind readnone
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone

; Scalar fma on loaded operands.
; FUNC-LABEL: {{^}}fma_f32:
; SI: v_fma_f32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
; GFX906: v_fmac_f32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}

; EG: MEM_RAT_{{.*}} STORE_{{.*}} [[RES:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}},
; EG: FMA {{\*? *}}[[RES]]
define amdgpu_kernel void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
                                   float addrspace(1)* %in2, float addrspace(1)* %in3) {
  %r0 = load float, float addrspace(1)* %in1
  %r1 = load float, float addrspace(1)* %in2
  %r2 = load float, float addrspace(1)* %in3
  %r3 = tail call float @llvm.fma.f32(float %r0, float %r1, float %r2)
  store float %r3, float addrspace(1)* %out
  ret void
}

; All three operands are live past the fma, so the 3-address form is required.
; GCN-LABEL: {{^}}fmac_to_3addr_f32:
; GCN: v_fma_f32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
define float @fmac_to_3addr_f32(float %r0, float %r1, float %r2) {
  %r3 = tail call float @llvm.fma.f32(float %r0, float %r1, float %r2)
  ret float %r3
}

; FUNC-LABEL: {{^}}fma_v2f32:
; SI: v_fma_f32
; SI: v_fma_f32

; GFX906: v_fma_f32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
; GFX906: v_fmac_f32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}

; EG: MEM_RAT_{{.*}} STORE_{{.*}} [[RES:T[0-9]]].[[CHLO:[XYZW]]][[CHHI:[XYZW]]], {{T[0-9]\.[XYZW]}},
; EG-DAG: FMA {{\*? *}}[[RES]].[[CHLO]]
; EG-DAG: FMA {{\*? *}}[[RES]].[[CHHI]]
define amdgpu_kernel void @fma_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
                                     <2 x float> addrspace(1)* %in2, <2 x float> addrspace(1)* %in3) {
  %r0 = load <2 x float>, <2 x float> addrspace(1)* %in1
  %r1 = load <2 x float>, <2 x float> addrspace(1)* %in2
  %r2 = load <2 x float>, <2 x float> addrspace(1)* %in3
  %r3 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %r0, <2 x float> %r1, <2 x float> %r2)
  store <2 x float> %r3, <2 x float> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}fma_v4f32:
; SI: v_fma_f32
; SI: v_fma_f32
; SI: v_fma_f32
; SI: v_fma_f32
; GFX906: v_fma_f32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
; GFX906: v_fma_f32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
; GFX906: v_fma_f32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
; GFX906: v_fmac_f32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+$}}

; EG: MEM_RAT_{{.*}} STORE_{{.*}} [[RES:T[0-9]]].{{[XYZW][XYZW][XYZW][XYZW]}}, {{T[0-9]\.[XYZW]}},
; EG-DAG: FMA {{\*? *}}[[RES]].X
; EG-DAG: FMA {{\*? *}}[[RES]].Y
; EG-DAG: FMA {{\*? *}}[[RES]].Z
; EG-DAG: FMA {{\*? *}}[[RES]].W
define amdgpu_kernel void @fma_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
                                     <4 x float> addrspace(1)* %in2, <4 x float> addrspace(1)* %in3) {
  %r0 = load <4 x float>, <4 x float> addrspace(1)* %in1
  %r1 = load <4 x float>, <4 x float> addrspace(1)* %in2
  %r2 = load <4 x float>, <4 x float> addrspace(1)* %in3
  %r3 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %r0, <4 x float> %r1, <4 x float> %r2)
  store <4 x float> %r3, <4 x float> addrspace(1)* %out
  ret void
}

; The inline-immediate multiplicand (2.0) should be commuted into a
; usable source position of the fma.
; FUNC-LABEL: @fma_commute_mul_inline_imm_f32
; SI: v_fma_f32 {{v[0-9]+}}, {{v[0-9]+}}, 2.0, {{v[0-9]+}}
define amdgpu_kernel void @fma_commute_mul_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
  %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid

  %a = load float, float addrspace(1)* %in.a.gep, align 4
  %b = load float, float addrspace(1)* %in.b.gep, align 4

  %fma = call float @llvm.fma.f32(float %a, float 2.0, float %b)
  store float %fma, float addrspace(1)* %out.gep, align 4
  ret void
}

; FUNC-LABEL: @fma_commute_mul_s_f32
define amdgpu_kernel void @fma_commute_mul_s_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b, float %b) nounwind {
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
  %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid

  %a = load float, float addrspace(1)* %in.a.gep, align 4
  %c = load float, float addrspace(1)* %in.b.gep, align 4

  %fma = call float @llvm.fma.f32(float %a, float %b, float %c)
  store float %fma, float addrspace(1)* %out.gep, align 4
  ret void
}

; Without special casing the inline constant check for v_fmac_f32's
; src2, this fails to fold the 1.0 into an fma.

; FUNC-LABEL: {{^}}fold_inline_imm_into_fmac_src2_f32:
; GFX906: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]]
; GFX906: {{buffer|flat|global}}_load_dword [[B:v[0-9]+]]

; GFX906: v_add_f32_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
; GFX906: v_fma_f32 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
define amdgpu_kernel void @fold_inline_imm_into_fmac_src2_f32(float addrspace(1)* %out, float addrspace(1)* %a, float addrspace(1)* %b) nounwind {
bb:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.a = getelementptr inbounds float, float addrspace(1)* %a, i64 %tid.ext
  %gep.b = getelementptr inbounds float, float addrspace(1)* %b, i64 %tid.ext
  %gep.out = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
  %tmp = load volatile float, float addrspace(1)* %gep.a
  %tmp1 = load volatile float, float addrspace(1)* %gep.b
  %tmp2 = fadd contract float %tmp, %tmp
  %tmp3 = fmul contract float %tmp2, 4.0
  %tmp4 = fsub contract float 1.0, %tmp3
  %tmp5 = fadd contract float %tmp4, %tmp1
  %tmp6 = fadd contract float %tmp1, %tmp1
  %tmp7 = fmul contract float %tmp6, %tmp
  %tmp8 = fsub contract float 1.0, %tmp7
  %tmp9 = fmul contract float %tmp8, 8.0
  %tmp10 = fadd contract float %tmp5, %tmp9
  store float %tmp10, float addrspace(1)* %gep.out
  ret void
}