; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s

; Codegen tests for folding two chained llvm.maxnum calls into a single
; AMDGPU v_max3 instruction.  Scalar f32 and f16 cases are checked in both
; operand orders (second fmax commuted); the final test confirms the combine
; is NOT applied to <2 x half> vectors.
;
; Per-target expectations:
;   SI   - no f16 max3; f16 inputs are converted to f32 and v_max3_f32 is used.
;   VI   - has f16 max but no v_max3_f16; emits two v_max_f16 instructions.
;   GFX9 - emits v_max3_f16 directly (and v_pk_max_f16 for the vector case).

; GCN-LABEL: {{^}}test_fmax3_olt_0_f32:
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: v_max3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @test_fmax3_olt_0_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #0 {
  ; Volatile loads keep the three loads distinct and ordered for the checks.
  %a = load volatile float, float addrspace(1)* %aptr, align 4
  %b = load volatile float, float addrspace(1)* %bptr, align 4
  %c = load volatile float, float addrspace(1)* %cptr, align 4
  %f0 = call float @llvm.maxnum.f32(float %a, float %b)
  %f1 = call float @llvm.maxnum.f32(float %f0, float %c)
  store float %f1, float addrspace(1)* %out, align 4
  ret void
}

; Commute operand of second fmax
; GCN-LABEL: {{^}}test_fmax3_olt_1_f32:
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: v_max3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @test_fmax3_olt_1_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #0 {
  %a = load volatile float, float addrspace(1)* %aptr, align 4
  %b = load volatile float, float addrspace(1)* %bptr, align 4
  %c = load volatile float, float addrspace(1)* %cptr, align 4
  %f0 = call float @llvm.maxnum.f32(float %a, float %b)
  ; %c is the first operand here, unlike test_fmax3_olt_0_f32.
  %f1 = call float @llvm.maxnum.f32(float %c, float %f0)
  store float %f1, float addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}test_fmax3_olt_0_f16:
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]

; SI-DAG: v_cvt_f32_f16_e32 [[CVT_A:v[0-9]+]], [[REGA]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_B:v[0-9]+]], [[REGB]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_C:v[0-9]+]], [[REGC]]
; SI: v_max3_f32 [[RESULT_F32:v[0-9]+]], [[CVT_A]], [[CVT_B]], [[CVT_C]]
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]

; VI: v_max_f16_e32
; VI: v_max_f16_e32 [[RESULT:v[0-9]+]],

; GFX9: v_max3_f16 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], [[REGC]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmax3_olt_0_f16(half addrspace(1)* %out, half addrspace(1)* %aptr, half addrspace(1)* %bptr, half addrspace(1)* %cptr) #0 {
  %a = load volatile half, half addrspace(1)* %aptr, align 2
  %b = load volatile half, half addrspace(1)* %bptr, align 2
  %c = load volatile half, half addrspace(1)* %cptr, align 2
  %f0 = call half @llvm.maxnum.f16(half %a, half %b)
  %f1 = call half @llvm.maxnum.f16(half %f0, half %c)
  store half %f1, half addrspace(1)* %out, align 2
  ret void
}

; Commute operand of second fmax
; GCN-LABEL: {{^}}test_fmax3_olt_1_f16:
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]

; SI-DAG: v_cvt_f32_f16_e32 [[CVT_A:v[0-9]+]], [[REGA]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_B:v[0-9]+]], [[REGB]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_C:v[0-9]+]], [[REGC]]
; SI: v_max3_f32 [[RESULT_F32:v[0-9]+]], [[CVT_C]], [[CVT_A]], [[CVT_B]]
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]

; VI: v_max_f16_e32
; VI: v_max_f16_e32 [[RESULT:v[0-9]+]],

; GFX9: v_max3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGA]], [[REGB]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmax3_olt_1_f16(half addrspace(1)* %out, half addrspace(1)* %aptr, half addrspace(1)* %bptr, half addrspace(1)* %cptr) #0 {
  %a = load volatile half, half addrspace(1)* %aptr, align 2
  %b = load volatile half, half addrspace(1)* %bptr, align 2
  %c = load volatile half, half addrspace(1)* %cptr, align 2
  %f0 = call half @llvm.maxnum.f16(half %a, half %b)
  ; %c is the first operand here, unlike test_fmax3_olt_0_f16.
  %f1 = call half @llvm.maxnum.f16(half %c, half %f0)
  store half %f1, half addrspace(1)* %out, align 2
  ret void
}

; Checks whether the test passes; performMinMaxCombine() should not optimize vector patterns of max3
; since there are no pack instructions for fmax3.
; GCN-LABEL: {{^}}no_fmax3_v2f16:

; SI: v_cvt_f16_f32_e32
; SI: v_max_f32_e32
; SI-NEXT: v_max_f32_e32
; SI-NEXT: v_max3_f32
; SI-NEXT: v_max3_f32

; VI: v_max_f16_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_max_f16_e32 v0, v0, v1
; VI: v_max_f16_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI: v_max_f16_e32 v0, v2, v0
; VI: v_max_f16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI: v_max_f16_e32 v0, v0, v3
; VI: v_or_b32_e32 v0, v0, v1

; GFX9: v_pk_max_f16
; GFX9-NEXT: v_pk_max_f16
; GFX9-NEXT: v_pk_max_f16
define <2 x half> @no_fmax3_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x half> %d) {
entry:
  %max = tail call fast <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %b)
  %max1 = tail call fast <2 x half> @llvm.maxnum.v2f16(<2 x half> %c, <2 x half> %max)
  %res = tail call fast <2 x half> @llvm.maxnum.v2f16(<2 x half> %max1, <2 x half> %d)
  ret <2 x half> %res
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare float @llvm.maxnum.f32(float, float) #1
declare half @llvm.maxnum.f16(half, half) #1
declare <2 x half> @llvm.maxnum.v2f16(<2 x half>, <2 x half>)

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }