; RUN: llc -amdgpu-scalarize-global-loads=false -stress-early-ifcvt -amdgpu-early-ifcvt=1 -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; FIXME: Most of these cases don't trigger because of broken cost
; heuristics. Should not need -stress-early-ifcvt

; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle64:
; GCN: buffer_load_dwordx2 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
; GCN: v_cmp_neq_f64_e32 vcc, 1.0, v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
; GCN: v_add_f64 v{{\[}}[[ADD_LO:[0-9]+]]:[[ADD_HI:[0-9]+]]{{\]}}, v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}, v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
; GCN-DAG: v_cndmask_b32_e32 v[[RESULT_LO:[0-9]+]], v[[ADD_LO]], v[[VAL_LO]], vcc
; GCN-DAG: v_cndmask_b32_e32 v[[RESULT_HI:[0-9]+]], v[[ADD_HI]], v[[VAL_HI]], vcc
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
define amdgpu_kernel void @test_vccnz_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
entry:
  %v = load double, double addrspace(1)* %in
  %cc = fcmp oeq double %v, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = fadd double %v, %v
  br label %endif

endif:
  %r = phi double [ %v, %entry ], [ %u, %if ]
  store double %r, double addrspace(1)* %out
  ret void
}

; vcc branch with SGPR inputs
; GCN-LABEL: {{^}}test_vccnz_sgpr_ifcvt_triangle64:
; GCN: v_cmp_neq_f64
; GCN: v_add_f64
; GCN: v_cndmask_b32_e32
; GCN: v_cndmask_b32_e32
define amdgpu_kernel void @test_vccnz_sgpr_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(4)* %in) #0 {
entry:
  %v = load double, double addrspace(4)* %in
  %cc = fcmp oeq double %v, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = fadd double %v, %v
  br label %endif

endif:
  %r = phi double [ %v, %entry ], [ %u, %if ]
  store double %r, double addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle96:
; GCN: v_cmp_neq_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0

; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: s_mov_b64 vcc, [[CMP]]

; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc

; GCN-DAG: buffer_store_dword v
; GCN-DAG: buffer_store_dwordx2
define amdgpu_kernel void @test_vccnz_ifcvt_triangle96(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %in, float %cnd) #0 {
entry:
  %v = load <3 x i32>, <3 x i32> addrspace(1)* %in
  %cc = fcmp oeq float %cnd, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = add <3 x i32> %v, %v
  br label %endif

endif:
  %r = phi <3 x i32> [ %v, %entry ], [ %u, %if ]
  store <3 x i32> %r, <3 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle128:
; GCN: v_cmp_neq_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0

; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: s_mov_b64 vcc, [[CMP]]

; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc

; GCN: buffer_store_dwordx4
define amdgpu_kernel void @test_vccnz_ifcvt_triangle128(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in, float %cnd) #0 {
entry:
  %v = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %cc = fcmp oeq float %cnd, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = add <4 x i32> %v, %v
  br label %endif

endif:
  %r = phi <4 x i32> [ %v, %entry ], [ %u, %if ]
  store <4 x i32> %r, <4 x i32> addrspace(1)* %out
  ret void
}