; RUN: llc -amdgpu-scalarize-global-loads=false -stress-early-ifcvt -amdgpu-early-ifcvt=1 -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -stress-early-ifcvt -amdgpu-early-ifcvt=1 -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCNX3 %s

; FIXME: Most of these cases don't trigger because of broken cost
; heuristics. They should not need -stress-early-ifcvt.

; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle64:
; GCN: buffer_load_dwordx2 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
; GCN: v_cmp_neq_f64_e32 vcc, 1.0, v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
; GCN: v_add_f64 v{{\[}}[[ADD_LO:[0-9]+]]:[[ADD_HI:[0-9]+]]{{\]}}, v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}, v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
; GCN-DAG: v_cndmask_b32_e32 v[[RESULT_LO:[0-9]+]], v[[ADD_LO]], v[[VAL_LO]], vcc
; GCN-DAG: v_cndmask_b32_e32 v[[RESULT_HI:[0-9]+]], v[[ADD_HI]], v[[VAL_HI]], vcc
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
define amdgpu_kernel void @test_vccnz_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
entry:
  %v = load double, double addrspace(1)* %in
  %cc = fcmp oeq double %v, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = fadd double %v, %v
  br label %endif

endif:
  %r = phi double [ %v, %entry ], [ %u, %if ]
  store double %r, double addrspace(1)* %out
  ret void
}

; vcc branch with SGPR inputs
; GCN-LABEL: {{^}}test_vccnz_sgpr_ifcvt_triangle64:
; GCN: v_cmp_neq_f64
; GCN: v_add_f64
; GCN: v_cndmask_b32_e32
; GCN: v_cndmask_b32_e32
define amdgpu_kernel void @test_vccnz_sgpr_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(4)* %in) #0 {
entry:
  %v = load double, double addrspace(4)* %in
  %cc = fcmp oeq double %v, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = fadd double %v, %v
  br label %endif

endif:
  %r = phi double [ %v, %entry ], [ %u, %if ]
  store double %r, double addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle96:
; GCN: v_cmp_neq_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0

; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: s_mov_b64 vcc, [[CMP]]

; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc

; SI-DAG: buffer_store_dwordx2
; SI-DAG: buffer_store_dword v
; GCNX3: buffer_store_dwordx3
define amdgpu_kernel void @test_vccnz_ifcvt_triangle96(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %in, float %cnd) #0 {
entry:
  %v = load <3 x i32>, <3 x i32> addrspace(1)* %in
  %cc = fcmp oeq float %cnd, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = add <3 x i32> %v, %v
  br label %endif

endif:
  %r = phi <3 x i32> [ %v, %entry ], [ %u, %if ]
  store <3 x i32> %r, <3 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle128:
; GCN: v_cmp_neq_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0

; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: s_mov_b64 vcc, [[CMP]]

; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc

; GCN: buffer_store_dwordx4
define amdgpu_kernel void @test_vccnz_ifcvt_triangle128(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in, float %cnd) #0 {
entry:
  %v = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %cc = fcmp oeq float %cnd, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = add <4 x i32> %v, %v
  br label %endif

endif:
  %r = phi <4 x i32> [ %v, %entry ], [ %u, %if ]
  store <4 x i32> %r, <4 x i32> addrspace(1)* %out
  ret void
}