# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -verify-machineinstrs -regbankselect-fast -o - %s | FileCheck %s
# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -verify-machineinstrs -regbankselect-greedy -o - %s | FileCheck %s

---
name: div_fmas_sss_scc
legalized: true

body: |
  bb.0:
    liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
    ; CHECK-LABEL: name: div_fmas_sss_scc
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
    ; CHECK: [[COPY3:%[0-9]+]]:sgpr(s32) = COPY $sgpr3
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    ; CHECK: [[ICMP:%[0-9]+]]:sgpr(s32) = G_ICMP intpred(eq), [[COPY3]](s32), [[C]]
    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s1) = G_TRUNC [[ICMP]](s32)
    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    ; CHECK: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
    ; CHECK: [[COPY7:%[0-9]+]]:vcc(s1) = COPY [[TRUNC]](s1)
    ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s1)
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = COPY $sgpr1
    %2:_(s32) = COPY $sgpr2
    %3:_(s32) = COPY $sgpr3
    %4:_(s32) = G_CONSTANT i32 0
    %5:_(s1) = G_ICMP intpred(eq), %3, %4
    %6:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), %0, %1, %2, %5
...

---
name: div_fmas_sss_vcc
legalized: true

body: |
  bb.0:
    liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
    ; CHECK-LABEL: name: div_fmas_sss_vcc
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[COPY4]]
    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
    ; CHECK: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    ; CHECK: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
    ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[ICMP]](s1)
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = COPY $sgpr1
    %2:_(s32) = COPY $sgpr2
    %3:_(s32) = COPY $vgpr0
    %4:_(s32) = G_CONSTANT i32 0
    %5:_(s1) = G_ICMP intpred(eq), %3, %4
    %6:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), %0, %1, %2, %5
...

---
name: div_fmas_vss_vcc
legalized: true

body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $sgpr0, $sgpr1
    ; CHECK-LABEL: name: div_fmas_vss_vcc
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[COPY4]]
    ; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    ; CHECK: [[COPY6:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
    ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[COPY]](s32), [[COPY5]](s32), [[COPY6]](s32), [[ICMP]](s1)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $sgpr0
    %2:_(s32) = COPY $sgpr1
    %3:_(s32) = COPY $vgpr1
    %4:_(s32) = G_CONSTANT i32 0
    %5:_(s1) = G_ICMP intpred(eq), %3, %4
    %6:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), %0, %1, %2, %5
...

---
name: div_fmas_vvv_vcc
legalized: true

body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
    ; CHECK-LABEL: name: div_fmas_vvv_vcc
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY $vgpr3
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[COPY4]]
    ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[ICMP]](s1)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = COPY $vgpr3
    %4:_(s32) = G_CONSTANT i32 0
    %5:_(s1) = G_ICMP intpred(eq), %3, %4
    %6:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), %0, %1, %2, %5
...