//===-- AMDGPUInstrInfo.h - AMDGPU Instruction Information ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Contains the definition of a TargetInstrInfo class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_R600_AMDGPUINSTRINFO_H
#define LLVM_LIB_TARGET_R600_AMDGPUINSTRINFO_H

#include "AMDGPURegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <map>

// Pull in the TableGen-generated instruction info: the GenInstrInfo base
// class, the instruction opcode enum, and the named-operand index enum.
#define GET_INSTRINFO_HEADER
#define GET_INSTRINFO_ENUM
#define GET_INSTRINFO_OPERAND_ENUM
#include "AMDGPUGenInstrInfo.inc"

// Convenience aliases for the predicate-set opcodes used to test a value
// against zero (integer and floating-point variants).
#define OPCODE_IS_ZERO_INT AMDGPU::PRED_SETE_INT
#define OPCODE_IS_NOT_ZERO_INT AMDGPU::PRED_SETNE_INT
#define OPCODE_IS_ZERO AMDGPU::PRED_SETE
#define OPCODE_IS_NOT_ZERO AMDGPU::PRED_SETNE

namespace llvm {

class AMDGPUSubtarget;
class MachineFunction;
class MachineInstr;
class MachineInstrBuilder;

/// \brief Target instruction information shared by all AMD GPU subtargets.
///
/// Concrete subtargets (e.g. R600- and SI-family backends) derive from this
/// class and implement the pure virtual hooks at the bottom of the interface.
class AMDGPUInstrInfo : public AMDGPUGenInstrInfo {
private:
  const AMDGPURegisterInfo RI;
  // Out-of-line anchor so the vtable is emitted in one translation unit.
  virtual void anchor();
protected:
  const AMDGPUSubtarget &ST;
public:
  explicit AMDGPUInstrInfo(const AMDGPUSubtarget &st);

  /// \returns the register info object for the concrete subtarget.
  virtual const AMDGPURegisterInfo &getRegisterInfo() const = 0;

  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
                             unsigned &DstReg, unsigned &SubIdx) const override;

  // Stack-slot load queries (TargetInstrInfo overrides).
  unsigned isLoadFromStackSlot(const MachineInstr *MI,
                               int &FrameIndex) const override;
  unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                     int &FrameIndex) const override;
  bool hasLoadFromStackSlot(const MachineInstr *MI,
                            const MachineMemOperand *&MMO,
                            int &FrameIndex) const override;
  // Stack-slot store queries. NOTE(review): these are not marked 'override'
  // and the base-class hooks are named isStoreToStackSlot* -- presumably
  // target-local helpers with a divergent name; confirm against callers.
  unsigned isStoreFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
  unsigned isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                      int &FrameIndex) const;
  bool hasStoreFromStackSlot(const MachineInstr *MI,
                             const MachineMemOperand *&MMO,
                             int &FrameIndex) const;

  MachineInstr *
  convertToThreeAddress(MachineFunction::iterator &MFI,
                        MachineBasicBlock::iterator &MBBI,
                        LiveVariables *LV) const override;

  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;

  // Spill/reload code generation.
  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI,
                           unsigned SrcReg, bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;
  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            unsigned DestReg, int FrameIndex,
                            const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

protected:
  // Memory-operand folding hooks (fold a load/store into MI where possible).
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      ArrayRef<unsigned> Ops,
                                      int FrameIndex) const override;
  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      ArrayRef<unsigned> Ops,
                                      MachineInstr *LoadMI) const override;

public:
  /// \returns the smallest register index that will be accessed by an indirect
  /// read or write or -1 if indirect addressing is not used by this program.
  int getIndirectIndexBegin(const MachineFunction &MF) const;

  /// \returns the largest register index that will be accessed by an indirect
  /// read or write or -1 if indirect addressing is not used by this program.
  int getIndirectIndexEnd(const MachineFunction &MF) const;

  bool canFoldMemoryOperand(const MachineInstr *MI,
                            ArrayRef<unsigned> Ops) const override;
  bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                           unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                           SmallVectorImpl<MachineInstr *> &NewMIs) const override;
  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                           SmallVectorImpl<SDNode *> &NewNodes) const override;
  unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore,
                                      unsigned *LoadRegIndex = nullptr) const override;

  bool enableClusterLoads() const override;

  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                               int64_t Offset1, int64_t Offset2,
                               unsigned NumLoads) const override;

  // Branch/predication hooks used by if-conversion and branch folding.
  bool
  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;
  bool isPredicated(const MachineInstr *MI) const override;
  bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                         const SmallVectorImpl<MachineOperand> &Pred2) const override;
  bool DefinesPredicate(MachineInstr *MI,
                        std::vector<MachineOperand> &Pred) const override;
  bool isPredicable(MachineInstr *MI) const override;
  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;

  // Helper functions that check the opcode for status information
  bool isRegisterStore(const MachineInstr &MI) const;
  bool isRegisterLoad(const MachineInstr &MI) const;

  /// \brief Return a target-specific opcode if Opcode is a pseudo instruction.
  /// Return -1 if the target-specific opcode for the pseudo instruction does
  /// not exist. If Opcode is not a pseudo instruction, this is identity.
  int pseudoToMCOpcode(int Opcode) const;

  /// \brief Return the descriptor of the target-specific machine instruction
  /// that corresponds to the specified pseudo or native opcode.
  // NOTE(review): pseudoToMCOpcode() documents a -1 result for unmapped
  // pseudos; that value is passed straight to get() here, so callers
  // presumably must only pass opcodes with a valid mapping -- confirm.
  const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const {
    return get(pseudoToMCOpcode(Opcode));
  }

//===---------------------------------------------------------------------===//
// Pure virtual functions to be implemented by sub-classes.
//===---------------------------------------------------------------------===//

  /// \returns true if \p opcode is a move instruction on this subtarget.
  virtual bool isMov(unsigned opcode) const = 0;

  /// \brief Calculate the "Indirect Address" for the given \p RegIndex and
  /// \p Channel
  ///
  /// We model indirect addressing using a virtual address space that can be
  /// accessed with loads and stores. The "Indirect Address" is the memory
  /// address in this virtual address space that maps to the given \p RegIndex
  /// and \p Channel.
  virtual unsigned calculateIndirectAddress(unsigned RegIndex,
                                            unsigned Channel) const = 0;

  /// \returns The register class to be used for loading and storing values
  /// from an "Indirect Address".
  virtual const TargetRegisterClass *getIndirectAddrRegClass() const = 0;

  /// \brief Build instruction(s) for an indirect register write.
  ///
  /// \returns The instruction that performs the indirect register write
  virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
                                                 MachineBasicBlock::iterator I,
                                                 unsigned ValueReg, unsigned Address,
                                                 unsigned OffsetReg) const = 0;

  /// \brief Build instruction(s) for an indirect register read.
  ///
  /// \returns The instruction that performs the indirect register read
  virtual MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
                                                MachineBasicBlock::iterator I,
                                                unsigned ValueReg, unsigned Address,
                                                unsigned OffsetReg) const = 0;

  /// \brief Build a MOV instruction.
  virtual MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator I,
                                      unsigned DstReg, unsigned SrcReg) const = 0;

  /// \brief Given a MIMG \p Opcode that writes all 4 channels, return the
  /// equivalent opcode that writes \p Channels Channels.
  int getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const;

};

namespace AMDGPU {
  /// \returns the operand index for \p NamedIndex in \p Opcode, as generated
  /// by TableGen (GET_INSTRINFO_OPERAND_ENUM).
  int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
} // End namespace AMDGPU

} // End llvm namespace

// Machine-instruction flag bits marking the register load/store pseudos
// recognized by isRegisterLoad()/isRegisterStore().
#define AMDGPU_FLAG_REGISTER_LOAD  (UINT64_C(1) << 63)
#define AMDGPU_FLAG_REGISTER_STORE (UINT64_C(1) << 62)

#endif