//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
///
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %vreg3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// that define a <vsrc> register and constrains their definition class to
/// <vgpr> if a user of the PHI's result is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr>, then the coalescer
/// will be unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "sgpr-copies"

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {

private:
  static char ID;
  const TargetRegisterClass *inferRegClassFromUses(const SIRegisterInfo *TRI,
                                                   const MachineRegisterInfo &MRI,
                                                   unsigned Reg,
                                                   unsigned SubReg) const;
  const TargetRegisterClass *inferRegClassFromDef(const SIRegisterInfo *TRI,
                                                  const MachineRegisterInfo &MRI,
                                                  unsigned Reg,
                                                  unsigned SubReg) const;
  bool isVGPRToSGPRCopy(const MachineInstr &Copy, const SIRegisterInfo *TRI,
                        const MachineRegisterInfo &MRI) const;

public:
  SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Fix SGPR copies";
  }

};

} // End anonymous namespace

char SIFixSGPRCopies::ID = 0;

FunctionPass *llvm::createSIFixSGPRCopiesPass(TargetMachine &tm) {
  return new SIFixSGPRCopies(tm);
}

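/// Return true if any virtual register operand of \p MI has a register class
/// that contains VGPRs.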
static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}

/// This function walks the use list of \p Reg until it finds an instruction
/// that isn't a COPY and returns the register class of that use.
/// \return The common register class of \p Reg and its transitive non-COPY uses.
const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromUses(
                                                 const SIRegisterInfo *TRI,
                                                 const MachineRegisterInfo &MRI,
                                                 unsigned Reg,
                                                 unsigned SubReg) const {

  const TargetRegisterClass *RC
    = TargetRegisterInfo::isVirtualRegister(Reg) ?
      MRI.getRegClass(Reg) :
      TRI->getRegClass(Reg);

  RC = TRI->getSubRegClass(RC, SubReg);
  for (MachineRegisterInfo::use_instr_iterator
       I = MRI.use_instr_begin(Reg), E = MRI.use_instr_end(); I != E; ++I) {
    switch (I->getOpcode()) {
    case AMDGPU::COPY:
      RC = TRI->getCommonSubClass(RC, inferRegClassFromUses(TRI, MRI,
                                       I->getOperand(0).getReg(),
                                       I->getOperand(0).getSubReg()));
      break;
    }
  }

  return RC;
}

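/// Follow the chain of COPYs that defines \p Reg and return the register
/// class, restricted to \p SubReg, of the first non-COPY definition reached.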
const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromDef(
                                                 const SIRegisterInfo *TRI,
                                                 const MachineRegisterInfo &MRI,
                                                 unsigned Reg,
                                                 unsigned SubReg) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    const TargetRegisterClass *RC = TRI->getPhysRegClass(Reg);
    return TRI->getSubRegClass(RC, SubReg);
  }
  MachineInstr *Def = MRI.getVRegDef(Reg);
  if (Def->getOpcode() != AMDGPU::COPY) {
    return TRI->getSubRegClass(MRI.getRegClass(Reg), SubReg);
  }

  return inferRegClassFromDef(TRI, MRI, Def->getOperand(1).getReg(),
                              Def->getOperand(1).getSubReg());
}

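/// Return true if \p Copy is a COPY whose destination class is an SGPR class
/// and whose source class contains VGPRs.  Copies into the m0 class and
/// copies whose source is a VReg_1 (i1) value are not reported.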
bool SIFixSGPRCopies::isVGPRToSGPRCopy(const MachineInstr &Copy,
                                       const SIRegisterInfo *TRI,
                                       const MachineRegisterInfo &MRI) const {

  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();
  unsigned SrcSubReg = Copy.getOperand(1).getSubReg();

  const TargetRegisterClass *DstRC
    = TargetRegisterInfo::isVirtualRegister(DstReg) ?
      MRI.getRegClass(DstReg) :
      TRI->getRegClass(DstReg);

  const TargetRegisterClass *SrcRC;

  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      DstRC == &AMDGPU::M0RegRegClass ||
      MRI.getRegClass(SrcReg) == &AMDGPU::VReg_1RegClass)
    return false;

  SrcRC = TRI->getSubRegClass(MRI.getRegClass(SrcReg), SrcSubReg);
  return TRI->isSGPRClass(DstRC) && TRI->hasVGPRs(SrcRC);
}

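// Walk every instruction in the function, rewriting illegal VGPR -> SGPR
// copies and fixing up PHI, REG_SEQUENCE and INSERT_SUBREG instructions whose
// results would otherwise be constrained to SGPRs.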
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;
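      // A COPY from a VGPR class into an SGPR class cannot be lowered to a
      // real hardware copy, so the copy and the scalar users of its result
      // are moved over to the vector ALU instead.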
      if (MI.getOpcode() == AMDGPU::COPY && isVGPRToSGPRCopy(MI, TRI, MRI)) {
        DEBUG(dbgs() << "Fixing VGPR -> SGPR copy:\n");
        DEBUG(MI.print(dbgs()));
        TII->moveToVALU(MI);

      }

      switch (MI.getOpcode()) {
      default: continue;
      case AMDGPU::PHI: {
        DEBUG(dbgs() << "Fixing PHI: " << MI);

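        // PHI operands come in (value, predecessor MBB) pairs starting at
        // operand 1, so step by two and constrain each incoming value to the
        // register class implied by its (possibly copied) definition.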
        for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
          const MachineOperand &Op = MI.getOperand(i);
          unsigned Reg = Op.getReg();
          const TargetRegisterClass *RC
            = inferRegClassFromDef(TRI, MRI, Reg, Op.getSubReg());

          MRI.constrainRegClass(Op.getReg(), RC);
        }
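        // Infer a class for the PHI result from its uses.  If the result can
        // also live in a VGPR, constrain it to VGPR_32 so the coalescer cannot
        // later turn it into an SGPR definition.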
        unsigned Reg = MI.getOperand(0).getReg();
        const TargetRegisterClass *RC = inferRegClassFromUses(TRI, MRI, Reg,
                                                  MI.getOperand(0).getSubReg());
        if (TRI->getCommonSubClass(RC, &AMDGPU::VGPR_32RegClass)) {
          MRI.constrainRegClass(Reg, &AMDGPU::VGPR_32RegClass);
        }

        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands
        // we must move it to the VALU, because the SGPR operands will
        // all end up being assigned the same register, which means
        // there is a potential for a conflict if different threads take
        // different control flow paths.
        //
        // For Example:
        //
        // sgpr0 = def;
        // ...
        // sgpr1 = def;
        // ...
        // sgpr2 = PHI sgpr0, sgpr1
        // use sgpr2;
        //
        // Will Become:
        //
        // sgpr2 = def;
        // ...
        // sgpr2 = def;
        // ...
        // use sgpr2
        //
        // FIXME: This is OK if the branching decision is made based on an
        // SGPR value.
        bool SGPRBranch = false;

        // The one exception to this rule is when one of the operands
        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
        // instruction.  In this case, we know the program will
        // never enter the second block (the loop) without entering
        // the first block (where the condition is computed), so there
        // is no chance for values to be over-written.

        bool HasBreakDef = false;
        for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
          unsigned Reg = MI.getOperand(i).getReg();
          if (TRI->hasVGPRs(MRI.getRegClass(Reg))) {
            TII->moveToVALU(MI);
            break;
          }
          MachineInstr *DefInstr = MRI.getUniqueVRegDef(Reg);
          assert(DefInstr);
          switch (DefInstr->getOpcode()) {

          case AMDGPU::SI_BREAK:
          case AMDGPU::SI_IF_BREAK:
          case AMDGPU::SI_ELSE_BREAK:
          // If we see a PHI instruction that defines an SGPR, then that PHI
          // instruction has already been considered and should have
          // a *_BREAK as an operand.
          case AMDGPU::PHI:
            HasBreakDef = true;
            break;
          }
        }

        if (!SGPRBranch && !HasBreakDef)
          TII->moveToVALU(MI);
        break;
      }
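      // A REG_SEQUENCE that defines an SGPR result but reads VGPR operands
      // would require VGPR -> SGPR copies, so it is moved to the VALU as well.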
      case AMDGPU::REG_SEQUENCE: {
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI))
          continue;

        DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI);
        break;
      }
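      // Likewise, an INSERT_SUBREG that produces an SGPR result from a VGPR
      // source operand is moved to the VALU.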
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      }
    }
  }

  return true;
}