//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//


#include "SIRegisterInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo() {}

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::EXEC);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers,
  // but this seems likely to result in bugs, so I'm marking them as reserved.
  Reserved.set(AMDGPU::EXEC_LO);
  Reserved.set(AMDGPU::EXEC_HI);

  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
  Reserved.set(AMDGPU::FLAT_SCR);
  Reserved.set(AMDGPU::FLAT_SCR_LO);
  Reserved.set(AMDGPU::FLAT_SCR_HI);

  // Reserve some VGPRs to use as temp registers in case we have to spill VGPRs
  Reserved.set(AMDGPU::VGPR255);
  Reserved.set(AMDGPU::VGPR254);

  // Tonga and Iceland can only allocate a fixed number of SGPRs due
  // to a hw bug.
  if (MF.getSubtarget<AMDGPUSubtarget>().hasSGPRInitBug()) {
    unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
    // Reserve some SGPRs for FLAT_SCRATCH and VCC (4 SGPRs).
    // Assume XNACK_MASK is unused.
    unsigned Limit = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 4;

    for (unsigned i = Limit; i < NumSGPRs; ++i) {
      unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
      MCRegAliasIterator R = MCRegAliasIterator(Reg, this, true);

      for (; R.isValid(); ++R)
        Reserved.set(*R);
    }
  }

  return Reserved;
}

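// Pressure set limits are derived from the per-wave SGPR/VGPR budgets at the
// maximum wave count: each register class's limit is its budget divided by
// the number of 32-bit sub-registers the class spans.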
unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {

  const AMDGPUSubtarget &STI = MF.getSubtarget<AMDGPUSubtarget>();
  // FIXME: We should adjust the max number of waves based on LDS size.
  unsigned SGPRLimit = getNumSGPRsAllowed(STI.getGeneration(),
                                          STI.getMaxWavesPerCU());
  unsigned VGPRLimit = getNumVGPRsAllowed(STI.getMaxWavesPerCU());

  for (regclass_iterator I = regclass_begin(), E = regclass_end();
       I != E; ++I) {

    unsigned NumSubRegs = std::max((int)(*I)->getSize() / 4, 1);
    unsigned Limit;

    if (isSGPRClass(*I)) {
      Limit = SGPRLimit / NumSubRegs;
    } else {
      Limit = VGPRLimit / NumSubRegs;
    }

    const int *Sets = getRegClassPressureSets(*I);
    assert(Sets);
    for (unsigned i = 0; Sets[i] != -1; ++i) {
      if (Sets[i] == (int)Idx)
        return Limit;
    }
  }
  return 256;
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo()->hasStackObjects();
}

static unsigned getNumSubRegsForSpillOp(unsigned Op) {

  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

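// Expands a VGPR spill/restore pseudo into single-dword scratch buffer
// accesses, one per 32-bit sub-register. Offsets that do not fit in the
// 12-bit immediate are first added into a scavenged SGPR.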
void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchRsrcReg,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {

  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  unsigned SOffset = ScratchOffset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;

  if (!isUInt<12>(Offset + Size)) {
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
        .addReg(ScratchOffset)
        .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;
    bool IsKill = (i == e - 1);

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
        .addReg(SubReg, getDefRegState(IsLoad))
        .addReg(ScratchRsrcReg, getKillRegState(IsKill))
        .addReg(SOffset)
        .addImm(Offset)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // tfe
        .addReg(Value, RegState::Implicit | getDefRegState(IsLoad));
  }
}

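// Lowers the SI_SPILL_* pseudos: SGPR saves/restores become
// V_WRITELANE_B32/V_READLANE_B32 to and from the VGPR lanes recorded in
// SIMachineFunctionInfo, VGPR spills become scratch buffer accesses, and any
// remaining frame index is folded to an immediate (materialized in a VGPR if
// the immediate is not legal for the instruction).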
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
  // SGPR register spill
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      struct SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      if (Spill.VGPR == AMDGPU::NoRegister) {
        LLVMContext &Ctx = MF->getFunction()->getContext();
        Ctx.emitError("Ran out of VGPRs for spilling SGPR");
      }

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
          .addReg(SubReg)
          .addImm(Spill.Lane);

    }
    MI->eraseFromParent();
    break;
  }


  // SGPR register restore
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      bool isM0 = SubReg == AMDGPU::M0;
      struct SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      if (Spill.VGPR == AMDGPU::NoRegister) {
        LLVMContext &Ctx = MF->getFunction()->getContext();
        Ctx.emitError("Ran out of VGPRs for spilling SGPR");
      }

      if (isM0)
        SubReg = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
              SubReg)
          .addReg(Spill.VGPR)
          .addImm(Spill.Lane)
          .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
      if (isM0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
            .addReg(SubReg);
      }
    }

    // TODO: only do this when it is needed
    switch (MF->getSubtarget<AMDGPUSubtarget>().getGeneration()) {
    case AMDGPUSubtarget::SOUTHERN_ISLANDS:
      // "VALU writes SGPR" -> "SMRD reads that SGPR" needs "S_NOP 3" on SI
      TII->insertNOPs(MI, 3);
      break;
    case AMDGPUSubtarget::SEA_ISLANDS:
      break;
    default: // VOLCANIC_ISLANDS and later
      // "VALU writes SGPR -> VMEM reads that SGPR" needs "S_NOP 4" on VI
      // and later. This also applies to VALUs which write VCC, but we're
      // unlikely to see VMEM use VCC.
      TII->insertNOPs(MI, 4);
    }

    MI->eraseFromParent();
    break;
  }


  // VGPR register spill
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V32_SAVE:
    buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
          TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;
  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
  case AMDGPU::SI_SPILL_V512_RESTORE: {
    buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
          TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;
  }

  default: {
    int64_t Offset = FrameInfo->getObjectOffset(Index);
    FIOp.ChangeToImmediate(Offset);
    if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
      unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
      BuildMI(*MBB, MI, MI->getDebugLoc(),
              TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
          .addImm(Offset);
      FIOp.ChangeToRegister(TmpReg, false, false, true);
    }
  }
  }
}


const TargetRegisterClass *SIRegisterInfo::getCFGStructurizerRegClass(
    MVT VT) const {
  switch(VT.SimpleTy) {
  default:
  case MVT::i32: return &AMDGPU::VGPR_32RegClass;
  }
}

unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
  return getEncodingValue(Reg) & 0xff;
}

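// Finds the register class of a physical register by checking the base
// classes below from narrowest to widest; returns nullptr if none contains
// Reg.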
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_512RegClass, RC);
}

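// Maps a scalar register class to the vector register class of the same
// width; classes that already contain VGPRs are returned unchanged.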
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
    const TargetRegisterClass *SRC) const {
  if (hasVGPRs(SRC)) {
    return SRC;
  } else if (SRC == &AMDGPU::SCCRegRegClass) {
    return &AMDGPU::VCCRegRegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_32RegClass)) {
    return &AMDGPU::VGPR_32RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_64RegClass)) {
    return &AMDGPU::VReg_64RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_128RegClass)) {
    return &AMDGPU::VReg_128RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_256RegClass)) {
    return &AMDGPU::VReg_256RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_512RegClass)) {
    return &AMDGPU::VReg_512RegClass;
  }
  return nullptr;
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
    const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // If this register class has sub-registers, they are all 32-bit, because
  // all of SI's sub-registers are 32-bit.
  if (isSGPRClass(RC)) {
    return &AMDGPU::SGPR_32RegClass;
  } else {
    return &AMDGPU::VGPR_32RegClass;
  }
}

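// Returns the physical register backing channel 'Channel' of 'Reg', looked up
// in the 32-bit class 'SubRC'. VCC, FLAT_SCR and EXEC have named halves and
// are handled explicitly.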
unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
                                          const TargetRegisterClass *SubRC,
                                          unsigned Channel) const {

  switch (Reg) {
  case AMDGPU::VCC:
    switch(Channel) {
    case 0: return AMDGPU::VCC_LO;
    case 1: return AMDGPU::VCC_HI;
    default: llvm_unreachable("Invalid SubIdx for VCC");
    }

  case AMDGPU::FLAT_SCR:
    switch (Channel) {
    case 0:
      return AMDGPU::FLAT_SCR_LO;
    case 1:
      return AMDGPU::FLAT_SCR_HI;
    default:
      llvm_unreachable("Invalid SubIdx for FLAT_SCR");
    }
    break;

  case AMDGPU::EXEC:
    switch (Channel) {
    case 0:
      return AMDGPU::EXEC_LO;
    case 1:
      return AMDGPU::EXEC_HI;
    default:
      llvm_unreachable("Invalid SubIdx for EXEC");
    }
    break;
  }

  const TargetRegisterClass *RC = getPhysRegClass(Reg);
  // 32-bit registers don't have sub-registers, so we can just return Reg. We
  // need this check here because the calculation below using getHWRegIndex()
  // would fail for special 32-bit registers like VCC_LO, VCC_HI, EXEC_LO,
  // EXEC_HI and M0.
  if (RC->getSize() == 4) {
    assert(Channel == 0);
    return Reg;
  }

  unsigned Index = getHWRegIndex(Reg);
  return SubRC->getRegister(Index + Channel);
}

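// OPERAND_REG_IMM32 operands can encode an arbitrary 32-bit literal;
// OPERAND_REG_INLINE_C operands accept only inline constants.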
bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  return OpType == AMDGPU::OPERAND_REG_IMM32;
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (opCanUseLiteralConstant(OpType))
    return true;

  return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}

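// Returns the physical register holding a value preloaded by the hardware
// (work-group IDs, work-item IDs, scratch wave offset, input/scratch
// pointers), based on the number of user SGPRs recorded in
// SIMachineFunctionInfo.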
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  switch (Value) {
  case SIRegisterInfo::TGID_X:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0);
  case SIRegisterInfo::TGID_Y:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1);
  case SIRegisterInfo::TGID_Z:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
  case SIRegisterInfo::SCRATCH_WAVE_OFFSET:
    if (MFI->getShaderType() != ShaderType::COMPUTE)
      return MFI->ScratchOffsetReg;
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
  case SIRegisterInfo::SCRATCH_PTR:
    return AMDGPU::SGPR2_SGPR3;
  case SIRegisterInfo::INPUT_PTR:
    return AMDGPU::SGPR0_SGPR1;
  case SIRegisterInfo::TIDIG_X:
    return AMDGPU::VGPR0;
  case SIRegisterInfo::TIDIG_Y:
    return AMDGPU::VGPR1;
  case SIRegisterInfo::TIDIG_Z:
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
/// If all registers are used, then this function will return
/// AMDGPU::NoRegister.
unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                            const TargetRegisterClass *RC) const {

  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    if (!MRI.isPhysRegUsed(*I))
      return *I;
  }
  return AMDGPU::NoRegister;
}

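// Maximum number of VGPRs a single wave may use at the given wave count.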
unsigned SIRegisterInfo::getNumVGPRsAllowed(unsigned WaveCount) const {
  switch(WaveCount) {
  case 10: return 24;
  case 9: return 28;
  case 8: return 32;
  case 7: return 36;
  case 6: return 40;
  case 5: return 48;
  case 4: return 64;
  case 3: return 84;
  case 2: return 128;
  default: return 256;
  }
}

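// Maximum number of SGPRs a single wave may use at the given wave count;
// Volcanic Islands and later parts use a different table than SI/CI.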
unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
                                            unsigned WaveCount) const {
  if (gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    switch (WaveCount) {
    case 10: return 80;
    case 9: return 80;
    case 8: return 96;
    default: return 102;
    }
  } else {
    switch(WaveCount) {
    case 10: return 48;
    case 9: return 56;
    case 8: return 64;
    case 7: return 72;
    case 6: return 80;
    case 5: return 96;
    default: return 103;
    }
  }
}