//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"

R600InstrInfo::R600InstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUInstrInfo(st), RI() {}

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}
void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  unsigned VectorComponents = 0;
  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
      AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 4;
  } else if ((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||
            AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&
            (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||
             AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    // Split 64-bit and 128-bit copies into one MOV per channel.
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
                                    .setIsKill(KillSrc);
  }
}
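
// For illustration (hypothetical registers): copying the 128-bit T1_XYZW into
// T0_XYZW expands to four channel moves, each implicitly defining the super
// register so liveness stays correct:
//   MOV T0.X, T1.X   (implicit-def T0_XYZW)
//   MOV T0.Y, T1.Y   (implicit-def T0_XYZW)
//   MOV T0.Z, T1.Z   (implicit-def T0_XYZW)
//   MOV T0.W, T1.W   (implicit-def T0_XYZW)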

/// \returns true if \p MBBI can be moved into a new basic block.
bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI) const {
  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
                                        E = MBBI->operands_end(); I != E; ++I) {
    if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
        I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
      return false;
  }
  return true;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as placeholders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
    default: return false;
    case AMDGPU::CUBE_r600_pseudo:
    case AMDGPU::CUBE_r600_real:
    case AMDGPU::CUBE_eg_pseudo:
    case AMDGPU::CUBE_eg_real:
      return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1;
}

bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}

bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
  if (isALUInstr(MI->getOpcode()))
    return true;
  if (isVector(*MI) || isCubeOp(MI->getOpcode()))
    return true;
  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::COPY:
  case AMDGPU::DOT_4:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
  return isVectorOnly(MI->getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
  return MFI->getShaderType() != ShaderType::COMPUTE &&
    usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
  return (MFI->getShaderType() == ShaderType::COMPUTE &&
          usesVertexCache(MI->getOpcode())) ||
    usesTextureCache(MI->getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
  if (!isALUInstr(MI->getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
                                        E = MI->operands_end(); I != E; ++I) {
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
  static const unsigned OpTable[] = {
    AMDGPU::OpName::src0,
    AMDGPU::OpName::src1,
    AMDGPU::OpName::src2
  };

  assert(SrcNum < 3);
  return getOperandIdx(Opcode, OpTable[SrcNum]);
}

int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
  };

  for (const auto &Row : SrcSelTable) {
    if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, Row[1]);
    }
  }
  return -1;
}

SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                        OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                    OpTable[j][1])).getImm();
        Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI->getOperand(SrcIdx);
    unsigned Reg = MO.getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      unsigned Sel = MI->getOperand(
          getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      unsigned Imm = MI->getOperand(
          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
      continue;
    }
    Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
  }
  return Result;
}
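
// For illustration: each returned pair is (source operand, payload). The
// payload is the constant-buffer selector for ALU_CONST sources, the literal
// bits for ALU_LITERAL_X sources, and 0 for plain register sources. A
// hypothetical "MULADD dst, KC0[2].x, 1.0f, T0.y" would thus yield roughly
// (src0, sel of KC0[2].x), (src1, bits of 1.0f), (src2, 0).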

std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (unsigned n = Srcs.size(); i < n; ++i) {
    unsigned Reg = Srcs[i].first->getReg();
    unsigned Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::pair<int, unsigned>(Index, 0));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to mark a PS/PV reg.
      Result.push_back(std::pair<int, unsigned>(255, 0));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::pair<int, unsigned>(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  // Two identical sources are read through a single port, so ignore the
  // duplicate.
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}
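
// Naming convention, as I read it from the enum: ALU_VEC_abc_SCL_xyz means a
// vector-slot instruction reads its sources in operand order a,b,c, while the
// same swizzle value makes a trans/scalar-slot instruction read src0/src1/src2
// in cycles x,y,z respectively (see getTransSwizzle below). For example,
// ALU_VEC_021_SCL_122 swaps src1/src2 for vector slots, and reads src0 at
// cycle 1 and src1/src2 at cycle 2 in the trans slot. ALU_VEC_201 and
// ALU_VEC_210 have no scalar counterpart and are rejected by getTransSwizzle.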

static unsigned
getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
    return 0;
  }
}

/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
          // The value from output queue A (denoted by register OQAP) can
          // only be fetched during the first cycle.
          return 0;
        }
        // OQAP does not count towards the normal read port restrictions.
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check the Trans ALU.
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}
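
// A sketch of the rule being enforced: Vector[Bank][Cycle] records which GPR
// index is read on each of the four register banks (the X/Y/Z/W channels)
// during each of the three read cycles. Two sources may share a bank/cycle
// pair only if they name the same GPR index. With hypothetical sources T0.x
// and T5.x, both read on bank X: if both land on cycle 0 the group is
// illegal, but a swizzle that moves the T5.x read to cycle 1 makes it legal.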

/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic terms) swizzle sequence assuming that all swizzles after
/// Idx can be skipped.
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx--;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}
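
// This walks the candidate vector like an odometer: starting from Idx, digits
// that already hold the maximal swizzle (ALU_VEC_210) are reset to the minimal
// one (ALU_VEC_012_SCL_210) and the first non-maximal digit is incremented.
// E.g. with swizzles numbered 0..5, the candidate [1, 5, 5] at Idx = 2 steps
// to [2, 0, 0]; returning false means the whole space has been exhausted.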

/// Enumerate all possible swizzle sequences to find one that meets all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// Instructions in the Trans slot can't read a gpr at cycle 0 if they also
/// read a const, and can't read a gpr at cycle 1 if they read two consts.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned> > &TransOps,
                  unsigned ConstCount) {
  // The Trans ALU can't read three constants.
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  // TODO: support shared src0 - src1 operands.

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
        AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back((R600InstrInfo::BankSwizzle)
        IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned> > TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = std::move(IGSrcs.back());
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
        TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert(Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}
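
// Rough model (my reading of the masking above): each constant is identified
// by (Index << 2) | Chan, and ReadHalfConst collapses the channel to its high
// bit, i.e. to the xy or zw half of a 128-bit constant slot. An instruction
// group may read from at most two distinct halves, so e.g. KC0[0].x and
// KC0[0].y share one half, KC0[1].z occupies the second, and a read from a
// third distinct half makes the group illegal.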

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr *MI = MIs[i];
    if (!isALUInstr(MI->getOpcode()))
      continue;

    const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Srcs =
        getSrcs(MI);

    for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
      const std::pair<MachineOperand *, int64_t> &Src = Srcs[j];
      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *
R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
  const InstrItineraryData *II = STI.getInstrItineraryData();
  return static_cast<const AMDGPUSubtarget &>(STI).createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return nullptr;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
      Opcode == AMDGPU::BRANCH_COND_f32;
}

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  // AMDGPU::BRANCH* instructions are only available after isel and are not
  // handled.
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Remove successive JUMPs.
  while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
      MachineBasicBlock::iterator PriorI = std::prev(I);
      if (AllowModify)
        I->removeFromParent();
      I = PriorI;
  }
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
          !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
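
// The recognized terminator shapes mirror the ARM convention:
//   PRED_X ...; JUMP_COND %bb.true                -> conditional + fall-through
//   PRED_X ...; JUMP_COND %bb.true; JUMP %bb.f    -> conditional, explicit FBB
// Cond holds the predicate setter's first two operands plus PRED_SEL_ONE;
// ReverseBranchCondition below flips Cond[1] (the compare opcode immediate)
// and Cond[2] (the PRED_SEL_* register).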

static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
      It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return std::prev(It.base());
  }
  return MBB.end();
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate!");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert(CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate!");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
            .addMBB(TBB)
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Note: we leave PRED* instructions here.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated.  Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI->getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of the MBB, then the MBB contains
    // more than one clause; we are unable to predicate multiple clauses.
    if (MI->getParent()->begin() != MachineBasicBlock::iterator(MI))
      return false;
    // TODO: We don't support KC merging atm.
    if (MI->getOperand(3).getImm() != 0 || MI->getOperand(4).getImm() != 0)
      return false;
    return true;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                      const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (MI->getOpcode() == AMDGPU::CF_ALU) {
    MI->getOperand(8).setImm(0);
    return true;
  }

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getPredicationCost(const MachineInstr *) const {
  return 2;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

bool R600InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::R600_EXTRACT_ELT_V2:
  case AMDGPU::R600_EXTRACT_ELT_V4:
    buildIndirectRead(MI->getParent(), MI, MI->getOperand(0).getReg(),
                      RI.getHWRegIndex(MI->getOperand(1).getReg()), // Address
                      MI->getOperand(2).getReg(),                   // Offset
                      RI.getHWRegChan(MI->getOperand(1).getReg())); // Channel
    break;
  case AMDGPU::R600_INSERT_ELT_V2:
  case AMDGPU::R600_INSERT_ELT_V4:
    buildIndirectWrite(MI->getParent(), MI, MI->getOperand(2).getReg(), // Value
                       RI.getHWRegIndex(MI->getOperand(1).getReg()),  // Address
                       MI->getOperand(3).getReg(),                    // Offset
                       RI.getHWRegChan(MI->getOperand(1).getReg()));  // Channel
    break;
  }
  MI->eraseFromParent();
  return true;
}

void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
      MF.getSubtarget().getFrameLowering());

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Reserved.set(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Reserved.set(Reg);
    }
  }
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2.
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::R600_TReg32_XRegClass;
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
    default: llvm_unreachable("Invalid Channel");
    case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
    case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
    case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
    case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  // Load the offset into the address register, then do a relative store.
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X,
                                           RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
    default: llvm_unreachable("Invalid Channel");
    case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
    case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
    case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
    case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  // Load the offset into the address register, then do a relative read.
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X,
                                               OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg,
                                      AddrReg)
                                      .addReg(AMDGPU::AR_X,
                                           RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);            // $dst

  if (Src1Reg) {
    MIB.addImm(0)       // $update_exec_mask
       .addImm(0);      // $update_predicate
  }
  MIB.addImm(1)         // $write
     .addImm(0)         // $omod
     .addImm(0)         // $dst_rel
     .addImm(0)         // $dst_clamp
     .addReg(Src0Reg)   // $src0
     .addImm(0)         // $src0_neg
     .addImm(0)         // $src0_rel
     .addImm(0)         // $src0_abs
     .addImm(-1);       // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                    // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0)                    // $literal
     .addImm(0);                   // $bank_swizzle

  return MIB;
}
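
// Usage sketch (this mirrors buildMovImm below): to materialize an immediate,
// build a default MOV from ALU_LITERAL_X and then patch the $literal slot:
//   MachineInstr *MovImm =
//       buildDefaultInstruction(MBB, I, AMDGPU::MOV, Dst,
//                               AMDGPU::ALU_LITERAL_X);
//   setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);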

#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert(MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
      getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
  MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
      .setReg(MO.getReg());

  // Copy the per-slot modifier operands from the DOT_4 pseudo onto the slot
  // instruction.
  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert(MO.isImm());
    setImmOperand(MIB, Operands[i], MO.getImm());
  }
  // Operand 20 is $bank_swizzle in the layout built by
  // buildDefaultInstruction; reset it to the default.
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
  return MovImm;
}

MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned DstReg, unsigned SrcReg) const {
  return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
      case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    // Otherwise fall back to the single flag operand whose index is packed
    // into TSFlags.
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}