//===-- PPCInstrInfo.cpp - PowerPC Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the PowerPC implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "PPCInstrInfo.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCHazardRecognizers.h"
#include "PPCInstrBuilder.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "ppc-instr-info"

#define GET_INSTRMAP_INFO
#define GET_INSTRINFO_CTOR_DTOR
#include "PPCGenInstrInfo.inc"

static cl::opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis",
    cl::Hidden, cl::desc("Disable analysis for CTR loops"));

static cl::opt<bool> DisableCmpOpt("disable-ppc-cmp-opt",
    cl::desc("Disable compare instruction optimization"), cl::Hidden);

static cl::opt<bool> VSXSelfCopyCrash("crash-on-ppc-vsx-self-copy",
    cl::desc("Causes the backend to crash instead of generating a nop VSX copy"),
    cl::Hidden);

// Pin the vtable to this file.
void PPCInstrInfo::anchor() {}

PPCInstrInfo::PPCInstrInfo(PPCSubtarget &STI)
    : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
      Subtarget(STI), RI(STI.getTargetMachine()) {}

/// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
/// this target when scheduling the DAG.
ScheduleHazardRecognizer *
PPCInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                                           const ScheduleDAG *DAG) const {
  unsigned Directive =
      static_cast<const PPCSubtarget *>(STI)->getDarwinDirective();
  if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 ||
      Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) {
    const InstrItineraryData *II =
        static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG);
  }

  return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
}

/// CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer
/// to use for this target when scheduling the DAG.
ScheduleHazardRecognizer *
PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                                 const ScheduleDAG *DAG) const {
  unsigned Directive =
      DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();

  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8)
    return new PPCDispatchGroupSBHazardRecognizer(II, DAG);

  // Most subtargets use a PPC970 recognizer.
  if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 &&
      Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) {
    assert(DAG->TII && "No InstrInfo?");

    return new PPCHazardRecognizer970(*DAG);
  }

  return new ScoreboardHazardRecognizer(II, DAG);
}


int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MachineInstr *DefMI, unsigned DefIdx,
                                    const MachineInstr *UseMI,
                                    unsigned UseIdx) const {
  int Latency = PPCGenInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
                                                   UseMI, UseIdx);

  const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
  unsigned Reg = DefMO.getReg();

  bool IsRegCR;
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    const MachineRegisterInfo *MRI =
        &DefMI->getParent()->getParent()->getRegInfo();
    IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
              MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
  } else {
    IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
              PPC::CRBITRCRegClass.contains(Reg);
  }

  if (UseMI->isBranch() && IsRegCR) {
    if (Latency < 0)
      Latency = getInstrLatency(ItinData, DefMI);

    // On some cores, there is an additional delay between writing to a
    // condition register, and using it from a branch.
    unsigned Directive = Subtarget.getDarwinDirective();
    switch (Directive) {
    default: break;
    case PPC::DIR_7400:
    case PPC::DIR_750:
    case PPC::DIR_970:
    case PPC::DIR_E5500:
    case PPC::DIR_PWR4:
    case PPC::DIR_PWR5:
    case PPC::DIR_PWR5X:
    case PPC::DIR_PWR6:
    case PPC::DIR_PWR6X:
    case PPC::DIR_PWR7:
    case PPC::DIR_PWR8:
      Latency += 2;
      break;
    }
  }

  return Latency;
}

// Detect 32 -> 64-bit extensions where we may reuse the low sub-register.
bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                         unsigned &SrcReg, unsigned &DstReg,
                                         unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: return false;
  case PPC::EXTSW:
  case PPC::EXTSW_32_64:
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = PPC::sub_32;
    return true;
  }
}

unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                           int &FrameIndex) const {
  // Note: This list must be kept consistent with LoadRegFromStackSlot.
  switch (MI->getOpcode()) {
  default: break;
  case PPC::LD:
  case PPC::LWZ:
  case PPC::LFS:
  case PPC::LFD:
  case PPC::RESTORE_CR:
  case PPC::RESTORE_CRBIT:
  case PPC::LVX:
  case PPC::LXVD2X:
  case PPC::QVLFDX:
  case PPC::QVLFSXs:
  case PPC::QVLFDXb:
  case PPC::RESTORE_VRSAVE:
    // Check for the operands added by addFrameReference (the immediate is the
    // offset which defaults to 0).
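    // Illustrative sketch (not part of the original comment): a frame access
    // built by addFrameReference looks roughly like
    //   %r3 = LWZ 0, <frame-index>
    // so operand 1 is the immediate offset and operand 2 is the frame index,
    // which is exactly what the check below verifies.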
    if (MI->getOperand(1).isImm() && !MI->getOperand(1).getImm() &&
        MI->getOperand(2).isFI()) {
      FrameIndex = MI->getOperand(2).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                          int &FrameIndex) const {
  // Note: This list must be kept consistent with StoreRegToStackSlot.
  switch (MI->getOpcode()) {
  default: break;
  case PPC::STD:
  case PPC::STW:
  case PPC::STFS:
  case PPC::STFD:
  case PPC::SPILL_CR:
  case PPC::SPILL_CRBIT:
  case PPC::STVX:
  case PPC::STXVD2X:
  case PPC::QVSTFDX:
  case PPC::QVSTFSXs:
  case PPC::QVSTFDXb:
  case PPC::SPILL_VRSAVE:
    // Check for the operands added by addFrameReference (the immediate is the
    // offset which defaults to 0).
    if (MI->getOperand(1).isImm() && !MI->getOperand(1).getImm() &&
        MI->getOperand(2).isFI()) {
      FrameIndex = MI->getOperand(2).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

// commuteInstruction - We can commute rlwimi instructions, but only if the
// rotate amt is zero.  We also have to munge the immediates a bit.
MachineInstr *
PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
  MachineFunction &MF = *MI->getParent()->getParent();

  // Normal instructions can be commuted the obvious way.
  if (MI->getOpcode() != PPC::RLWIMI &&
      MI->getOpcode() != PPC::RLWIMIo)
    return TargetInstrInfo::commuteInstruction(MI, NewMI);
  // Note that RLWIMI can be commuted as a 32-bit instruction, but not as a
  // 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because
  // changing the relative order of the mask operands might change what happens
  // to the high-bits of the mask (and, thus, the result).

  // Cannot commute if it has a non-zero rotate count.
  if (MI->getOperand(3).getImm() != 0)
    return nullptr;

  // If we have a zero rotate count, we have:
  //   M = mask(MB,ME)
  //   Op0 = (Op1 & ~M) | (Op2 & M)
  // Change this to:
  //   M = mask((ME+1)&31, (MB-1)&31)
  //   Op0 = (Op2 & ~M) | (Op1 & M)
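  //
  // Worked example (illustrative, not from the original comment): with
  // MB = 16 and ME = 23, mask(16,23) selects bits 16..23 (IBM numbering).
  // The swapped form uses mask(24,15), which wraps around and selects bits
  // 24..31 and 0..15, i.e. exactly the complement ~M, so exchanging Op1 and
  // Op2 while flipping the mask preserves the result.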

  // Swap op1/op2
  unsigned Reg0 = MI->getOperand(0).getReg();
  unsigned Reg1 = MI->getOperand(1).getReg();
  unsigned Reg2 = MI->getOperand(2).getReg();
  unsigned SubReg1 = MI->getOperand(1).getSubReg();
  unsigned SubReg2 = MI->getOperand(2).getSubReg();
  bool Reg1IsKill = MI->getOperand(1).isKill();
  bool Reg2IsKill = MI->getOperand(2).isKill();
  bool ChangeReg0 = false;
  // If machine instrs are no longer in two-address forms, update
  // destination register as well.
  if (Reg0 == Reg1) {
    // Must be two address instruction!
    assert(MI->getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
           "Expecting a two-address instruction!");
    assert(MI->getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
    Reg2IsKill = false;
    ChangeReg0 = true;
  }

  // Masks.
  unsigned MB = MI->getOperand(4).getImm();
  unsigned ME = MI->getOperand(5).getImm();

  if (NewMI) {
    // Create a new instruction.
    unsigned Reg0 = ChangeReg0 ? Reg2 : MI->getOperand(0).getReg();
    bool Reg0IsDead = MI->getOperand(0).isDead();
    return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill))
        .addImm((ME+1) & 31)
        .addImm((MB-1) & 31);
  }

  if (ChangeReg0) {
    MI->getOperand(0).setReg(Reg2);
    MI->getOperand(0).setSubReg(SubReg2);
  }
  MI->getOperand(2).setReg(Reg1);
  MI->getOperand(1).setReg(Reg2);
  MI->getOperand(2).setSubReg(SubReg1);
  MI->getOperand(1).setSubReg(SubReg2);
  MI->getOperand(2).setIsKill(Reg1IsKill);
  MI->getOperand(1).setIsKill(Reg2IsKill);

  // Swap the mask around.
  MI->getOperand(4).setImm((ME+1) & 31);
  MI->getOperand(5).setImm((MB-1) & 31);
  return MI;
}

bool PPCInstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                                         unsigned &SrcOpIdx2) const {
  // For VSX A-Type FMA instructions, it is the first two operands that can be
  // commuted, however, because the non-encoded tied input operand is listed
  // first, the operands to swap are actually the second and third.
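  //
  // For example (an assumption about the operand layout, for illustration
  // only): an A-type FMA such as xsmaddadp is modeled roughly as
  //   dst(def), acc(tied to dst), srcA, srcB
  // and only srcA/srcB (operand indices 2 and 3) commute, since the multiply
  // operands are interchangeable while the tied accumulator is not.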

  int AltOpc = PPC::getAltVSXFMAOpcode(MI->getOpcode());
  if (AltOpc == -1)
    return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);

  SrcOpIdx1 = 2;
  SrcOpIdx2 = 3;
  return true;
}

void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI) const {
  // This function is used for scheduling, and the nop wanted here is the type
  // that terminates dispatch groups on the POWER cores.
  unsigned Directive = Subtarget.getDarwinDirective();
  unsigned Opcode;
  switch (Directive) {
  default:            Opcode = PPC::NOP; break;
  case PPC::DIR_PWR6: Opcode = PPC::NOP_GT_PWR6; break;
  case PPC::DIR_PWR7: Opcode = PPC::NOP_GT_PWR7; break;
  case PPC::DIR_PWR8: Opcode = PPC::NOP_GT_PWR7; break; /* FIXME: Update when P8 InstrScheduling model is ready */
  }

  DebugLoc DL;
  BuildMI(MBB, MI, DL, get(Opcode));
}

/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
void PPCInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(PPC::NOP);
}

// Branch analysis.
// Note: If the condition register is set to CTR or CTR8 then this is a
// BDNZ (imm == 1) or BDZ (imm == 0) branch.
bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 bool AllowModify) const {
  bool isPPC64 = Subtarget.isPPC64();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (LastInst->getOpcode() == PPC::B) {
      if (!LastInst->getOperand(0).isMBB())
        return true;
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastInst->getOpcode() == PPC::BCC) {
      if (!LastInst->getOperand(2).isMBB())
        return true;
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(2).getMBB();
      Cond.push_back(LastInst->getOperand(0));
      Cond.push_back(LastInst->getOperand(1));
      return false;
    } else if (LastInst->getOpcode() == PPC::BC) {
      if (!LastInst->getOperand(1).isMBB())
        return true;
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(1).getMBB();
      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
      Cond.push_back(LastInst->getOperand(0));
      return false;
    } else if (LastInst->getOpcode() == PPC::BCn) {
      if (!LastInst->getOperand(1).isMBB())
        return true;
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(1).getMBB();
      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
      Cond.push_back(LastInst->getOperand(0));
      return false;
    } else if (LastInst->getOpcode() == PPC::BDNZ8 ||
               LastInst->getOpcode() == PPC::BDNZ) {
      if (!LastInst->getOperand(0).isMBB())
        return true;
      if (DisableCTRLoopAnal)
        return true;
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(1));
      Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                               true));
      return false;
    } else if (LastInst->getOpcode() == PPC::BDZ8 ||
               LastInst->getOpcode() == PPC::BDZ) {
      if (!LastInst->getOperand(0).isMBB())
        return true;
      if (DisableCTRLoopAnal)
        return true;
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(0));
      Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                               true));
      return false;
    }

    // Otherwise, don't know what this is.
    return true;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with PPC::B and PPC::BCC, handle it.
  if (SecondLastInst->getOpcode() == PPC::BCC &&
      LastInst->getOpcode() == PPC::B) {
    if (!SecondLastInst->getOperand(2).isMBB() ||
        !LastInst->getOperand(0).isMBB())
      return true;
    TBB = SecondLastInst->getOperand(2).getMBB();
    Cond.push_back(SecondLastInst->getOperand(0));
    Cond.push_back(SecondLastInst->getOperand(1));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  } else if (SecondLastInst->getOpcode() == PPC::BC &&
             LastInst->getOpcode() == PPC::B) {
    if (!SecondLastInst->getOperand(1).isMBB() ||
        !LastInst->getOperand(0).isMBB())
      return true;
    TBB = SecondLastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  } else if (SecondLastInst->getOpcode() == PPC::BCn &&
             LastInst->getOpcode() == PPC::B) {
    if (!SecondLastInst->getOperand(1).isMBB() ||
        !LastInst->getOperand(0).isMBB())
      return true;
    TBB = SecondLastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  } else if ((SecondLastInst->getOpcode() == PPC::BDNZ8 ||
              SecondLastInst->getOpcode() == PPC::BDNZ) &&
             LastInst->getOpcode() == PPC::B) {
    if (!SecondLastInst->getOperand(0).isMBB() ||
        !LastInst->getOperand(0).isMBB())
      return true;
    if (DisableCTRLoopAnal)
      return true;
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(1));
    Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                             true));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  } else if ((SecondLastInst->getOpcode() == PPC::BDZ8 ||
              SecondLastInst->getOpcode() == PPC::BDZ) &&
             LastInst->getOpcode() == PPC::B) {
    if (!SecondLastInst->getOperand(0).isMBB() ||
        !LastInst->getOperand(0).isMBB())
      return true;
    if (DisableCTRLoopAnal)
      return true;
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(0));
    Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                             true));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two PPC::Bs, handle it.  The second one is not
  // executed, so remove it.
  if (SecondLastInst->getOpcode() == PPC::B &&
      LastInst->getOpcode() == PPC::B) {
    if (!SecondLastInst->getOperand(0).isMBB())
      return true;
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const SmallVectorImpl<MachineOperand> &Cond,
                           DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "PPC branch conditions have two components!");

  bool isPPC64 = Subtarget.isPPC64();

  // One-way branch.
  if (!FBB) {
    if (Cond.empty()) // Unconditional branch
      BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB);
    else if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
      BuildMI(&MBB, DL, get(Cond[0].getImm() ?
                              (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                              (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
    else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
      BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB);
    else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
      BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB);
    else // Conditional branch
      BuildMI(&MBB, DL, get(PPC::BCC))
          .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB);
    return 1;
  }

  // Two-way Conditional Branch.
  if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
    BuildMI(&MBB, DL, get(Cond[0].getImm() ?
                            (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                            (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
  else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
    BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB);
  else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
    BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB);
  else
    BuildMI(&MBB, DL, get(PPC::BCC))
        .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB);
  BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
  return 2;
}

// Select analysis.
bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                   const SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned TrueReg, unsigned FalseReg,
                                   int &CondCycles, int &TrueCycles,
                                   int &FalseCycles) const {
  if (!Subtarget.hasISEL())
    return false;

  if (Cond.size() != 2)
    return false;

  // If this is really a bdnz-like condition, then it cannot be turned into a
  // select.
  if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
    return false;

  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // isel is for regular integer GPRs only.
  if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
      !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
      !PPC::G8RCRegClass.hasSubClassEq(RC) &&
      !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))
    return false;

  // FIXME: These numbers are for the A2, how well they work for other cores is
  // an open question. On the A2, the isel instruction has a 2-cycle latency
  // but single-cycle throughput. These numbers are used in combination with
  // the MispredictPenalty setting from the active SchedMachineModel.
  CondCycles = 1;
  TrueCycles = 1;
  FalseCycles = 1;

  return true;
}

void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI, DebugLoc dl,
                                unsigned DestReg,
                                const SmallVectorImpl<MachineOperand> &Cond,
                                unsigned TrueReg, unsigned FalseReg) const {
  assert(Cond.size() == 2 &&
         "PPC branch conditions have two components!");

  assert(Subtarget.hasISEL() &&
         "Cannot insert select on target without ISEL support");

  // Get the register classes.
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  assert(RC && "TrueReg and FalseReg must have overlapping register classes");

  bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
                 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
  assert((Is64Bit ||
          PPC::GPRCRegClass.hasSubClassEq(RC) ||
          PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
         "isel is for regular integer GPRs only");

  unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
  unsigned SelectPred = Cond[0].getImm();

  unsigned SubIdx;
  bool SwapOps;
  switch (SelectPred) {
  default: llvm_unreachable("invalid predicate for isel");
  case PPC::PRED_EQ: SubIdx = PPC::sub_eq; SwapOps = false; break;
  case PPC::PRED_NE: SubIdx = PPC::sub_eq; SwapOps = true; break;
  case PPC::PRED_LT: SubIdx = PPC::sub_lt; SwapOps = false; break;
  case PPC::PRED_GE: SubIdx = PPC::sub_lt; SwapOps = true; break;
  case PPC::PRED_GT: SubIdx = PPC::sub_gt; SwapOps = false; break;
  case PPC::PRED_LE: SubIdx = PPC::sub_gt; SwapOps = true; break;
  case PPC::PRED_UN: SubIdx = PPC::sub_un; SwapOps = false; break;
  case PPC::PRED_NU: SubIdx = PPC::sub_un; SwapOps = true; break;
  case PPC::PRED_BIT_SET:   SubIdx = 0; SwapOps = false; break;
  case PPC::PRED_BIT_UNSET: SubIdx = 0; SwapOps = true; break;
  }

  unsigned FirstReg = SwapOps ? FalseReg : TrueReg,
           SecondReg = SwapOps ? TrueReg : FalseReg;

  // The first input register of isel cannot be r0. If it is a member
  // of a register class that can be r0, then copy it first (the
  // register allocator should eliminate the copy).
  if (MRI.getRegClass(FirstReg)->contains(PPC::R0) ||
      MRI.getRegClass(FirstReg)->contains(PPC::X0)) {
    const TargetRegisterClass *FirstRC =
        MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
            &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
    unsigned OldFirstReg = FirstReg;
    FirstReg = MRI.createVirtualRegister(FirstRC);
    BuildMI(MBB, MI, dl, get(TargetOpcode::COPY), FirstReg)
        .addReg(OldFirstReg);
  }

  BuildMI(MBB, MI, dl, get(OpCode), DestReg)
      .addReg(FirstReg).addReg(SecondReg)
      .addReg(Cond[1].getReg(), 0, SubIdx);
}

static unsigned getCRBitValue(unsigned CRBit) {
  unsigned Ret = 4;
  if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT ||
      CRBit == PPC::CR2LT || CRBit == PPC::CR3LT ||
      CRBit == PPC::CR4LT || CRBit == PPC::CR5LT ||
      CRBit == PPC::CR6LT || CRBit == PPC::CR7LT)
    Ret = 3;
  if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT ||
      CRBit == PPC::CR2GT || CRBit == PPC::CR3GT ||
      CRBit == PPC::CR4GT || CRBit == PPC::CR5GT ||
      CRBit == PPC::CR6GT || CRBit == PPC::CR7GT)
    Ret = 2;
  if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ ||
      CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ ||
      CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ ||
      CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ)
    Ret = 1;
  if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN ||
      CRBit == PPC::CR2UN || CRBit == PPC::CR3UN ||
      CRBit == PPC::CR4UN || CRBit == PPC::CR5UN ||
      CRBit == PPC::CR6UN || CRBit == PPC::CR7UN)
    Ret = 0;

  assert(Ret != 4 && "Invalid CR bit register");
  return Ret;
}

void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, DebugLoc DL,
                               unsigned DestReg, unsigned SrcReg,
                               bool KillSrc) const {
  // We can end up with self copies and similar things as a result of VSX copy
  // legalization. Promote them here.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (PPC::F8RCRegClass.contains(DestReg) &&
      PPC::VSRCRegClass.contains(SrcReg)) {
    unsigned SuperReg =
        TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);

    if (VSXSelfCopyCrash && SrcReg == SuperReg)
      llvm_unreachable("nop VSX copy");

    DestReg = SuperReg;
  } else if (PPC::VRRCRegClass.contains(DestReg) &&
             PPC::VSRCRegClass.contains(SrcReg)) {
    unsigned SuperReg =
        TRI->getMatchingSuperReg(DestReg, PPC::sub_128, &PPC::VSRCRegClass);

    if (VSXSelfCopyCrash && SrcReg == SuperReg)
      llvm_unreachable("nop VSX copy");

    DestReg = SuperReg;
  } else if (PPC::F8RCRegClass.contains(SrcReg) &&
             PPC::VSRCRegClass.contains(DestReg)) {
    unsigned SuperReg =
        TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);

    if (VSXSelfCopyCrash && DestReg == SuperReg)
      llvm_unreachable("nop VSX copy");

    SrcReg = SuperReg;
  } else if (PPC::VRRCRegClass.contains(SrcReg) &&
             PPC::VSRCRegClass.contains(DestReg)) {
    unsigned SuperReg =
        TRI->getMatchingSuperReg(SrcReg, PPC::sub_128, &PPC::VSRCRegClass);

    if (VSXSelfCopyCrash && DestReg == SuperReg)
      llvm_unreachable("nop VSX copy");

    SrcReg = SuperReg;
  }

  // Different class register copy
  if (PPC::CRBITRCRegClass.contains(SrcReg) &&
      PPC::GPRCRegClass.contains(DestReg)) {
    unsigned CRReg = getCRFromCRBit(SrcReg);
    BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg)
        .addReg(CRReg, getKillRegState(KillSrc));
    // Rotate the CR bit in the CR fields to be the least significant bit and
    // then mask with 0x1 (MB = ME = 31).
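    //
    // Worked example (illustrative): for CR2GT, the field encoding is 2 and
    // getCRBitValue() returns 2, so the shift amount is 2 * 4 + (4 - 2) = 10;
    // rotating the 32-bit CR image left by 10 moves the CR2 GT bit into bit
    // 31, which the MB = ME = 31 mask then isolates.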
    BuildMI(MBB, I, DL, get(PPC::RLWINM), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(TRI->getEncodingValue(CRReg) * 4 + (4 - getCRBitValue(SrcReg)))
        .addImm(31)
        .addImm(31);
    return;
  } else if (PPC::CRRCRegClass.contains(SrcReg) &&
             PPC::G8RCRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(PPC::MFOCRF8), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  } else if (PPC::CRRCRegClass.contains(SrcReg) &&
             PPC::GPRCRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  unsigned Opc;
  if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::OR;
  else if (PPC::G8RCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::OR8;
  else if (PPC::F4RCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::FMR;
  else if (PPC::CRRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::MCRF;
  else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::VOR;
  else if (PPC::VSRCRegClass.contains(DestReg, SrcReg))
    // There are two different ways this can be done:
    //   1. xxlor : This has lower latency (on the P7), 2 cycles, but can only
    //      issue in VSU pipeline 0.
    //   2. xmovdp/xmovsp: This has higher latency (on the P7), 6 cycles, but
    //      can go to either pipeline.
    // We'll always use xxlor here, because in practically all cases where
    // copies are generated, they are close enough to some use that the
    // lower-latency form is preferable.
    Opc = PPC::XXLOR;
  else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::XXLORf;
  else if (PPC::QFRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::QVFMR;
  else if (PPC::QSRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::QVFMRs;
  else if (PPC::QBRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::QVFMRb;
  else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::CROR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  const MCInstrDesc &MCID = get(Opc);
  if (MCID.getNumOperands() == 3)
    BuildMI(MBB, I, DL, MCID, DestReg)
        .addReg(SrcReg).addReg(SrcReg, getKillRegState(KillSrc));
  else
    BuildMI(MBB, I, DL, MCID, DestReg).addReg(SrcReg, getKillRegState(KillSrc));
}

// This function returns true if a CR spill is necessary and false otherwise.
bool
PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
                                  unsigned SrcReg, bool isKill,
                                  int FrameIdx,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs,
                                  bool &NonRI, bool &SpillsVRS) const {
  // Note: If additional store instructions are added here,
  // update isStoreToStackSlot.

  DebugLoc DL;
  if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
      PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::STW)).addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
  } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
             PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::STD)).addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
  } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::STFD)).addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
  } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::STFS)).addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
  } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::SPILL_CR))
            .addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
    return true;
  } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::SPILL_CRBIT))
            .addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
    return true;
  } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::STVX)).addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
    NonRI = true;
  } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::STXVD2X))
            .addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
    NonRI = true;
  } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::STXSDX))
            .addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
    NonRI = true;
  } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.isDarwin() &&
           "VRSAVE only needs spill/restore on Darwin");
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::SPILL_VRSAVE))
            .addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
    SpillsVRS = true;
  } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::QVSTFDX))
            .addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
    NonRI = true;
  } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::QVSTFSXs))
            .addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
    NonRI = true;
  } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(
        BuildMI(MF, DL, get(PPC::QVSTFDXb))
            .addReg(SrcReg, getKillRegState(isKill)),
        FrameIdx));
    NonRI = true;
  } else {
    llvm_unreachable("Unknown regclass!");
  }

  return false;
}

void
PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned SrcReg, bool isKill, int FrameIdx,
                                  const TargetRegisterClass *RC,
                                  const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  SmallVector<MachineInstr*, 4> NewMIs;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasSpills();

  bool NonRI = false, SpillsVRS = false;
  if (StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs,
                          NonRI, SpillsVRS))
    FuncInfo->setSpillsCR();

  if (SpillsVRS)
    FuncInfo->setSpillsVRSAVE();

  if (NonRI)
    FuncInfo->setHasNonRISpills();

  for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
    MBB.insert(MI, NewMIs[i]);

  const MachineFrameInfo &MFI = *MF.getFrameInfo();
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                              MachineMemOperand::MOStore,
                              MFI.getObjectSize(FrameIdx),
                              MFI.getObjectAlignment(FrameIdx));
  NewMIs.back()->addMemOperand(MF, MMO);
}

bool
PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
                                   unsigned DestReg, int FrameIdx,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs,
                                   bool &NonRI, bool &SpillsVRS) const {
  // Note: If additional load instructions are added here,
  // update isLoadFromStackSlot.

  if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
      PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ), DestReg),
                                       FrameIdx));
  } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
             PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LD), DestReg),
                                       FrameIdx));
  } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFD), DestReg),
                                       FrameIdx));
  } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFS), DestReg),
                                       FrameIdx));
  } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::RESTORE_CR),
                                               DestReg),
                                       FrameIdx));
    return true;
  } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::RESTORE_CRBIT),
                                               DestReg),
                                       FrameIdx));
    return true;
  } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LVX), DestReg),
                                       FrameIdx));
    NonRI = true;
  } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXVD2X),
                                               DestReg),
                                       FrameIdx));
    NonRI = true;
  } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXSDX),
                                               DestReg),
                                       FrameIdx));
    NonRI = true;
  } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.isDarwin() &&
           "VRSAVE only needs spill/restore on Darwin");
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::RESTORE_VRSAVE),
                                               DestReg),
                                       FrameIdx));
    SpillsVRS = true;
  } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFDX),
                                               DestReg),
                                       FrameIdx));
    NonRI = true;
  } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFSXs),
                                               DestReg),
                                       FrameIdx));
    NonRI = true;
  } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFDXb),
                                               DestReg),
                                       FrameIdx));
    NonRI = true;
  } else {
    llvm_unreachable("Unknown regclass!");
  }

  return false;
}

void
PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned DestReg, int FrameIdx,
                                   const TargetRegisterClass *RC,
                                   const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  SmallVector<MachineInstr*, 4> NewMIs;
  DebugLoc DL;
  if (MI != MBB.end()) DL = MI->getDebugLoc();

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasSpills();

  bool NonRI = false, SpillsVRS = false;
  if (LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs,
                           NonRI, SpillsVRS))
    FuncInfo->setSpillsCR();

  if (SpillsVRS)
    FuncInfo->setSpillsVRSAVE();

  if (NonRI)
    FuncInfo->setHasNonRISpills();

  for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
    MBB.insert(MI, NewMIs[i]);

  const MachineFrameInfo &MFI = *MF.getFrameInfo();
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                              MachineMemOperand::MOLoad,
                              MFI.getObjectSize(FrameIdx),
                              MFI.getObjectAlignment(FrameIdx));
  NewMIs.back()->addMemOperand(MF, MMO);
}

bool PPCInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
  if (Cond[1].getReg() == PPC::CTR8 || Cond[1].getReg() == PPC::CTR)
    Cond[0].setImm(Cond[0].getImm() == 0 ? 1 : 0);
  else
    // Leave the CR# the same, but invert the condition.
    Cond[0].setImm(PPC::InvertPredicate((PPC::Predicate)Cond[0].getImm()));
  return false;
}

bool PPCInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
                                 unsigned Reg, MachineRegisterInfo *MRI) const {
  // For some instructions, it is legal to fold ZERO into the RA register field.
  // A zero immediate should always be loaded with a single li.
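  //
  // Hypothetical example (not from the original comment): if %vreg1 is
  // defined by "li %vreg1, 0" and used as the RA operand of an indexed access
  // such as "lwzx %vreg2, %vreg1, %vreg3", the use can name ZERO/ZERO8
  // instead, because RA = 0 in those encodings denotes a literal zero rather
  // than r0.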
  unsigned DefOpc = DefMI->getOpcode();
  if (DefOpc != PPC::LI && DefOpc != PPC::LI8)
    return false;
  if (!DefMI->getOperand(1).isImm())
    return false;
  if (DefMI->getOperand(1).getImm() != 0)
    return false;

  // Note that we cannot here invert the arguments of an isel in order to fold
  // a ZERO into what is presented as the second argument. All we have here
  // is the condition bit, and that might come from a CR-logical bit operation.

  const MCInstrDesc &UseMCID = UseMI->getDesc();

  // Only fold into real machine instructions.
  if (UseMCID.isPseudo())
    return false;

  unsigned UseIdx;
  for (UseIdx = 0; UseIdx < UseMI->getNumOperands(); ++UseIdx)
    if (UseMI->getOperand(UseIdx).isReg() &&
        UseMI->getOperand(UseIdx).getReg() == Reg)
      break;

  assert(UseIdx < UseMI->getNumOperands() && "Cannot find Reg in UseMI");
  assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg");

  const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx];

  // We can fold the zero if this register requires a GPRC_NOR0/G8RC_NOX0
  // register (which might also be specified as a pointer class kind).
  if (UseInfo->isLookupPtrRegClass()) {
    if (UseInfo->RegClass /* Kind */ != 1)
      return false;
  } else {
    if (UseInfo->RegClass != PPC::GPRC_NOR0RegClassID &&
        UseInfo->RegClass != PPC::G8RC_NOX0RegClassID)
      return false;
  }

  // Make sure this is not tied to an output register (or otherwise
  // constrained). This is true for ST?UX registers, for example, which
  // are tied to their output registers.
  if (UseInfo->Constraints != 0)
    return false;

  unsigned ZeroReg;
  if (UseInfo->isLookupPtrRegClass()) {
    bool isPPC64 = Subtarget.isPPC64();
    ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;
  } else {
    ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ?
              PPC::ZERO8 : PPC::ZERO;
  }

  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI->getOperand(UseIdx).setReg(ZeroReg);

  if (DeleteDef)
    DefMI->eraseFromParent();

  return true;
}

static bool MBBDefinesCTR(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
       I != IE; ++I)
    if (I->definesRegister(PPC::CTR) || I->definesRegister(PPC::CTR8))
      return true;
  return false;
}

// We should make sure that, if we're going to predicate both sides of a
// condition (a diamond), that both sides don't define the counter register. We
// can predicate counter-decrement-based branches, but while that predicates
// the branching, it does not predicate the counter decrement. If we tried to
// merge the triangle into one predicated block, we'd decrement the counter
// twice.
bool PPCInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                       unsigned NumT, unsigned ExtraT,
                                       MachineBasicBlock &FMBB,
                                       unsigned NumF, unsigned ExtraF,
                                       const BranchProbability &Probability) const {
  return !(MBBDefinesCTR(TMBB) && MBBDefinesCTR(FMBB));
}


bool PPCInstrInfo::isPredicated(const MachineInstr *MI) const {
  // The predicated branches are identified by their type, not really by the
  // explicit presence of a predicate. Furthermore, some of them can be
  // predicated more than once. Because if conversion won't try to predicate
  // any instruction which already claims to be predicated (by returning true
  // here), always return false. In doing so, we let isPredicable() be the
  // final word on whether or not the instruction can be (further) predicated.

  return false;
}

bool PPCInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator())
    return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;

  return !isPredicated(MI);
}

bool PPCInstrInfo::PredicateInstruction(
    MachineInstr *MI,
    const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned OpC = MI->getOpcode();
  if (OpC == PPC::BLR || OpC == PPC::BLR8) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = Subtarget.isPPC64();
      MI->setDesc(get(Pred[0].getImm() ?
                      (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR) :
                      (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
    } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
      MI->setDesc(get(PPC::BCLR));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
          .addReg(Pred[1].getReg());
    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
      MI->setDesc(get(PPC::BCLRn));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
          .addReg(Pred[1].getReg());
    } else {
      MI->setDesc(get(PPC::BCCLR));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
          .addImm(Pred[0].getImm())
          .addReg(Pred[1].getReg());
    }

    return true;
  } else if (OpC == PPC::B) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = Subtarget.isPPC64();
      MI->setDesc(get(Pred[0].getImm() ?
                      (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                      (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
    } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
      MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
      MI->RemoveOperand(0);

      MI->setDesc(get(PPC::BC));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
          .addReg(Pred[1].getReg())
          .addMBB(MBB);
    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
      MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
      MI->RemoveOperand(0);

      MI->setDesc(get(PPC::BCn));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
          .addReg(Pred[1].getReg())
          .addMBB(MBB);
    } else {
      MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
      MI->RemoveOperand(0);

      MI->setDesc(get(PPC::BCC));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
          .addImm(Pred[0].getImm())
          .addReg(Pred[1].getReg())
          .addMBB(MBB);
    }

    return true;
  } else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 ||
             OpC == PPC::BCTRL || OpC == PPC::BCTRL8) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR)
      llvm_unreachable("Cannot predicate bctr[l] on the ctr register");

    bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8;
    bool isPPC64 = Subtarget.isPPC64();

    if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
      MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8) :
                                (setLR ? PPC::BCCTRL : PPC::BCCTR)));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
          .addReg(Pred[1].getReg());
      return true;
    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
      MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n) :
                                (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
          .addReg(Pred[1].getReg());
      return true;
    }

    MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8) :
                              (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
    MachineInstrBuilder(*MI->getParent()->getParent(), MI)
        .addImm(Pred[0].getImm())
        .addReg(Pred[1].getReg());
    return true;
  }

  return false;
}

bool PPCInstrInfo::SubsumesPredicate(
    const SmallVectorImpl<MachineOperand> &Pred1,
    const SmallVectorImpl<MachineOperand> &Pred2) const {
  assert(Pred1.size() == 2 && "Invalid PPC first predicate");
  assert(Pred2.size() == 2 && "Invalid PPC second predicate");

  if (Pred1[1].getReg() == PPC::CTR8 || Pred1[1].getReg() == PPC::CTR)
    return false;
  if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR)
    return false;

  // P1 can only subsume P2 if they test the same condition register.
  if (Pred1[1].getReg() != Pred2[1].getReg())
    return false;

  PPC::Predicate P1 = (PPC::Predicate) Pred1[0].getImm();
  PPC::Predicate P2 = (PPC::Predicate) Pred2[0].getImm();

  if (P1 == P2)
    return true;

  // Does P1 subsume P2, e.g. GE subsumes GT.
  if (P1 == PPC::PRED_LE &&
      (P2 == PPC::PRED_LT || P2 == PPC::PRED_EQ))
    return true;
  if (P1 == PPC::PRED_GE &&
      (P2 == PPC::PRED_GT || P2 == PPC::PRED_EQ))
    return true;

  return false;
}

bool PPCInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  // Note: At the present time, the contents of Pred from this function is
  // unused by IfConversion. This implementation follows ARM by pushing the
  // CR-defining operand. Because the 'DZ' and 'DNZ' count as types of
  // predicate, instructions defining CTR or CTR8 are also included as
  // predicate-defining instructions.

  const TargetRegisterClass *RCs[] =
      { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
        &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    for (unsigned c = 0; c < array_lengthof(RCs) && !Found; ++c) {
      const TargetRegisterClass *RC = RCs[c];
      if (MO.isReg()) {
        if (MO.isDef() && RC->contains(MO.getReg())) {
          Pred.push_back(MO);
          Found = true;
        }
      } else if (MO.isRegMask()) {
        for (TargetRegisterClass::iterator I = RC->begin(),
             IE = RC->end(); I != IE; ++I)
          if (MO.clobbersPhysReg(*I)) {
            Pred.push_back(MO);
            Found = true;
          }
      }
    }
  }

  return Found;
}

bool PPCInstrInfo::isPredicable(MachineInstr *MI) const {
  unsigned OpC = MI->getOpcode();
  switch (OpC) {
  default:
    return false;
  case PPC::B:
  case PPC::BLR:
  case PPC::BLR8:
  case PPC::BCTR:
  case PPC::BCTR8:
  case PPC::BCTRL:
  case PPC::BCTRL8:
    return true;
  }
}

bool PPCInstrInfo::analyzeCompare(const MachineInstr *MI,
                                  unsigned &SrcReg, unsigned &SrcReg2,
                                  int &Mask, int &Value) const {
  unsigned Opc = MI->getOpcode();

  switch (Opc) {
  default: return false;
  case PPC::CMPWI:
  case PPC::CMPLWI:
  case PPC::CMPDI:
  case PPC::CMPLDI:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    Value = MI->getOperand(2).getImm();
    Mask = 0xFFFF;
    return true;
  case PPC::CMPW:
  case PPC::CMPLW:
  case PPC::CMPD:
  case PPC::CMPLD:
  case PPC::FCMPUS:
  case PPC::FCMPUD:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    return true;
  }
}

bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
                                        unsigned SrcReg, unsigned SrcReg2,
                                        int Mask, int Value,
                                        const MachineRegisterInfo *MRI) const {
  if (DisableCmpOpt)
    return false;

  int OpC = CmpInstr->getOpcode();
  unsigned CRReg = CmpInstr->getOperand(0).getReg();

  // FP record forms set CR1 based on the exception status bits, not a
  // comparison with zero.
  if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)
    return false;

  // The record forms set the condition register based on a signed comparison
  // with zero (so says the ISA manual). This is not as straightforward as it
  // seems, however, because this is always a 64-bit comparison on PPC64, even
  // for instructions that are 32-bit in nature (like slw for example).
  // So, on PPC32, for unsigned comparisons, we can use the record forms only
  // for equality checks (as those don't depend on the sign). On PPC64,
  // we are restricted to equality for unsigned 64-bit comparisons and for
  // signed 32-bit comparisons the applicability is more restricted.
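  //
  // For instance (illustrative, not from the original comment): on PPC64, a
  // 32-bit operation that leaves 0x80000000 in the low word of a register
  // yields a 64-bit value that compares as positive, so a record-form CR0
  // would report "greater than zero" even though the value is negative as a
  // 32-bit signed quantity; only the EQ/NE bits remain trustworthy there.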
1432 bool isPPC64 = Subtarget.isPPC64();
1433 bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW;
1434 bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
1435 bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
1436
1437 // Get the unique definition of SrcReg.
1438 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
1439 if (!MI) return false;
1440 int MIOpC = MI->getOpcode();
1441
1442 bool equalityOnly = false;
1443 bool noSub = false;
1444 if (isPPC64) {
1445 if (is32BitSignedCompare) {
1446 // We can perform this optimization only if MI is sign-extending.
1447 if (MIOpC == PPC::SRAW || MIOpC == PPC::SRAWo ||
1448 MIOpC == PPC::SRAWI || MIOpC == PPC::SRAWIo ||
1449 MIOpC == PPC::EXTSB || MIOpC == PPC::EXTSBo ||
1450 MIOpC == PPC::EXTSH || MIOpC == PPC::EXTSHo ||
1451 MIOpC == PPC::EXTSW || MIOpC == PPC::EXTSWo) {
1452 noSub = true;
1453 } else
1454 return false;
1455 } else if (is32BitUnsignedCompare) {
1456 // We can perform this optimization, equality only, if MI is
1457 // zero-extending.
1458 if (MIOpC == PPC::CNTLZW || MIOpC == PPC::CNTLZWo ||
1459 MIOpC == PPC::SLW || MIOpC == PPC::SLWo ||
1460 MIOpC == PPC::SRW || MIOpC == PPC::SRWo) {
1461 noSub = true;
1462 equalityOnly = true;
1463 } else
1464 return false;
1465 } else
1466 equalityOnly = is64BitUnsignedCompare;
1467 } else
1468 equalityOnly = is32BitUnsignedCompare;

  if (equalityOnly) {
    // We need to check the uses of the condition register in order to reject
    // non-equality comparisons.
    for (MachineRegisterInfo::use_instr_iterator I =MRI->use_instr_begin(CRReg),
         IE = MRI->use_instr_end(); I != IE; ++I) {
      MachineInstr *UseMI = &*I;
      if (UseMI->getOpcode() == PPC::BCC) {
        unsigned Pred = UseMI->getOperand(0).getImm();
        if (Pred != PPC::PRED_EQ && Pred != PPC::PRED_NE)
          return false;
      } else if (UseMI->getOpcode() == PPC::ISEL ||
                 UseMI->getOpcode() == PPC::ISEL8) {
        unsigned SubIdx = UseMI->getOperand(3).getSubReg();
        if (SubIdx != PPC::sub_eq)
          return false;
      } else
        return false;
    }
  }

  MachineBasicBlock::iterator I = CmpInstr;

  // Scan forward to find the first use of the compare.
  for (MachineBasicBlock::iterator EL = CmpInstr->getParent()->end();
       I != EL; ++I) {
    bool FoundUse = false;
    for (MachineRegisterInfo::use_instr_iterator J =MRI->use_instr_begin(CRReg),
         JE = MRI->use_instr_end(); J != JE; ++J)
      if (&*J == &*I) {
        FoundUse = true;
        break;
      }

    if (FoundUse)
      break;
  }
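  // I now points at the first instruction in this block that uses the compare
  // result, or at the end of the block if the result has no use here.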

  // There are two possible candidates which can be changed to set CR[01].
  // One is MI, the other is a SUB instruction.
  // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
  MachineInstr *Sub = nullptr;
  if (SrcReg2 != 0)
    // MI is not a candidate for CMPrr.
    MI = nullptr;
  // FIXME: Conservatively refuse to convert an instruction which isn't in the
  // same BB as the comparison. This is to allow the check below to avoid calls
  // (and other explicit clobbers); instead we should really check for these
  // more explicitly (in at least a few predecessors).
  else if (MI->getParent() != CmpInstr->getParent() || Value != 0) {
    // PPC does not have a record-form SUBri.
    return false;
  }

  // Search for Sub.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  --I;

  // Get ready to iterate backward from just before the first use of the
  // compare found above.
  MachineBasicBlock::iterator E = MI,
                              B = CmpInstr->getParent()->begin();

  for (; I != E && !noSub; --I) {
    const MachineInstr &Instr = *I;
    unsigned IOpC = Instr.getOpcode();

    if (&*I != CmpInstr && (
        Instr.modifiesRegister(PPC::CR0, TRI) ||
        Instr.readsRegister(PPC::CR0, TRI)))
      // This instruction modifies or uses the record condition register after
      // the one we want to change. While we could do this transformation, it
      // would likely not be profitable. This transformation removes one
      // instruction, and so even forcing RA to generate one move probably
      // makes it unprofitable.
      return false;

    // Check whether CmpInstr can be made redundant by the current instruction.
    if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
         OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
        (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
        ((Instr.getOperand(1).getReg() == SrcReg &&
          Instr.getOperand(2).getReg() == SrcReg2) ||
         (Instr.getOperand(1).getReg() == SrcReg2 &&
          Instr.getOperand(2).getReg() == SrcReg))) {
      Sub = &*I;
      break;
    }

    if (I == B)
      // We have reached the beginning of the block without finding a
      // suitable candidate.
      return false;
  }

  // Return false if no candidates exist.
  if (!MI && !Sub)
    return false;

  // The single candidate is called MI.
  if (!MI) MI = Sub;

  int NewOpC = -1;
  MIOpC = MI->getOpcode();
  if (MIOpC == PPC::ANDIo || MIOpC == PPC::ANDIo8)
    NewOpC = MIOpC;
  else {
    NewOpC = PPC::getRecordFormOpcode(MIOpC);
    if (NewOpC == -1 && PPC::getNonRecordFormOpcode(MIOpC) != -1)
      NewOpC = MIOpC;
  }
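  // NewOpC is now the record form of MI's opcode, MI's own opcode if MI is
  // already a record form, or -1 if no record form exists.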

  // FIXME: On the non-embedded POWER architectures, only some of the record
  // forms are fast, and we should use only the fast ones.

  // The defining instruction has a record form (or is already a record
  // form). It is possible, however, that we'll need to reverse the condition
  // code of the users.
  if (NewOpC == -1)
    return false;

  SmallVector<std::pair<MachineOperand*, PPC::Predicate>, 4> PredsToUpdate;
  SmallVector<std::pair<MachineOperand*, unsigned>, 4> SubRegsToUpdate;

  // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based on CMP
  // needs to be updated to be based on SUB. Push the condition code
  // operands to PredsToUpdate (and the CR subregister operands to
  // SubRegsToUpdate). If it is safe to remove CmpInstr, the condition code
  // of these operands will be modified.
  bool ShouldSwap = false;
  if (Sub) {
    ShouldSwap = SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
      Sub->getOperand(2).getReg() == SrcReg;

    // The operands to subf are the opposite of sub: subf RT, RA, RB computes
    // RB - RA. A subf whose operands appear swapped relative to the compare
    // therefore computes the value in compare order (and vice versa), so
    // invert the decision made above.
    ShouldSwap = !ShouldSwap;
  }

  if (ShouldSwap)
    for (MachineRegisterInfo::use_instr_iterator
         I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
         I != IE; ++I) {
      MachineInstr *UseMI = &*I;
      if (UseMI->getOpcode() == PPC::BCC) {
        PPC::Predicate Pred = (PPC::Predicate) UseMI->getOperand(0).getImm();
        assert((!equalityOnly ||
                Pred == PPC::PRED_EQ || Pred == PPC::PRED_NE) &&
               "Invalid predicate for equality-only optimization");
        PredsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(0)),
                                PPC::getSwappedPredicate(Pred)));
      } else if (UseMI->getOpcode() == PPC::ISEL ||
                 UseMI->getOpcode() == PPC::ISEL8) {
        unsigned NewSubReg = UseMI->getOperand(3).getSubReg();
        assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
               "Invalid CR bit for equality-only optimization");

        if (NewSubReg == PPC::sub_lt)
          NewSubReg = PPC::sub_gt;
        else if (NewSubReg == PPC::sub_gt)
          NewSubReg = PPC::sub_lt;

        SubRegsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(3)),
                                  NewSubReg));
      } else // We need to abort on a user we don't understand.
        return false;
    }
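
  // No further bail-outs from this point on: the compare is removed and the
  // defining instruction is rewritten below.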

  // Delete the compare and copy CR0 (which the record-form instruction will
  // now set) into the compare's old destination register. If the defining
  // instruction was not previously a record form, mark CR0 as killed by
  // the copy.
  CmpInstr->eraseFromParent();

  MachineBasicBlock::iterator MII = MI;
  BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
          get(TargetOpcode::COPY), CRReg)
    .addReg(PPC::CR0, MIOpC != NewOpC ? RegState::Kill : 0);

  if (MIOpC != NewOpC) {
    // We need to be careful here: we're replacing one instruction with
    // another, and we need to make sure that we get all of the right
    // implicit uses and defs. On the other hand, the caller may be holding
    // an iterator to this instruction, and so we can't delete it (this is
    // specifically the case if this is the instruction directly after the
    // compare).

    const MCInstrDesc &NewDesc = get(NewOpC);
    MI->setDesc(NewDesc);

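    // The new descriptor may imply additional implicit operands (record forms
    // implicitly define CR0); add any implicit defs and uses from NewDesc
    // that MI does not already have.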
    if (NewDesc.ImplicitDefs)
      for (const uint16_t *ImpDefs = NewDesc.getImplicitDefs();
           *ImpDefs; ++ImpDefs)
        if (!MI->definesRegister(*ImpDefs))
          MI->addOperand(*MI->getParent()->getParent(),
                         MachineOperand::CreateReg(*ImpDefs, true, true));
    if (NewDesc.ImplicitUses)
      for (const uint16_t *ImpUses = NewDesc.getImplicitUses();
           *ImpUses; ++ImpUses)
        if (!MI->readsRegister(*ImpUses))
          MI->addOperand(*MI->getParent()->getParent(),
                         MachineOperand::CreateReg(*ImpUses, false, true));
  }

  // Modify the condition codes in PredsToUpdate and the CR subregisters in
  // SubRegsToUpdate. Since we have SUB(r1, r2) and CMP(r2, r1), the condition
  // code needs to be changed from r2 > r1 to r1 < r2, from r2 < r1 to
  // r1 > r2, etc.
  for (unsigned i = 0, e = PredsToUpdate.size(); i < e; i++)
    PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);

  for (unsigned i = 0, e = SubRegsToUpdate.size(); i < e; i++)
    SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);

  return true;
}

/// GetInstSizeInBytes - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  unsigned Opcode = MI->getOpcode();

  if (Opcode == PPC::INLINEASM) {
    const MachineFunction *MF = MI->getParent()->getParent();
    const char *AsmStr = MI->getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  } else if (Opcode == TargetOpcode::STACKMAP) {
    return MI->getOperand(1).getImm();
  } else if (Opcode == TargetOpcode::PATCHPOINT) {
    PatchPointOpers Opers(MI);
    return Opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
  } else {
    const MCInstrDesc &Desc = get(Opcode);
    return Desc.getSize();
  }
}
