//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "MCTargetDesc/SystemZMCTargetDesc.h"
#include "SystemZ.h"
#include "SystemZInstrBuilder.h"
#include "SystemZSubtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

#define DEBUG_TYPE "systemz-II"
STATISTIC(LOCRMuxJumps, "Number of LOCRMux jump-sequences (lower is better)");

// Return a mask with Count low bits set.
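// Shifting in two steps avoids the undefined behavior of shifting a 64-bit
// value by 64 bits when Count is 64.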
static uint64_t allOnes(unsigned int Count) {
  return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}

// Reg should be a 32-bit GPR. Return true if it is a high register rather
// than a low register.
static bool isHighReg(unsigned int Reg) {
  if (SystemZ::GRH32BitRegClass.contains(Reg))
    return true;
  assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32");
  return false;
}

// Pin the vtable to this file.
void SystemZInstrInfo::anchor() {}

SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
  : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
    RI(), STI(sti) {
}

// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
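// The high half is stored at the lower address (SystemZ is big-endian), so
// the second, low-half access gets the original displacement plus 8.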
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions. Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers and remember super reg and its flags.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  unsigned Reg128 = LowRegOp.getReg();
  unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
  unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));

  if (MI->mayStore()) {
    // Add implicit uses of the super register in case one of the subregs is
    // undefined. We could track liveness and skip storing an undefined
    // subreg, but this is hopefully rare (discovered with llvm-stress).
    // If Reg128 was killed, set kill flag on MI.
    unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
    MachineInstrBuilder(MF, EarlierMI).addReg(Reg128, Reg128UndefImpl);
    MachineInstrBuilder(MF, MI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
  }

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Clear the kill flags on the registers in the first instruction.
  if (EarlierMI->getOperand(0).isReg() && EarlierMI->getOperand(0).isUse())
    EarlierMI->getOperand(0).setIsKill(false);
  EarlierMI->getOperand(1).setIsKill(false);
  EarlierMI->getOperand(3).setIsKill(false);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}

// Split ADJDYNALLOC instruction MI.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);

  uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

// MI is an RI-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
// and HighOpcode takes an unsigned 32-bit operand. In those cases,
// MI has the same kind of operand as LowOpcode, so needs to be converted
// if HighOpcode is used.
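// For example, LHIMux expands to LHI (which sign-extends a 16-bit immediate)
// for a low register but to IIHF (which takes an unsigned 32-bit immediate)
// for a high register, so the immediate is reinterpreted as unsigned when the
// high form is chosen.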
void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
                                      unsigned HighOpcode,
                                      bool ConvertHigh) const {
  unsigned Reg = MI.getOperand(0).getReg();
  bool IsHigh = isHighReg(Reg);
  MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
  if (IsHigh && ConvertHigh)
    MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
}

// MI is a three-operand RIE-style pseudo instruction. Replace it with
// LowOpcodeK if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned LowOpcodeK,
                                       unsigned HighOpcode) const {
  unsigned DestReg = MI.getOperand(0).getReg();
  unsigned SrcReg = MI.getOperand(1).getReg();
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);
  if (!DestIsHigh && !SrcIsHigh)
    MI.setDesc(get(LowOpcodeK));
  else {
    emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
                  SystemZ::LR, 32, MI.getOperand(1).isKill(),
                  MI.getOperand(1).isUndef());
    MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
    MI.getOperand(1).setReg(DestReg);
    MI.tieOperands(0, 1);
  }
}

// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.
void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
                                       MI.getOperand(2).getImm());
  MI.setDesc(get(Opcode));
}

// MI is a load-on-condition pseudo instruction with a single register
// (source or destination) operand. Replace it with LowOpcode if the
// register is a low GR32 and HighOpcode if the register is a high GR32.
void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Opcode = isHighReg(Reg) ? HighOpcode : LowOpcode;
  MI.setDesc(get(Opcode));
}

// MI is a load-register-on-condition pseudo instruction. Replace it with
// LowOpcode if source and destination are both low GR32s and HighOpcode if
// source and destination are both high GR32s.
void SystemZInstrInfo::expandLOCRPseudo(MachineInstr &MI, unsigned LowOpcode,
                                        unsigned HighOpcode) const {
  unsigned DestReg = MI.getOperand(0).getReg();
  unsigned SrcReg = MI.getOperand(2).getReg();
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);

  if (!DestIsHigh && !SrcIsHigh)
    MI.setDesc(get(LowOpcode));
  else if (DestIsHigh && SrcIsHigh)
    MI.setDesc(get(HighOpcode));
  else
    LOCRMuxJumps++;

  // If we were unable to implement the pseudo with a single instruction, we
  // need to convert it back into a branch sequence. This cannot be done here
  // since the caller of expandPostRAPseudo does not handle changes to the CFG
  // correctly. This change is deferred to the SystemZExpandPseudo pass.
}

// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another. Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
                                        unsigned Size) const {
  MachineInstrBuilder MIB =
    emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
                  MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
                  Size, MI.getOperand(1).isKill(), MI.getOperand(1).isUndef());

  // Keep the remaining operands as-is.
  for (unsigned I = 2; I < MI.getNumOperands(); ++I)
    MIB.add(MI.getOperand(I));

  MI.eraseFromParent();
}

void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  const unsigned Reg64 = MI->getOperand(0).getReg();
  const unsigned Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);

  // EAR can only load the low subregister, so use a shift for %a0 to produce
  // the GR containing %a0 and %a1.

  // ear <reg>, %a0
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
    .addReg(SystemZ::A0)
    .addReg(Reg64, RegState::ImplicitDefine);

  // sllg <reg>, <reg>, 32
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
    .addReg(Reg64)
    .addReg(0)
    .addImm(32);

  // ear <reg>, %a1
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
    .addReg(SystemZ::A1);

  // lg <reg>, 40(<reg>)
  MI->setDesc(get(SystemZ::LG));
  MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
}

// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
// DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
// are low registers, otherwise use RISB[LH]G. Size is the number of bits
// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
// KillSrc is true if this move is the last use of SrcReg.
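// In the RISB forms used below, the immediate operands are the start bit
// (32 - Size), the end bit (31, with the 128 flag requesting that bits
// outside the range be zeroed), and the rotate amount (32 when moving
// between register halves).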
MachineInstrBuilder
SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, unsigned LowLowOpcode,
                                unsigned Size, bool KillSrc,
                                bool UndefSrc) const {
  unsigned Opcode;
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);
  if (DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBHH;
  else if (DestIsHigh && !SrcIsHigh)
    Opcode = SystemZ::RISBHL;
  else if (!DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBLH;
  else {
    return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
  }
  unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
  return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(DestReg, RegState::Undef)
    .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
    .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
}

MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                       bool NewMI,
                                                       unsigned OpIdx1,
                                                       unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case SystemZ::LOCRMux:
  case SystemZ::LOCFHR:
  case SystemZ::LOCR:
  case SystemZ::LOCGR: {
    auto &WorkingMI = cloneIfNew(MI);
    // Invert condition.
    unsigned CCValid = WorkingMI.getOperand(3).getImm();
    unsigned CCMask = WorkingMI.getOperand(4).getImm();
    WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  default:
    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  }
}

// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI.getDesc();
  if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
      MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  return 0;
}

unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
  if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
      MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
      MI.getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI.getOperand(2).getImm();
  unsigned FI1 = MI.getOperand(0).getIndex();
  unsigned FI2 = MI.getOperand(3).getIndex();
  if (MFI.getObjectSize(FI1) != Length ||
      MFI.getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}

bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(*I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(*I));
    if (!Branch.Target->isMBB())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.Target->getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (std::next(I) != MBB.end())
        std::next(I)->eraseFromParent();

      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.Target->getMBB();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.Target->getMBB();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.Target->getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does. Should be easy on Z!
    return false;
  }

  return false;
}

unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(*I).Target->isMBB())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

bool SystemZInstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2 && "Invalid condition");
  Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
  return false;
}

unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  // In this function we output 32-bit branches, which should always
  // have enough range. They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");
  assert(!BytesAdded && "code size not handled");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way Conditional branch. Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &Mask,
                                      int &Value) const {
  assert(MI.isCompare() && "Caller should have checked for a comparison");

  if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
      MI.getOperand(1).isImm()) {
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = 0;
    Value = MI.getOperand(1).getImm();
    Mask = ~0;
    return true;
  }

  return false;
}

// If Reg is a virtual register, return its definition, otherwise return null.
static MachineInstr *getDef(unsigned Reg,
                            const MachineRegisterInfo *MRI) {
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return nullptr;
  return MRI->getUniqueVRegDef(Reg);
}

// Return true if MI is a shift of type Opcode by Imm bits.
static bool isShift(MachineInstr *MI, unsigned Opcode, int64_t Imm) {
  return (MI->getOpcode() == Opcode &&
          !MI->getOperand(2).getReg() &&
          MI->getOperand(3).getImm() == Imm);
}

// If the destination of MI has no uses, delete it as dead.
static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
  if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
    MI->eraseFromParent();
}

// Compare compares SrcReg against zero. Check whether SrcReg contains
// the result of an IPM sequence whose input CC survives until Compare,
// and whether Compare is therefore redundant. Delete it and return
// true if so.
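// The pattern matched is an RLL by 31 bits of an SRL by SystemZ::IPM_CC bits
// of an IPM result (optionally sign-extended by LGFR), with no intervening
// clobber of CC in the same block.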
static bool removeIPMBasedCompare(MachineInstr &Compare, unsigned SrcReg,
                                  const MachineRegisterInfo *MRI,
                                  const TargetRegisterInfo *TRI) {
  MachineInstr *LGFR = nullptr;
  MachineInstr *RLL = getDef(SrcReg, MRI);
  if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
    LGFR = RLL;
    RLL = getDef(LGFR->getOperand(1).getReg(), MRI);
  }
  if (!RLL || !isShift(RLL, SystemZ::RLL, 31))
    return false;

  MachineInstr *SRL = getDef(RLL->getOperand(1).getReg(), MRI);
  if (!SRL || !isShift(SRL, SystemZ::SRL, SystemZ::IPM_CC))
    return false;

  MachineInstr *IPM = getDef(SRL->getOperand(1).getReg(), MRI);
  if (!IPM || IPM->getOpcode() != SystemZ::IPM)
    return false;

  // Check that there are no assignments to CC between the IPM and Compare.
  if (IPM->getParent() != Compare.getParent())
    return false;
  MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare.getIterator();
  for (++MBBI; MBBI != MBBE; ++MBBI) {
    MachineInstr &MI = *MBBI;
    if (MI.modifiesRegister(SystemZ::CC, TRI))
      return false;
  }

  Compare.eraseFromParent();
  if (LGFR)
    eraseIfDead(LGFR, MRI);
  eraseIfDead(RLL, MRI);
  eraseIfDead(SRL, MRI);
  eraseIfDead(IPM, MRI);

  return true;
}

bool SystemZInstrInfo::optimizeCompareInstr(
    MachineInstr &Compare, unsigned SrcReg, unsigned SrcReg2, int Mask,
    int Value, const MachineRegisterInfo *MRI) const {
  assert(!SrcReg2 && "Only optimizing constant comparisons so far");
  bool IsLogical = (Compare.getDesc().TSFlags & SystemZII::IsLogical) != 0;
  return Value == 0 && !IsLogical &&
         removeIPMBasedCompare(Compare, SrcReg, MRI, &RI);
}

bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                       ArrayRef<MachineOperand> Pred,
                                       unsigned TrueReg, unsigned FalseReg,
                                       int &CondCycles, int &TrueCycles,
                                       int &FalseCycles) const {
  // Not all subtargets have LOCR instructions.
  if (!STI.hasLoadStoreOnCond())
    return false;
  if (Pred.size() != 2)
    return false;

  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // We have LOCR instructions for 32 and 64 bit general purpose registers.
  if ((STI.hasLoadStoreOnCond2() &&
       SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
      SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    CondCycles = 2;
    TrueCycles = 2;
    FalseCycles = 2;
    return true;
  }

  // Can't do anything else.
  return false;
}

void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, unsigned DstReg,
                                    ArrayRef<MachineOperand> Pred,
                                    unsigned TrueReg,
                                    unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);

  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();

  unsigned Opc;
  if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasLoadStoreOnCond2())
      Opc = SystemZ::LOCRMux;
    else {
      Opc = SystemZ::LOCR;
      MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
      unsigned TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      unsigned FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
      TrueReg = TReg;
      FalseReg = FReg;
    }
  } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC))
    Opc = SystemZ::LOCGR;
  else
    llvm_unreachable("Invalid register class");

  BuildMI(MBB, I, DL, get(Opc), DstReg)
    .addReg(FalseReg).addReg(TrueReg)
    .addImm(CCValid).addImm(CCMask);
}

bool SystemZInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                     unsigned Reg,
                                     MachineRegisterInfo *MRI) const {
  unsigned DefOpc = DefMI.getOpcode();
  if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
      DefOpc != SystemZ::LGHI)
    return false;
  if (DefMI.getOperand(0).getReg() != Reg)
    return false;
  int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();

  unsigned UseOpc = UseMI.getOpcode();
  unsigned NewUseOpc;
  unsigned UseIdx;
  int CommuteIdx = -1;
  switch (UseOpc) {
  case SystemZ::LOCRMux:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCHIMux;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  case SystemZ::LOCGR:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCGHI;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  default:
    return false;
  }

  if (CommuteIdx != -1)
    if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
      return false;

  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}

bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();
  if (Opcode == SystemZ::Return ||
      Opcode == SystemZ::Trap ||
      Opcode == SystemZ::CallJG ||
      Opcode == SystemZ::CallBR)
    return true;
  return false;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    BranchProbability Probability) const {
  // Avoid using conditional returns at the end of a loop (since then
  // we'd need to emit an unconditional branch to the beginning anyway,
  // making the loop body longer). This doesn't apply for low-probability
  // loops (eg. compare-and-swap retry), so just decide based on branch
  // probability instead of looping structure.
  // However, since Compare and Trap instructions cost the same as a regular
  // Compare instruction, we should allow the if conversion to convert this
  // into a Conditional Compare regardless of the branch probability.
  if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
      MBB.succ_empty() && Probability < BranchProbability(1, 8))
    return false;
  // For now only convert single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                    MachineBasicBlock &FMBB,
                    unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                    BranchProbability Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}

bool SystemZInstrInfo::
isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                          BranchProbability Probability) const {
  // For now only duplicate single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI.getOpcode();
  if (Opcode == SystemZ::Trap) {
    MI.setDesc(get(SystemZ::CondTrap));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::Return) {
    MI.setDesc(get(SystemZ::CondReturn));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallJG) {
    MachineOperand FirstOp = MI.getOperand(0);
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.RemoveOperand(1);
    MI.RemoveOperand(0);
    MI.setDesc(get(SystemZ::CallBRCL));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid)
      .addImm(CCMask)
      .add(FirstOp)
      .addRegMask(RegMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallBR) {
    const uint32_t *RegMask = MI.getOperand(0).getRegMask();
    MI.RemoveOperand(0);
    MI.setDesc(get(SystemZ::CallBCR));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addRegMask(RegMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  return false;
}

void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   const DebugLoc &DL, unsigned DestReg,
                                   unsigned SrcReg, bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
  // super register in case one of the subregs is undefined.
  // This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
                RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
      .addReg(SrcReg, RegState::Implicit);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
                RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
      .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
    return;
  }

  if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
    emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
                  false);
    return;
  }

  // Move 128-bit floating-point values between VR128 and FP128.
  if (SystemZ::VR128BitRegClass.contains(DestReg) &&
      SystemZ::FP128BitRegClass.contains(SrcReg)) {
    unsigned SrcRegHi =
      RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
                             SystemZ::subreg_r64, &SystemZ::VR128BitRegClass);
    unsigned SrcRegLo =
      RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
                             SystemZ::subreg_r64, &SystemZ::VR128BitRegClass);

    BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
      .addReg(SrcRegHi, getKillRegState(KillSrc))
      .addReg(SrcRegLo, getKillRegState(KillSrc));
    return;
  }
  if (SystemZ::FP128BitRegClass.contains(DestReg) &&
      SystemZ::VR128BitRegClass.contains(SrcReg)) {
    unsigned DestRegHi =
      RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
                             SystemZ::subreg_r64, &SystemZ::VR128BitRegClass);
    unsigned DestRegLo =
      RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
                             SystemZ::subreg_r64, &SystemZ::VR128BitRegClass);

    if (DestRegHi != SrcReg)
      copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
    BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
      .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
    return;
  }

  // Move CC value from/to a GR32.
  if (SrcReg == SystemZ::CC) {
    auto MIB = BuildMI(MBB, MBBI, DL, get(SystemZ::IPM), DestReg);
    if (KillSrc) {
      const MachineFunction *MF = MBB.getParent();
      const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
      MIB->addRegisterKilled(SrcReg, TRI);
    }
    return;
  }
  if (DestReg == SystemZ::CC) {
    BuildMI(MBB, MBBI, DL, get(SystemZ::TMLH))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(3 << (SystemZ::IPM_CC - 16));
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    // For z13 we prefer LDR over LER to avoid partial register dependencies.
    Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR32;
  else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR64;
  else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR;
  else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::CPYA;
  else if (SystemZ::AR32BitRegClass.contains(DestReg) &&
           SystemZ::GR32BitRegClass.contains(SrcReg))
    Opcode = SystemZ::SAR;
  else if (SystemZ::GR32BitRegClass.contains(DestReg) &&
           SystemZ::AR32BitRegClass.contains(SrcReg))
    Opcode = SystemZ::EAR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void SystemZInstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FrameIdx, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                      .addReg(SrcReg, getKillRegState(isKill)),
                    FrameIdx);
}

void SystemZInstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FrameIdx, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

namespace {

struct LogicOp {
  LogicOp() = default;
  LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
    : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

  explicit operator bool() const { return RegSize; }

  unsigned RegSize = 0;
  unsigned ImmLSB = 0;
  unsigned ImmSize = 0;
};

} // end anonymous namespace

static LogicOp interpretAndImmediate(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::NILMux: return LogicOp(32, 0, 16);
  case SystemZ::NIHMux: return LogicOp(32, 16, 16);
  case SystemZ::NILL64: return LogicOp(64, 0, 16);
  case SystemZ::NILH64: return LogicOp(64, 16, 16);
  case SystemZ::NIHL64: return LogicOp(64, 32, 16);
  case SystemZ::NIHH64: return LogicOp(64, 48, 16);
  case SystemZ::NIFMux: return LogicOp(32, 0, 32);
  case SystemZ::NILF64: return LogicOp(64, 0, 32);
  case SystemZ::NIHF64: return LogicOp(64, 32, 32);
  default: return LogicOp();
  }
}

static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
  if (OldMI->registerDefIsDead(SystemZ::CC)) {
    MachineOperand *CCDef = NewMI->findRegisterDefOperand(SystemZ::CC);
    if (CCDef != nullptr)
      CCDef->setIsDead(true);
  }
}

// Used to return from convertToThreeAddress after replacing two-address
// instruction OldMI with three-address instruction NewMI.
static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
                                                 MachineInstr *NewMI,
                                                 LiveVariables *LV) {
  if (LV) {
    unsigned NumOps = OldMI->getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = OldMI->getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), *OldMI, *NewMI);
    }
  }
  transferDeadCC(OldMI, NewMI);
  return NewMI;
}

MachineInstr *SystemZInstrInfo::convertToThreeAddress(
    MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Opcode = MI.getOpcode();
  unsigned NumOps = MI.getNumOperands();

  // Try to convert something like SLL into SLLK, if supported.
  // We prefer to keep the two-operand form where possible both
  // because it tends to be shorter and because some instructions
  // have memory forms that can be used during spilling.
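  // (The "K" forms such as SLLK come from the distinct-operands facility,
  // which is why this conversion is gated on STI.hasDistinctOps().)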
  if (STI.hasDistinctOps()) {
    MachineOperand &Dest = MI.getOperand(0);
    MachineOperand &Src = MI.getOperand(1);
    unsigned DestReg = Dest.getReg();
    unsigned SrcReg = Src.getReg();
    // AHIMux is only really a three-operand instruction when both operands
    // are low registers. Try to constrain both operands to be low if
    // possible.
    if (Opcode == SystemZ::AHIMux &&
        TargetRegisterInfo::isVirtualRegister(DestReg) &&
        TargetRegisterInfo::isVirtualRegister(SrcReg) &&
        MRI.getRegClass(DestReg)->contains(SystemZ::R1L) &&
        MRI.getRegClass(SrcReg)->contains(SystemZ::R1L)) {
      MRI.constrainRegClass(DestReg, &SystemZ::GR32BitRegClass);
      MRI.constrainRegClass(SrcReg, &SystemZ::GR32BitRegClass);
    }
    int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
    if (ThreeOperandOpcode >= 0) {
      // Create three address instruction without adding the implicit
      // operands. Those will instead be copied over from the original
      // instruction by the loop below.
      MachineInstrBuilder MIB(
          *MF, MF->CreateMachineInstr(get(ThreeOperandOpcode), MI.getDebugLoc(),
                                      /*NoImplicit=*/true));
      MIB.add(Dest);
      // Keep the kill state, but drop the tied flag.
      MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
      // Keep the remaining operands as-is.
      for (unsigned I = 2; I < NumOps; ++I)
        MIB.add(MI.getOperand(I));
      MBB->insert(MI, MIB);
      return finishConvertToThreeAddress(&MI, MIB, LV);
    }
  }

  // Try to convert an AND into an RISBG-type instruction.
  if (LogicOp And = interpretAndImmediate(Opcode)) {
    uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
    // AND IMMEDIATE leaves the other bits of the register unchanged.
    Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
    unsigned Start, End;
    if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
      unsigned NewOpcode;
      if (And.RegSize == 64) {
        NewOpcode = SystemZ::RISBG;
        // Prefer RISBGN if available, since it does not clobber CC.
        if (STI.hasMiscellaneousExtensions())
          NewOpcode = SystemZ::RISBGN;
      } else {
        NewOpcode = SystemZ::RISBMux;
        Start &= 31;
        End &= 31;
      }
      MachineOperand &Dest = MI.getOperand(0);
      MachineOperand &Src = MI.getOperand(1);
      MachineInstrBuilder MIB =
        BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
          .add(Dest)
          .addReg(0)
          .addReg(Src.getReg(), getKillRegState(Src.isKill()),
                  Src.getSubReg())
          .addImm(Start)
          .addImm(End + 128)
          .addImm(0);
      return finishConvertToThreeAddress(&MI, MIB, LV);
    }
  }
  return nullptr;
}

MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex,
    LiveIntervals *LIS) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Size = MFI.getObjectSize(FrameIndex);
  unsigned Opcode = MI.getOpcode();

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    if (LIS != nullptr && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
        isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {

      // Check CC liveness, since new instruction introduces a dead
      // def of CC.
      MCRegUnitIterator CCUnit(SystemZ::CC, TRI);
      LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);
      ++CCUnit;
      assert(!CCUnit.isValid() && "CC only has one reg unit.");
      SlotIndex MISlot =
          LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
      if (!CCLiveRange.liveAt(MISlot)) {
        // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
        MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
                                        MI.getDebugLoc(), get(SystemZ::AGSI))
                                    .addFrameIndex(FrameIndex)
                                    .addImm(0)
                                    .addImm(MI.getOperand(2).getImm());
        BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
        CCLiveRange.createDeadDef(MISlot, LIS->getVNInfoAllocator());
        return BuiltMI;
      }
    }
    return nullptr;
  }

  // All other cases require a single operand.
  if (Ops.size() != 1)
    return nullptr;

  unsigned OpNum = Ops[0];
  assert(Size * 8 ==
             TRI->getRegSizeInBits(*MF.getRegInfo()
                                       .getRegClass(MI.getOperand(OpNum).getReg())) &&
         "Invalid size combination");

  if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
      isInt<8>(MI.getOperand(2).getImm())) {
    // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
       isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
      (Opcode == SystemZ::ALGFI && OpNum == 0 &&
       isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
    // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm((int8_t)MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
       isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
      (Opcode == SystemZ::SLGFI && OpNum == 0 &&
       isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
    // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
    Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm((int8_t)-MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(StoreOpcode))
          .add(MI.getOperand(1))
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(LoadOpcode))
          .add(MI.getOperand(0))
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled. Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy. This means that we cannot use it if the
  // load or store is volatile. We also wouldn't be able to use MVC if
  // the two memories partially overlap, but that case cannot occur here,
  // because we know that one of the memories is a full frame index.
  //
  // For performance reasons, we also want to avoid using MVC if the addresses
  // might be equal. We don't worry about that case here, because spill slot
  // coloring happens later, and because we have special code to remove
  // MVCs that turn out to be redundant.
  if (OpNum == 0 && MI.hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI.memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(Size)
            .add(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .add(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addImm(Size)
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one, try to change <INSN>R
  // into <INSN>.
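  // For example, the register form AR can typically be rewritten as the
  // memory form A against the spill slot; getMemOpcode supplies that mapping.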
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode >= 0) {
    unsigned NumOps = MI.getNumExplicitOperands();
    if (OpNum == NumOps - 1) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
      uint64_t Offset = Size - AccessBytes;
      MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
                                        MI.getDebugLoc(), get(MemOpcode));
      for (unsigned I = 0; I < OpNum; ++I)
        MIB.add(MI.getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      transferDeadCC(&MI, MIB);
      return MIB;
    }
  }

  return nullptr;
}

MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
    LiveIntervals *LIS) const {
  return nullptr;
}

expandPostRAPseudo(MachineInstr & MI) const1325 bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1326 switch (MI.getOpcode()) {
1327 case SystemZ::L128:
1328 splitMove(MI, SystemZ::LG);
1329 return true;
1330
1331 case SystemZ::ST128:
1332 splitMove(MI, SystemZ::STG);
1333 return true;
1334
1335 case SystemZ::LX:
1336 splitMove(MI, SystemZ::LD);
1337 return true;
1338
1339 case SystemZ::STX:
1340 splitMove(MI, SystemZ::STD);
1341 return true;
1342
1343 case SystemZ::LBMux:
1344 expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
1345 return true;
1346
1347 case SystemZ::LHMux:
1348 expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
1349 return true;
1350
1351 case SystemZ::LLCRMux:
1352 expandZExtPseudo(MI, SystemZ::LLCR, 8);
1353 return true;
1354
1355 case SystemZ::LLHRMux:
1356 expandZExtPseudo(MI, SystemZ::LLHR, 16);
1357 return true;
1358
1359 case SystemZ::LLCMux:
1360 expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
1361 return true;
1362
1363 case SystemZ::LLHMux:
1364 expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
1365 return true;
1366
1367 case SystemZ::LMux:
1368 expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
1369 return true;
1370
1371 case SystemZ::LOCMux:
1372 expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
1373 return true;
1374
1375 case SystemZ::LOCHIMux:
1376 expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
1377 return true;
1378
1379 case SystemZ::LOCRMux:
1380 expandLOCRPseudo(MI, SystemZ::LOCR, SystemZ::LOCFHR);
1381 return true;
1382
1383 case SystemZ::STCMux:
1384 expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
1385 return true;
1386
1387 case SystemZ::STHMux:
1388 expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
1389 return true;
1390
1391 case SystemZ::STMux:
1392 expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
1393 return true;
1394
1395 case SystemZ::STOCMux:
1396 expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
1397 return true;
1398
1399 case SystemZ::LHIMux:
1400 expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
1401 return true;
1402
1403 case SystemZ::IIFMux:
1404 expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
1405 return true;
1406
1407 case SystemZ::IILMux:
1408 expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
1409 return true;
1410
1411 case SystemZ::IIHMux:
1412 expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
1413 return true;
1414
1415 case SystemZ::NIFMux:
1416 expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
1417 return true;
1418
1419 case SystemZ::NILMux:
1420 expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
1421 return true;
1422
1423 case SystemZ::NIHMux:
1424 expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
1425 return true;
1426
1427 case SystemZ::OIFMux:
1428 expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
1429 return true;
1430
1431 case SystemZ::OILMux:
1432 expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
1433 return true;
1434
1435 case SystemZ::OIHMux:
1436 expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
1437 return true;
1438
1439 case SystemZ::XIFMux:
1440 expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
1441 return true;
1442
1443 case SystemZ::TMLMux:
1444 expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
1445 return true;
1446
1447 case SystemZ::TMHMux:
1448 expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
1449 return true;
1450
1451 case SystemZ::AHIMux:
1452 expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
1453 return true;
1454
1455 case SystemZ::AHIMuxK:
1456 expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
1457 return true;
1458
1459 case SystemZ::AFIMux:
1460 expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
1461 return true;
1462
1463 case SystemZ::CHIMux:
1464 expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
1465 return true;
1466
1467 case SystemZ::CFIMux:
1468 expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
1469 return true;
1470
1471 case SystemZ::CLFIMux:
1472 expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
1473 return true;
1474
1475 case SystemZ::CMux:
1476 expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
1477 return true;
1478
1479 case SystemZ::CLMux:
1480 expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
1481 return true;
1482
1483 case SystemZ::RISBMux: {
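// If source and destination ended up in the same half of the GRX32 pair,
// a plain RISBLL or RISBHH suffices.  Otherwise use a cross-half form and
// adjust the rotate amount (operand 5) by 32 to compensate for the 32-bit
// offset between the low and high halves.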
1484 bool DestIsHigh = isHighReg(MI.getOperand(0).getReg());
1485 bool SrcIsHigh = isHighReg(MI.getOperand(2).getReg());
1486 if (SrcIsHigh == DestIsHigh)
1487 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
1488 else {
1489 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
1490 MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
1491 }
1492 return true;
1493 }
1494
1495 case SystemZ::ADJDYNALLOC:
1496 splitAdjDynAlloc(MI);
1497 return true;
1498
1499 case TargetOpcode::LOAD_STACK_GUARD:
1500 expandLoadStackGuard(&MI);
1501 return true;
1502
1503 default:
1504 return false;
1505 }
1506 }
1507
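// Return the size of MI in bytes.  Inline asm is estimated from the asm
// string via getInlineAsmLength(); everything else uses the encoded size
// from the MCInstrDesc (SystemZ instructions are 2, 4 or 6 bytes).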
1508 unsigned SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
1509 if (MI.getOpcode() == TargetOpcode::INLINEASM) {
1510 const MachineFunction *MF = MI.getParent()->getParent();
1511 const char *AsmStr = MI.getOperand(0).getSymbolName();
1512 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
1513 }
1514 return MI.getDesc().getSize();
1515 }
1516
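// Decode a branch instruction into a SystemZII::Branch descriptor: the kind
// of branch, the set of CC values the branch can test, the subset under
// which it is taken, and the target operand.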
1517 SystemZII::Branch
1518 SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
1519 switch (MI.getOpcode()) {
1520 case SystemZ::BR:
1521 case SystemZ::BI:
1522 case SystemZ::J:
1523 case SystemZ::JG:
1524 return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
1525 SystemZ::CCMASK_ANY, &MI.getOperand(0));
1526
1527 case SystemZ::BRC:
1528 case SystemZ::BRCL:
1529 return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
1530 MI.getOperand(1).getImm(), &MI.getOperand(2));
1531
1532 case SystemZ::BRCT:
1533 case SystemZ::BRCTH:
1534 return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
1535 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1536
1537 case SystemZ::BRCTG:
1538 return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
1539 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1540
1541 case SystemZ::CIJ:
1542 case SystemZ::CRJ:
1543 return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
1544 MI.getOperand(2).getImm(), &MI.getOperand(3));
1545
1546 case SystemZ::CLIJ:
1547 case SystemZ::CLRJ:
1548 return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
1549 MI.getOperand(2).getImm(), &MI.getOperand(3));
1550
1551 case SystemZ::CGIJ:
1552 case SystemZ::CGRJ:
1553 return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
1554 MI.getOperand(2).getImm(), &MI.getOperand(3));
1555
1556 case SystemZ::CLGIJ:
1557 case SystemZ::CLGRJ:
1558 return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
1559 MI.getOperand(2).getImm(), &MI.getOperand(3));
1560
1561 default:
1562 llvm_unreachable("Unrecognized branch opcode");
1563 }
1564 }
1565
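// Pick the load and store opcodes used to spill and reload a register of
// class RC.  GRX32 uses the LMux/STMux pseudos, which expandPostRAPseudo
// later resolves once the concrete half of the register pair is known.
// A hypothetical caller sketch (variable names assumed for illustration):
//   unsigned LoadOp, StoreOp;
//   TII->getLoadStoreOpcodes(&SystemZ::GR64BitRegClass, LoadOp, StoreOp);
//   // LoadOp == SystemZ::LG and StoreOp == SystemZ::STG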
1566 void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
1567 unsigned &LoadOpcode,
1568 unsigned &StoreOpcode) const {
1569 if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
1570 LoadOpcode = SystemZ::L;
1571 StoreOpcode = SystemZ::ST;
1572 } else if (RC == &SystemZ::GRH32BitRegClass) {
1573 LoadOpcode = SystemZ::LFH;
1574 StoreOpcode = SystemZ::STFH;
1575 } else if (RC == &SystemZ::GRX32BitRegClass) {
1576 LoadOpcode = SystemZ::LMux;
1577 StoreOpcode = SystemZ::STMux;
1578 } else if (RC == &SystemZ::GR64BitRegClass ||
1579 RC == &SystemZ::ADDR64BitRegClass) {
1580 LoadOpcode = SystemZ::LG;
1581 StoreOpcode = SystemZ::STG;
1582 } else if (RC == &SystemZ::GR128BitRegClass ||
1583 RC == &SystemZ::ADDR128BitRegClass) {
1584 LoadOpcode = SystemZ::L128;
1585 StoreOpcode = SystemZ::ST128;
1586 } else if (RC == &SystemZ::FP32BitRegClass) {
1587 LoadOpcode = SystemZ::LE;
1588 StoreOpcode = SystemZ::STE;
1589 } else if (RC == &SystemZ::FP64BitRegClass) {
1590 LoadOpcode = SystemZ::LD;
1591 StoreOpcode = SystemZ::STD;
1592 } else if (RC == &SystemZ::FP128BitRegClass) {
1593 LoadOpcode = SystemZ::LX;
1594 StoreOpcode = SystemZ::STX;
1595 } else if (RC == &SystemZ::VR32BitRegClass) {
1596 LoadOpcode = SystemZ::VL32;
1597 StoreOpcode = SystemZ::VST32;
1598 } else if (RC == &SystemZ::VR64BitRegClass) {
1599 LoadOpcode = SystemZ::VL64;
1600 StoreOpcode = SystemZ::VST64;
1601 } else if (RC == &SystemZ::VF128BitRegClass ||
1602 RC == &SystemZ::VR128BitRegClass) {
1603 LoadOpcode = SystemZ::VL;
1604 StoreOpcode = SystemZ::VST;
1605 } else
1606 llvm_unreachable("Unsupported regclass to load or store");
1607 }
1608
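// Return a version of Opcode that can encode Offset as its displacement:
// the unsigned 12-bit form if the offset fits, otherwise the signed 20-bit
// form, otherwise 0.  For 128-bit accesses both halves (Offset and
// Offset + 8) must be encodable.  For example, an offset of 4096 no longer
// fits in 12 bits, so a D12-only opcode must be rewritten to its D20
// counterpart (if one exists) or rejected.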
1609 unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
1610 int64_t Offset) const {
1611 const MCInstrDesc &MCID = get(Opcode);
1612 int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
1613 if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
1614 // Get the instruction to use for unsigned 12-bit displacements.
1615 int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
1616 if (Disp12Opcode >= 0)
1617 return Disp12Opcode;
1618
1619 // All address-related instructions can use unsigned 12-bit
1620 // displacements.
1621 return Opcode;
1622 }
1623 if (isInt<20>(Offset) && isInt<20>(Offset2)) {
1624 // Get the instruction to use for signed 20-bit displacements.
1625 int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
1626 if (Disp20Opcode >= 0)
1627 return Disp20Opcode;
1628
1629 // Check whether Opcode allows signed 20-bit displacements.
1630 if (MCID.TSFlags & SystemZII::Has20BitOffset)
1631 return Opcode;
1632 }
1633 return 0;
1634 }
1635
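// Map a plain load or move opcode to the "load and test" form that also
// sets the condition code, or return 0 if there is none.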
1636 unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
1637 switch (Opcode) {
1638 case SystemZ::L: return SystemZ::LT;
1639 case SystemZ::LY: return SystemZ::LT;
1640 case SystemZ::LG: return SystemZ::LTG;
1641 case SystemZ::LGF: return SystemZ::LTGF;
1642 case SystemZ::LR: return SystemZ::LTR;
1643 case SystemZ::LGFR: return SystemZ::LTGFR;
1644 case SystemZ::LGR: return SystemZ::LTGR;
1645 case SystemZ::LER: return SystemZ::LTEBR;
1646 case SystemZ::LDR: return SystemZ::LTDBR;
1647 case SystemZ::LXR: return SystemZ::LTXBR;
1648 case SystemZ::LCDFR: return SystemZ::LCDBR;
1649 case SystemZ::LPDFR: return SystemZ::LPDBR;
1650 case SystemZ::LNDFR: return SystemZ::LNDBR;
1651 case SystemZ::LCDFR_32: return SystemZ::LCEBR;
1652 case SystemZ::LPDFR_32: return SystemZ::LPEBR;
1653 case SystemZ::LNDFR_32: return SystemZ::LNEBR;
1654 // On zEC12 we prefer to use RISBGN. But if there is a chance to
1655 // actually use the condition code, we may turn it back into RISBG.
1656 // Note that RISBG is not really a "load-and-test" instruction,
1657 // but sets the same condition code values, so is OK to use here.
1658 case SystemZ::RISBGN: return SystemZ::RISBG;
1659 default: return 0;
1660 }
1661 }
1662
1663 // Return true if Mask matches the regexp 0*1+0*, given that zero masks
1664 // have already been filtered out. Store the first set bit in LSB and
1665 // the number of set bits in Length if so.
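// For example, Mask = 0x0ff0 yields LSB = 4 and Length = 8.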
1666 static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
1667 unsigned First = findFirstSet(Mask);
1668 uint64_t Top = (Mask >> First) + 1;
1669 if ((Top & -Top) == Top) {
1670 LSB = First;
1671 Length = findFirstSet(Top);
1672 return true;
1673 }
1674 return false;
1675 }
1676
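// Check whether Mask could be selected by a RISBG-style rotate-and-insert,
// and if so compute the Start and End bit positions in the instruction's
// numbering (bit 0 is the msb of the 64-bit register).  For example, with
// BitSize = 32 the mask 0x0000ff00 gives Start = 48 and End = 55, while the
// wrap-around mask 0xf000000f gives Start = 60 and End = 35.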
1677 bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
1678 unsigned &Start, unsigned &End) const {
1679 // Reject trivial all-zero masks.
1680 Mask &= allOnes(BitSize);
1681 if (Mask == 0)
1682 return false;
1683
1684 // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
1685 // the msb and End specifies the index of the lsb.
1686 unsigned LSB, Length;
1687 if (isStringOfOnes(Mask, LSB, Length)) {
1688 Start = 63 - (LSB + Length - 1);
1689 End = 63 - LSB;
1690 return true;
1691 }
1692
1693 // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
1694 // of the low 1s and End specifies the lsb of the high 1s.
1695 if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
1696 assert(LSB > 0 && "Bottom bit must be set");
1697 assert(LSB + Length < BitSize && "Top bit must be set");
1698 Start = 63 - (LSB - 1);
1699 End = 63 - (LSB + Length);
1700 return true;
1701 }
1702
1703 return false;
1704 }
1705
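// Return the opcode that fuses the comparison Opcode with a branch, return,
// sibcall or trap of the given Type, or 0 if no fused form applies.  The
// immediate forms are only usable when the operand fits in the fused
// instruction's 8-bit immediate, and memory compare-and-trap (CLT/CLGT)
// additionally requires the miscellaneous-extensions facility and a zero
// index register.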
1706 unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
1707 SystemZII::FusedCompareType Type,
1708 const MachineInstr *MI) const {
1709 switch (Opcode) {
1710 case SystemZ::CHI:
1711 case SystemZ::CGHI:
1712 if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
1713 return 0;
1714 break;
1715 case SystemZ::CLFI:
1716 case SystemZ::CLGFI:
1717 if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
1718 return 0;
1719 break;
1720 case SystemZ::CL:
1721 case SystemZ::CLG:
1722 if (!STI.hasMiscellaneousExtensions())
1723 return 0;
1724 if (!(MI && MI->getOperand(3).getReg() == 0))
1725 return 0;
1726 break;
1727 }
1728 switch (Type) {
1729 case SystemZII::CompareAndBranch:
1730 switch (Opcode) {
1731 case SystemZ::CR:
1732 return SystemZ::CRJ;
1733 case SystemZ::CGR:
1734 return SystemZ::CGRJ;
1735 case SystemZ::CHI:
1736 return SystemZ::CIJ;
1737 case SystemZ::CGHI:
1738 return SystemZ::CGIJ;
1739 case SystemZ::CLR:
1740 return SystemZ::CLRJ;
1741 case SystemZ::CLGR:
1742 return SystemZ::CLGRJ;
1743 case SystemZ::CLFI:
1744 return SystemZ::CLIJ;
1745 case SystemZ::CLGFI:
1746 return SystemZ::CLGIJ;
1747 default:
1748 return 0;
1749 }
1750 case SystemZII::CompareAndReturn:
1751 switch (Opcode) {
1752 case SystemZ::CR:
1753 return SystemZ::CRBReturn;
1754 case SystemZ::CGR:
1755 return SystemZ::CGRBReturn;
1756 case SystemZ::CHI:
1757 return SystemZ::CIBReturn;
1758 case SystemZ::CGHI:
1759 return SystemZ::CGIBReturn;
1760 case SystemZ::CLR:
1761 return SystemZ::CLRBReturn;
1762 case SystemZ::CLGR:
1763 return SystemZ::CLGRBReturn;
1764 case SystemZ::CLFI:
1765 return SystemZ::CLIBReturn;
1766 case SystemZ::CLGFI:
1767 return SystemZ::CLGIBReturn;
1768 default:
1769 return 0;
1770 }
1771 case SystemZII::CompareAndSibcall:
1772 switch (Opcode) {
1773 case SystemZ::CR:
1774 return SystemZ::CRBCall;
1775 case SystemZ::CGR:
1776 return SystemZ::CGRBCall;
1777 case SystemZ::CHI:
1778 return SystemZ::CIBCall;
1779 case SystemZ::CGHI:
1780 return SystemZ::CGIBCall;
1781 case SystemZ::CLR:
1782 return SystemZ::CLRBCall;
1783 case SystemZ::CLGR:
1784 return SystemZ::CLGRBCall;
1785 case SystemZ::CLFI:
1786 return SystemZ::CLIBCall;
1787 case SystemZ::CLGFI:
1788 return SystemZ::CLGIBCall;
1789 default:
1790 return 0;
1791 }
1792 case SystemZII::CompareAndTrap:
1793 switch (Opcode) {
1794 case SystemZ::CR:
1795 return SystemZ::CRT;
1796 case SystemZ::CGR:
1797 return SystemZ::CGRT;
1798 case SystemZ::CHI:
1799 return SystemZ::CIT;
1800 case SystemZ::CGHI:
1801 return SystemZ::CGIT;
1802 case SystemZ::CLR:
1803 return SystemZ::CLRT;
1804 case SystemZ::CLGR:
1805 return SystemZ::CLGRT;
1806 case SystemZ::CLFI:
1807 return SystemZ::CLFIT;
1808 case SystemZ::CLGFI:
1809 return SystemZ::CLGIT;
1810 case SystemZ::CL:
1811 return SystemZ::CLT;
1812 case SystemZ::CLG:
1813 return SystemZ::CLGT;
1814 default:
1815 return 0;
1816 }
1817 }
1818 return 0;
1819 }
1820
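// Map a load opcode to its load-and-trap equivalent, or return 0 if the
// subtarget lacks the load-and-trap facility or no such form exists.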
1821 unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
1822 if (!STI.hasLoadAndTrap())
1823 return 0;
1824 switch (Opcode) {
1825 case SystemZ::L:
1826 case SystemZ::LY:
1827 return SystemZ::LAT;
1828 case SystemZ::LG:
1829 return SystemZ::LGAT;
1830 case SystemZ::LFH:
1831 return SystemZ::LFHAT;
1832 case SystemZ::LLGF:
1833 return SystemZ::LLGFAT;
1834 case SystemZ::LLGT:
1835 return SystemZ::LLGTAT;
1836 }
1837 return 0;
1838 }
1839
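// Emit a single instruction that loads the constant Value into Reg, choosing
// among LGHI (signed 16-bit), LLILL/LLILH (a 16-bit chunk in the low or high
// halfword of the low 32 bits) and LGFI (signed 32-bit).  For example,
// Value = 0x12340000 becomes LLILH with immediate 0x1234.  Values outside
// the signed 32-bit range are asserted against below.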
1840 void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
1841 MachineBasicBlock::iterator MBBI,
1842 unsigned Reg, uint64_t Value) const {
1843 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
1844 unsigned Opcode;
1845 if (isInt<16>(Value))
1846 Opcode = SystemZ::LGHI;
1847 else if (SystemZ::isImmLL(Value))
1848 Opcode = SystemZ::LLILL;
1849 else if (SystemZ::isImmLH(Value)) {
1850 Opcode = SystemZ::LLILH;
1851 Value >>= 16;
1852 } else {
1853 assert(isInt<32>(Value) && "Huge values not handled yet");
1854 Opcode = SystemZ::LGFI;
1855 }
1856 BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
1857 }
1858
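// Conservatively decide whether two memory accesses can be proven disjoint.
// This only succeeds when both instructions carry exactly one memory operand
// attached to the same IR Value (or the same PseudoSourceValue) and their
// [Offset, Offset + Size) ranges do not overlap; for example, two 4-byte
// accesses at offsets 0 and 8 from the same base are disjoint.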
1859 bool SystemZInstrInfo::
1860 areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
1861 AliasAnalysis *AA) const {
1862
1863 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
1864 return false;
1865
1866 // If the memory operands show that both instructions access the same
1867 // address Value, check for non-overlapping offsets and widths.  It is
1868 // not clear whether a register-based analysis would be an improvement.
1869
1870 MachineMemOperand *MMOa = *MIa.memoperands_begin();
1871 MachineMemOperand *MMOb = *MIb.memoperands_begin();
1872 const Value *VALa = MMOa->getValue();
1873 const Value *VALb = MMOb->getValue();
1874 bool SameVal = (VALa && VALb && (VALa == VALb));
1875 if (!SameVal) {
1876 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1877 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1878 if (PSVa && PSVb && (PSVa == PSVb))
1879 SameVal = true;
1880 }
1881 if (SameVal) {
1882 int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
1883 int WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
1884 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
1885 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
1886 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1887 if (LowOffset + LowWidth <= HighOffset)
1888 return true;
1889 }
1890
1891 return false;
1892 }
1893