//===- MipsSEFrameLowering.cpp - Mips32/64 Frame Information --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsABIInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

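/// Return the opcodes used to read the HI and LO parts of the accumulator
/// register \p Src, or a {0, 0} pair if \p Src is not an accumulator register.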
static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

namespace {

/// Helper class to expand pseudos.
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  using Iter = MachineBasicBlock::iterator;

  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};

} // end anonymous namespace

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

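// Expand pseudos in every basic block of the function. The return value feeds
// MipsSEFrameLowering::determineCalleeSaves, which creates an emergency
// scavenging slot when any accumulator or DSP expansion was made.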
bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (auto &MBB : MF) {
    for (Iter I = MBB.begin(), End = MBB.end(); I != End;)
      Expanded |= expandInstr(MBB, I++);
  }

  return Expanded;
}

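// Expand a single pseudo instruction. The F64 build/extract pseudos erase the
// original instruction themselves and return false; the other expansions fall
// through to the erase below and return true, and anything that is not
// expanded returns false.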
bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch (I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

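// Expand a generic COPY whose source is an accumulator register into an
// MFHI/MFLO pair; returns false (leaving the COPY untouched) for any other
// source register.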
bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  unsigned Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  const TargetRegisterClass *DstRC = RegInfo.getMinimalPhysRegClass(Dst);
  unsigned VRegSize = RegInfo.getRegSizeInBits(*DstRC) / 16;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when ABI is fpxx and mthc1 is not
/// available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For fpxx and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether the register is odd or even.
  //
  // For the cases that should be covered here MipsSEISelDAGToDAG adds $sp as
  // implicit operand, so other passes (like ShrinkWrapping) are aware that
  // stack is used.
  if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
      && I->getOperand(3).getReg() == Mips::SP) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned LoReg = I->getOperand(1).getReg();
    unsigned HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
    if (!Subtarget.isLittle())
      std::swap(LoReg, HiReg);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
                        &RegInfo, 4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
    return true;
  }

  return false;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the case when ABI is fpxx and mfhc1 is not
/// available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandExtractElementF64 is called.
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  const MachineOperand &Op1 = I->getOperand(1);
  const MachineOperand &Op2 = I->getOperand(2);

  if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) {
    unsigned DstReg = I->getOperand(0).getReg();
    BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg);
    return true;
  }

  // For fpxx and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether the register is odd or even.
  //
  // For the cases that should be covered here MipsSEISelDAGToDAG adds $sp as
  // implicit operand, so other passes (like ShrinkWrapping) are aware that
  // stack is used.
  if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
      && I->getOperand(3).getReg() == Mips::SP) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned SrcReg = Op1.getReg();
    unsigned N = Op2.getImm();
    int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
    TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.getStackAlignment()) {}

void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl;
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();
  unsigned ADDiu = ABI.GetPtrAddiuOp();
  unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;

  const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
      &Mips::GPR64RegClass : &Mips::GPR32RegClass;

  // First, compute final stack size.
  uint64_t StackSize = MFI.getStackSize();

  // No need to allocate space on the stack.
  if (StackSize == 0 && !MFI.adjustsStack()) return;

  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Adjust stack.
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex = MF.addFrameInst(
      MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  if (MF.getFunction().hasFnAttribute("interrupt"))
    emitInterruptPrologueStub(MF, MBB);

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  if (!CSI.empty()) {
    // Find the instruction past the last instruction that saves a callee-saved
    // register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over list of callee-saved registers and emit .cfi_offset
    // directives.
    for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
           E = CSI.end(); I != E; ++I) {
      int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
      unsigned Reg = I->getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
        MBB.addLiveIn(ABI.GetEhDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo);
    }

    // Emit .cfi_offset directives for eh data registers.
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI.getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true);
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If the frame pointer is enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
    BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    if (RegInfo.needsStackRealignment(MF)) {
      // addiu $Reg, $zero, -MaxAlignment
      // andi $sp, $sp, $Reg
      unsigned VR = MF.getRegInfo().createVirtualRegister(RC);
      assert(isInt<16>(MFI.getMaxAlignment()) &&
             "Function's alignment size requirement is not supported.");
      int MaxAlign = -(int)MFI.getMaxAlignment();

      BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO).addImm(MaxAlign);
      BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);

      if (hasBP(MF)) {
        // move $s7, $sp
        unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
        BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP)
            .addReg(SP)
            .addReg(ZERO);
      }
    }
  }
}

void MipsSEFrameLowering::emitInterruptPrologueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Report an error if the target doesn't support Mips32r2 or later.
  // The epilogue relies on the use of the "ehb" to clear execution
  // hazards. Pre R2 Mips relies on an implementation defined number
  // of "ssnop"s to clear the execution hazard. Support for ssnop hazard
  // clearing is not provided so reject that configuration.
  if (!STI.hasMips32r2())
    report_fatal_error(
        "\"interrupt\" attribute is not supported on pre-MIPS32R2 or "
        "MIPS16 targets.");

  // The GP register contains the "user" value, so we cannot perform
  // any gp relative loads until we restore the "kernel" or "system" gp
  // value. Until support is written we shall only accept the static
  // relocation model.
  if (STI.getRelocationModel() != Reloc::Static)
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "static relocation model on MIPS at the present time.");

  if (!STI.isABI_O32() || STI.hasMips64())
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "O32 ABI on MIPS32R2+ at the present time.");

  // Perform ISR handling like GCC
  StringRef IntKind =
      MF.getFunction().getFnAttribute("interrupt").getValueAsString();
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // EIC interrupt handling needs to read the Cause register to disable
  // interrupts.
  if (IntKind == "eic") {
    // Coprocessor registers are always live per se.
    MBB.addLiveIn(Mips::COP013);
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K0)
        .addReg(Mips::COP013)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);

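    // Extract the 6-bit RIPL field (Cause bits 15:10) into $k0; it is inserted
    // into the Status register's IPL field further down.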
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EXT), Mips::K0)
        .addReg(Mips::K0)
        .addImm(10)
        .addImm(6)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Fetch and spill EPC
  MBB.addLiveIn(Mips::COP014);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP014)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(0), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Fetch and Spill Status
  MBB.addLiveIn(Mips::COP012);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP012)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(1), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Build the configuration for disabling lower priority interrupts. Non EIC
  // interrupts need to be masked off with zero, EIC from the Cause register.
  unsigned InsPosition = 8;
  unsigned InsSize = 0;
  unsigned SrcReg = Mips::ZERO;

  // If the interrupt we're tied to is the EIC, switch the source for the
  // masking off interrupts to the cause register.
  if (IntKind == "eic") {
    SrcReg = Mips::K0;
    InsPosition = 10;
    InsSize = 6;
  } else
    InsSize = StringSwitch<unsigned>(IntKind)
                  .Case("sw0", 1)
                  .Case("sw1", 2)
                  .Case("hw0", 3)
                  .Case("hw1", 4)
                  .Case("hw2", 5)
                  .Case("hw3", 6)
                  .Case("hw4", 7)
                  .Case("hw5", 8)
                  .Default(0);
  assert(InsSize != 0 && "Unknown interrupt type!");

  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(SrcReg)
      .addImm(InsPosition)
      .addImm(InsSize)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Mask off KSU, ERL, EXL
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(Mips::ZERO)
      .addImm(1)
      .addImm(4)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Disable the FPU as we are not spilling those register sets.
  if (!STI.useSoftFloat())
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
        .addReg(Mips::ZERO)
        .addImm(29)
        .addImm(1)
        .addReg(Mips::K1)
        .setMIFlag(MachineInstr::FrameSetup);

  // Set the new status
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);
}

void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();

  // If the frame pointer is enabled, restore the stack pointer.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, DL, TII.get(MOVE), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC =
        ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
                               MipsFI->getEhDataRegFI(J), RC, &RegInfo);
    }
  }

  if (MF.getFunction().hasFnAttribute("interrupt"))
    emitInterruptEpilogueStub(MF, MBB);

  // Get the number of bytes from FrameInfo
  uint64_t StackSize = MFI.getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

void MipsSEFrameLowering::emitInterruptEpilogueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Perform ISR handling like GCC
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // Disable Interrupts.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::DI), Mips::ZERO);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));

  // Restore EPC
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(0), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
      .addReg(Mips::K1)
      .addImm(0);

  // Restore Status
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(1), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0);
}

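/// Compute the offset of frame index \p FI from the register chosen as the
/// frame base for this access: the frame pointer (or stack pointer) for fixed
/// objects, and the base pointer (or stack pointer) for everything else.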
int MipsSEFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                int FI,
                                                unsigned &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsABIInfo ABI = STI.getABI();

  if (MFI.isFixedObjectIndex(FI))
    FrameReg = hasFP(MF) ? ABI.GetFramePtr() : ABI.GetStackPtr();
  else
    FrameReg = hasBP(MF) ? ABI.GetBasePtr() : ABI.GetStackPtr();

  return MFI.getObjectOffset(FI) + MFI.getStackSize() -
         getOffsetOfLocalArea() + MFI.getOffsetAdjustment();
}

bool MipsSEFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *STI.getInstrInfo();

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    // Add the callee-saved register as live-in. Do not add if the register is
    // RA and return address is taken, because it has already been added in
    // method MipsTargetLowering::lowerRETURNADDR.
    // It's killed at the spill, unless the register is RA and return address
    // is taken.
    unsigned Reg = CSI[i].getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo().isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      MBB.addLiveIn(Reg);

    // ISRs require HI/LO to be copied into kernel registers and then spilled
    // to the stack frame.
    bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
                   Reg == Mips::HI0 || Reg == Mips::HI0_64);
    const Function &Func = MBB.getParent()->getFunction();
    if (IsLOHI && Func.hasFnAttribute("interrupt")) {
      DebugLoc DL = MI->getDebugLoc();

      unsigned Op = 0;
      if (!STI.getABI().ArePtrs64bit()) {
        Op = (Reg == Mips::HI0) ? Mips::MFHI : Mips::MFLO;
        Reg = Mips::K0;
      } else {
        Op = (Reg == Mips::HI0_64) ? Mips::MFHI64 : Mips::MFLO64;
        Reg = Mips::K0_64;
      }
      BuildMI(MBB, MI, DL, TII.get(Op), Mips::K0)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, IsKill,
                            CSI[i].getFrameIdx(), RC, TRI);
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // Reserve call frame if the size of the maximum call frame fits into 16-bit
  // immediate field and there are no variable sized objects on the stack.
  // Make sure the second register scavenger spill slot can be accessed with one
  // instruction.
  return isInt<16>(MFI.getMaxCallFrameSize() + getStackAlignment()) &&
         !MFI.hasVarSizedObjects();
}

/// Mark \p Reg and all registers aliasing it in the bitset.
static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
                         unsigned Reg) {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
    SavedRegs.set(*AI);
}

void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MipsABIInfo ABI = STI.getABI();
  unsigned FP = ABI.GetFramePtr();
  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;

  // Mark $fp as used if function has dedicated frame pointer.
  if (hasFP(MF))
    setAliasRegs(MF, SavedRegs, FP);
  // Mark $s7 as used if function has dedicated base pointer.
  if (hasBP(MF))
    setAliasRegs(MF, SavedRegs, BP);

  // Create spill slots for eh data registers if function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI();

  // Create spill slots for Coprocessor 0 registers if function is an ISR.
  if (MipsFI->isISR())
    MipsFI->createISRRegFI();

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If the target
    // has 64-bit wide general-purpose registers, it should be 64-bit;
    // otherwise it should be 32-bit.
    const TargetRegisterClass &RC = STI.isGP64bit() ?
        Mips::GPR64RegClass : Mips::GPR32RegClass;
    int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                                 TRI->getSpillAlignment(RC),
                                                 false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set scavenging frame index if necessary.
  uint64_t MaxSPOffset = estimateStackSize(MF);

  // MSA has a minimum offset of 10 bits signed. If there is a variable
  // sized object on the stack, the estimation cannot account for it.
  if (isIntN(STI.hasMSA() ? 10 : 16, MaxSPOffset) &&
      !MF.getFrameInfo().hasVarSizedObjects())
    return;

  const TargetRegisterClass &RC =
      ABI.ArePtrs64bit() ? Mips::GPR64RegClass : Mips::GPR32RegClass;
  int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                               TRI->getSpillAlignment(RC),
                                               false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}