//===-- MipsSEFrameLowering.cpp - Mips32/64 Frame Information -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of the TargetFrameLowering
// class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

namespace {
typedef MachineBasicBlock::iterator Iter;

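/// Map an accumulator register onto the pair of opcodes used to copy its hi
/// and lo halves into GPRs, or return (0, 0) if Src is not an accumulator.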
static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

/// Helper class to expand pseudos.
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};
}

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

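/// Walk every basic block and expand the pseudos recognized by expandInstr().
/// The caller uses the result to decide whether an emergency spill slot is
/// needed for the expanded accumulator loads, stores and copies.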
bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (MachineFunction::iterator BB = MF.begin(), BBEnd = MF.end();
       BB != BBEnd; ++BB)
    for (Iter I = BB->begin(), End = BB->end(); I != End;)
      Expanded |= expandInstr(*BB, I++);

  return Expanded;
}

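/// Expand the pseudo at I if it is one we handle. Most cases fall through to
/// the common path below, which erases I and returns true; the
/// BuildPairF64/ExtractElementF64 cases erase I themselves and return false.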
bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch(I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

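/// Expand a generic COPY whose source is an accumulator register; all other
/// copies are left untouched.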
bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  unsigned Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when ABI is fpxx and mthc1 is not
/// available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For fpxx and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether the register is odd or even.
  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned LoReg = I->getOperand(1).getReg();
    unsigned HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
    if (!Subtarget.isLittle())
      std::swap(LoReg, HiReg);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
                        &RegInfo, 4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
    return true;
  }

  return false;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the case when ABI is fpxx and mfhc1 is
/// not available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandExtractElementF64 is called.
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  // For fpxx and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether the register is odd or even.

  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned SrcReg = I->getOperand(1).getReg();
    unsigned N = I->getOperand(2).getImm();
    int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
    TII.storeRegToStack(MBB, I, SrcReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.stackAlignment()) {}

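/// Return the I-th eh_return data register: $a0-$a3, or their 64-bit
/// counterparts when targeting the N64 ABI.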
unsigned MipsSEFrameLowering::ehDataReg(unsigned I) const {
  static const unsigned EhDataReg[] = {
    Mips::A0, Mips::A1, Mips::A2, Mips::A3
  };
  static const unsigned EhDataReg64[] = {
    Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64
  };

  return STI.isABI_N64() ? EhDataReg64[I] : EhDataReg[I];
}

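/// Emit the prologue: allocate the stack frame, emit CFI directives for the
/// callee-saved spills that were inserted earlier, spill the eh_return data
/// registers if the function calls eh_return, and set up $fp when a frame
/// pointer is required.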
void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
  unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
  unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
  unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;

  // First, compute final stack size.
  uint64_t StackSize = MFI->getStackSize();

  // No need to allocate space on the stack.
  if (StackSize == 0 && !MFI->adjustsStack()) return;

  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  MachineLocation DstML, SrcML;

  // Adjust stack.
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex = MMI.addFrameInst(
      MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();

  if (CSI.size()) {
    // Find the instruction past the last instruction that saves a callee-saved
    // register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over list of callee-saved registers and emit .cfi_offset
    // directives.
    for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
           E = CSI.end(); I != E; ++I) {
      int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
      unsigned Reg = I->getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, 1), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC = STI.isABI_N64() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ehDataReg(I)))
        MBB.addLiveIn(ehDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ehDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo);
    }

    // Emit .cfi_offset directives for eh data registers.
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ehDataReg(I), true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // if framepointer enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
    BuildMI(MBB, MBBI, dl, TII.get(ADDu), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  }
}

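/// Emit the epilogue: restore $sp from $fp when a frame pointer was used,
/// reload the eh_return data registers if needed, and deallocate the stack
/// frame.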
void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  DebugLoc dl = MBBI->getDebugLoc();
  unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
  unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
  unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
  unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;

  // if framepointer enabled, restore the stack pointer.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, dl, TII.get(ADDu), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC = STI.isABI_N64() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ehDataReg(J), MipsFI->getEhDataRegFI(J),
                               RC, &RegInfo);
    }
  }

  // Get the number of bytes from FrameInfo.
  uint64_t StackSize = MFI->getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

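/// Spill each callee-saved register in the entry block and mark it live-in,
/// except that $ra is neither re-added as a live-in nor killed when the
/// return address has already been taken.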
bool MipsSEFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *EntryBlock = MF->begin();
  const TargetInstrInfo &TII = *STI.getInstrInfo();

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    // Add the callee-saved register as live-in. Do not add if the register is
    // RA and return address is taken, because it has already been added in
    // method MipsTargetLowering::LowerRETURNADDR.
    // It's killed at the spill, unless the register is RA and return address
    // is taken.
    unsigned Reg = CSI[i].getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo()->isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      EntryBlock->addLiveIn(Reg);

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(*EntryBlock, MI, Reg, IsKill,
                            CSI[i].getFrameIdx(), RC, TRI);
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Reserve call frame if the size of the maximum call frame fits into 16-bit
  // immediate field and there are no variable sized objects on the stack.
  // Make sure the second register scavenger spill slot can be accessed with
  // one instruction.
  return isInt<16>(MFI->getMaxCallFrameSize() + getStackAlignment()) &&
         !MFI->hasVarSizedObjects();
}

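/// Mark $fp as used when a frame pointer is needed, create the eh_return
/// spill slots, expand the accumulator load/store/copy pseudos, and reserve
/// register-scavenger spill slots when the expanded code or a large stack
/// frame may require them.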
void MipsSEFrameLowering::
processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                     RegScavenger *RS) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;

  // Mark $fp as used if function has dedicated frame pointer.
  if (hasFP(MF))
    MRI.setPhysRegUsed(FP);

  // Create spill slots for eh data registers if function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI();

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If target is
    // mips64, it should be 64-bit, otherwise it should be 32-bit.
    const TargetRegisterClass *RC = STI.hasMips64() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;
    int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                  RC->getAlignment(), false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set scavenging frame index if necessary.
  uint64_t MaxSPOffset = MF.getInfo<MipsFunctionInfo>()->getIncomingArgSize() +
      estimateStackSize(MF);

  if (isInt<16>(MaxSPOffset))
    return;

  const TargetRegisterClass *RC = STI.isABI_N64() ?
      &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment(), false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}