//===----- X86CallFrameOptimization.cpp - Optimize x86 call sequences -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pass that optimizes call sequences on x86.
// Currently, it converts movs of function parameters onto the stack into
// pushes. This is beneficial for two main reasons:
// 1) The push instruction encoding is much smaller than a stack-ptr-based mov.
// 2) It is possible to push memory arguments directly. So, if the
//    transformation is performed pre-reg-alloc, it can help relieve
//    register pressure.
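//
// As an illustrative sketch (assuming a 32-bit target and a hypothetical
// callee _foo), a call sequence such as:
//   movl $3, 8(%esp)
//   movl %edi, 4(%esp)
//   movl $0, (%esp)
//   calll _foo
// can become:
//   pushl $3
//   pushl %edi
//   pushl $0
//   calll _foo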
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86FrameLowering.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "x86-cf-opt"

static cl::opt<bool>
    NoX86CFOpt("no-x86-call-frame-opt",
               cl::desc("Avoid optimizing x86 call frames for size"),
               cl::init(false), cl::Hidden);
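// (Being a cl::opt, this flag should be accepted by the usual tools,
// e.g. "llc -no-x86-call-frame-opt"; noted here only for illustration.)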

namespace llvm {
void initializeX86CallFrameOptimizationPass(PassRegistry &);
}

namespace {

class X86CallFrameOptimization : public MachineFunctionPass {
public:
  X86CallFrameOptimization() : MachineFunctionPass(ID) {
    initializeX86CallFrameOptimizationPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;

private:
  // Information we know about a particular call site
  struct CallContext {
    CallContext() : FrameSetup(nullptr), ArgStoreVector(4, nullptr) {}

    // Iterator referring to the frame setup instruction
    MachineBasicBlock::iterator FrameSetup;

    // Actual call instruction
    MachineInstr *Call = nullptr;

    // A copy of the stack pointer
    MachineInstr *SPCopy = nullptr;

    // The total displacement of all passed parameters
    int64_t ExpectedDist = 0;

    // The sequence of storing instructions used to pass the parameters
    SmallVector<MachineInstr *, 4> ArgStoreVector;

    // True if this call site has no stack parameters
    bool NoStackParams = false;

    // True if this call site can use push instructions
    bool UsePush = false;
  };

  typedef SmallVector<CallContext, 8> ContextVector;

  bool isLegal(MachineFunction &MF);

  bool isProfitable(MachineFunction &MF, ContextVector &CallSeqMap);

  void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator I, CallContext &Context);

  void adjustCallSequence(MachineFunction &MF, const CallContext &Context);

  MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
                                   unsigned Reg);

  enum InstClassification { Convert, Skip, Exit };

  InstClassification classifyInstruction(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MI,
                                         const X86RegisterInfo &RegInfo,
                                         DenseSet<unsigned int> &UsedRegs);

  StringRef getPassName() const override { return "X86 Optimize Call Frame"; }

  const X86InstrInfo *TII;
  const X86FrameLowering *TFL;
  const X86Subtarget *STI;
  MachineRegisterInfo *MRI;
  unsigned SlotSize;
  unsigned Log2SlotSize;
};

} // end anonymous namespace
char X86CallFrameOptimization::ID = 0;
INITIALIZE_PASS(X86CallFrameOptimization, DEBUG_TYPE,
                "X86 Call Frame Optimization", false, false)

// This checks whether the transformation is legal.
// Also returns false in cases where it's potentially legal, but
// we don't even want to try.
bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
  if (NoX86CFOpt.getValue())
    return false;

  // We can't encode multiple DW_CFA_GNU_args_size or DW_CFA_def_cfa_offset
  // in the compact unwind encoding that Darwin uses. So, bail if there
  // is a danger of that being generated.
  if (STI->isTargetDarwin() &&
      (!MF.getLandingPads().empty() ||
       (MF.getFunction().needsUnwindTableEntry() && !TFL->hasFP(MF))))
    return false;

  // It is not valid to change the stack pointer outside the prolog/epilog
  // on 64-bit Windows.
  if (STI->isTargetWin64())
    return false;

  // You would expect straight-line code between call-frame setup and
  // call-frame destroy. You would be wrong. There are circumstances (e.g.
  // CMOV_GR8 expansion of a select that feeds a function call!) where we can
  // end up with the setup and the destroy in different basic blocks.
  // This is bad, and breaks SP adjustment.
  // So, check that all of the frames in the function are closed inside
  // the same block, and, for good measure, that there are no nested frames.
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  for (MachineBasicBlock &BB : MF) {
    bool InsideFrameSequence = false;
    for (MachineInstr &MI : BB) {
      if (MI.getOpcode() == FrameSetupOpcode) {
        if (InsideFrameSequence)
          return false;
        InsideFrameSequence = true;
      } else if (MI.getOpcode() == FrameDestroyOpcode) {
        if (!InsideFrameSequence)
          return false;
        InsideFrameSequence = false;
      }
    }

    if (InsideFrameSequence)
      return false;
  }

  return true;
}

// Check whether this transformation is profitable for a particular
// function - in terms of code size.
bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
                                            ContextVector &CallSeqVector) {
  // This transformation is always a win when we do not expect to have
  // a reserved call frame. Under other circumstances, it may be either
  // a win or a loss, and requires a heuristic.
  bool CannotReserveFrame = MF.getFrameInfo().hasVarSizedObjects();
  if (CannotReserveFrame)
    return true;

  unsigned StackAlign = TFL->getStackAlignment();

  int64_t Advantage = 0;
  for (auto CC : CallSeqVector) {
    // Call sites where no parameters are passed on the stack
    // do not affect the cost, since no stack adjustment is needed.
    if (CC.NoStackParams)
      continue;

    if (!CC.UsePush) {
      // If we don't use pushes for a particular call site,
      // we pay for not having a reserved call frame with an
      // additional sub/add esp pair. The cost is ~3 bytes per instruction,
      // depending on the size of the constant.
      // TODO: Callee-pop functions should have a smaller penalty, because
      // an add is needed even with a reserved call frame.
      Advantage -= 6;
    } else {
      // We can use pushes. First, account for the fixed costs.
      // We'll need an add after the call.
      Advantage -= 3;
      // If we have to realign the stack, we'll also need a sub before.
      if (CC.ExpectedDist % StackAlign)
        Advantage -= 3;
      // Now, for each push, we save ~3 bytes. For small constants, we
      // actually save more (up to 5 bytes), but 3 should be a good
      // approximation.
      Advantage += (CC.ExpectedDist >> Log2SlotSize) * 3;
    }
  }
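
  // As an illustrative (made-up) data point: a single call site passing
  // three slots on a 32-bit target (ExpectedDist = 12, StackAlign = 16)
  // costs 3 bytes for the add and 3 for the realigning sub, but saves
  // ~3 bytes per push, for a net Advantage of -3 - 3 + 9 = +3.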

  return Advantage >= 0;
}

bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget<X86Subtarget>();
  TII = STI->getInstrInfo();
  TFL = STI->getFrameLowering();
  MRI = &MF.getRegInfo();

  const X86RegisterInfo &RegInfo =
      *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());
  SlotSize = RegInfo.getSlotSize();
  assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size");
  Log2SlotSize = Log2_32(SlotSize);

  if (skipFunction(MF.getFunction()) || !isLegal(MF))
    return false;

  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();

  bool Changed = false;

  ContextVector CallSeqVector;

  for (auto &MBB : MF)
    for (auto &MI : MBB)
      if (MI.getOpcode() == FrameSetupOpcode) {
        CallContext Context;
        collectCallInfo(MF, MBB, MI, Context);
        CallSeqVector.push_back(Context);
      }

  if (!isProfitable(MF, CallSeqVector))
    return false;

  for (auto CC : CallSeqVector) {
    if (CC.UsePush) {
      adjustCallSequence(MF, CC);
      Changed = true;
    }
  }

  return Changed;
}

X86CallFrameOptimization::InstClassification
X86CallFrameOptimization::classifyInstruction(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const X86RegisterInfo &RegInfo, DenseSet<unsigned int> &UsedRegs) {
  if (MI == MBB.end())
    return Exit;

  // The instructions we actually care about are movs onto the stack or special
  // cases of constant-stores to stack
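  // (The and/or forms below show up because ISel may emit "and mem, 0" and
  // "or mem, -1" as compact idioms for storing 0 and -1, respectively.)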
  switch (MI->getOpcode()) {
  case X86::AND16mi8:
  case X86::AND32mi8:
  case X86::AND64mi8: {
    MachineOperand ImmOp = MI->getOperand(X86::AddrNumOperands);
    return ImmOp.getImm() == 0 ? Convert : Exit;
  }
  case X86::OR16mi8:
  case X86::OR32mi8:
  case X86::OR64mi8: {
    MachineOperand ImmOp = MI->getOperand(X86::AddrNumOperands);
    return ImmOp.getImm() == -1 ? Convert : Exit;
  }
  case X86::MOV32mi:
  case X86::MOV32mr:
  case X86::MOV64mi32:
  case X86::MOV64mr:
    return Convert;
  }

  // Not all calling conventions have only stack MOVs between the stack
  // adjust and the call.

  // We want to tolerate other instructions, to cover more cases.
  // In particular:
  // a) PCrel calls, where we expect an additional COPY of the basereg.
  // b) Passing frame-index addresses.
  // c) Calling conventions that have inreg parameters. These generate
  //    both copies and movs into registers.
  // To avoid creating lots of special cases, allow any instruction
  // that does not write into memory, does not def or use the stack
  // pointer, and does not def any register that was used by a preceding
  // push.
  // (Reading from memory is allowed, even if referenced through a
  // frame index, since these will get adjusted properly in PEI)

  // The reason for the last condition is that the pushes can't replace
  // the movs in place, because the order must be reversed.
  // So if we have a MOV32mr that uses EDX, then an instruction that defs
  // EDX, and then the call, after the transformation the push will use
  // the modified version of EDX, and not the original one.
  // Since we are still in SSA form at this point, we only need to
  // make sure we don't clobber any *physical* registers that were
  // used by an earlier mov that will become a push.

  if (MI->isCall() || MI->mayStore())
    return Exit;

  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg())
      continue;
    unsigned int Reg = MO.getReg();
    if (!RegInfo.isPhysicalRegister(Reg))
      continue;
    if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
      return Exit;
    if (MO.isDef()) {
      for (unsigned int U : UsedRegs)
        if (RegInfo.regsOverlap(Reg, U))
          return Exit;
    }
  }

  return Skip;
}

void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
                                               MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator I,
                                               CallContext &Context) {
  // Check that this particular call sequence is amenable to the
  // transformation.
  const X86RegisterInfo &RegInfo =
      *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());

  // We expect to enter this at the beginning of a call sequence
  assert(I->getOpcode() == TII->getCallFrameSetupOpcode());
  MachineBasicBlock::iterator FrameSetup = I++;
  Context.FrameSetup = FrameSetup;

  // How much do we adjust the stack? This puts an upper bound on
  // the number of parameters actually passed on it.
  unsigned int MaxAdjust = TII->getFrameSize(*FrameSetup) >> Log2SlotSize;

  // A zero adjustment means no stack parameters
  if (!MaxAdjust) {
    Context.NoStackParams = true;
    return;
  }

  // Skip over DEBUG_VALUE.
  // For globals in PIC mode, we can have some LEAs here. Skip them as well.
  // TODO: Extend this to something that covers more cases.
  while (I->getOpcode() == X86::LEA32r || I->isDebugInstr())
    ++I;

  unsigned StackPtr = RegInfo.getStackRegister();
  auto StackPtrCopyInst = MBB.end();
  // SelectionDAG (but not FastISel) inserts a copy of ESP into a virtual
  // register. If it's there, use that virtual register as stack pointer
  // instead. Also, we need to locate this instruction so that we can later
  // safely ignore it while doing the conservative processing of the call
  // chain. The COPY can be located anywhere between the call-frame setup
  // instruction and its first use. We use the call instruction as a boundary
  // because it is usually cheaper to check if an instruction is a call than
  // checking if an instruction uses a register.
  for (auto J = I; !J->isCall(); ++J)
    if (J->isCopy() && J->getOperand(0).isReg() && J->getOperand(1).isReg() &&
        J->getOperand(1).getReg() == StackPtr) {
      StackPtrCopyInst = J;
      Context.SPCopy = &*J++;
      StackPtr = Context.SPCopy->getOperand(0).getReg();
      break;
    }
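  // (In MIR, such a copy typically looks like "%0:gr32 = COPY $esp".)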

  // Scan the call setup sequence for the pattern we're looking for.
  // We only handle a simple case - a sequence of store instructions that
  // push a sequence of stack-slot-aligned values onto the stack, with
  // no gaps between them.
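  // (On a 32-bit target, for instance, that means stores to displacements
  // 0, 4, 8, ... off the stack pointer.)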
  if (MaxAdjust > 4)
    Context.ArgStoreVector.resize(MaxAdjust, nullptr);

  DenseSet<unsigned int> UsedRegs;

  for (InstClassification Classification = Skip; Classification != Exit; ++I) {
    // If this is the COPY of the stack pointer, it's ok to ignore.
    if (I == StackPtrCopyInst)
      continue;
    Classification = classifyInstruction(MBB, I, RegInfo, UsedRegs);
    if (Classification != Convert)
      continue;
    // We know the instruction has a supported store opcode.
    // We only want movs of the form:
    // mov imm/reg, k(%StackPtr)
    // If we run into something else, bail.
    // Note that AddrBaseReg may, counter to its name, not be a register,
    // but rather a frame index.
    // TODO: Support the fi case. This should probably work now that we
    // have the infrastructure to track the stack pointer within a call
    // sequence.
    if (!I->getOperand(X86::AddrBaseReg).isReg() ||
        (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||
        !I->getOperand(X86::AddrScaleAmt).isImm() ||
        (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||
        (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||
        (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||
        !I->getOperand(X86::AddrDisp).isImm())
      return;

    int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();
    assert(StackDisp >= 0 &&
           "Negative stack displacement when passing parameters");

    // We really don't want to consider the unaligned case.
    if (StackDisp & (SlotSize - 1))
      return;
    StackDisp >>= Log2SlotSize;

    assert((size_t)StackDisp < Context.ArgStoreVector.size() &&
           "Function call has more parameters than the stack is adjusted for.");

    // If the same stack slot is being filled twice, something's fishy.
    if (Context.ArgStoreVector[StackDisp] != nullptr)
      return;
    Context.ArgStoreVector[StackDisp] = &*I;

    for (const MachineOperand &MO : I->uses()) {
      if (!MO.isReg())
        continue;
      unsigned int Reg = MO.getReg();
      if (RegInfo.isPhysicalRegister(Reg))
        UsedRegs.insert(Reg);
    }
  }

  --I;

  // We now expect the end of the sequence. If we stopped early,
  // or reached the end of the block without finding a call, bail.
  if (I == MBB.end() || !I->isCall())
    return;

  Context.Call = &*I;
  if ((++I)->getOpcode() != TII->getCallFrameDestroyOpcode())
    return;

  // Now, go through the vector, and see that we don't have any gaps,
  // but only a series of storing instructions.
  auto MMI = Context.ArgStoreVector.begin(), MME = Context.ArgStoreVector.end();
  for (; MMI != MME; ++MMI, Context.ExpectedDist += SlotSize)
    if (*MMI == nullptr)
      break;

  // If the call had no parameters, do nothing
  if (MMI == Context.ArgStoreVector.begin())
    return;

  // We are either at the last parameter, or a gap.
  // Make sure it's not a gap
  for (; MMI != MME; ++MMI)
    if (*MMI != nullptr)
      return;

  Context.UsePush = true;
}

void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
                                                  const CallContext &Context) {
  // Ok, we can in fact do the transformation for this call.
  // Do not remove the FrameSetup instruction, but adjust the parameters.
  // PEI will end up finalizing the handling of this.
  MachineBasicBlock::iterator FrameSetup = Context.FrameSetup;
  MachineBasicBlock &MBB = *(FrameSetup->getParent());
  TII->setFrameAdjustment(*FrameSetup, Context.ExpectedDist);

  DebugLoc DL = FrameSetup->getDebugLoc();
  bool Is64Bit = STI->is64Bit();
  // Now, iterate through the vector in reverse order, and replace the stores
  // to stack with pushes. MOVmi/MOVmr don't have any defs, so no need to
  // replace uses.
  for (int Idx = (Context.ExpectedDist >> Log2SlotSize) - 1; Idx >= 0; --Idx) {
    MachineBasicBlock::iterator Store = *Context.ArgStoreVector[Idx];
    MachineOperand PushOp = Store->getOperand(X86::AddrNumOperands);
    MachineBasicBlock::iterator Push = nullptr;
    unsigned PushOpcode;
    switch (Store->getOpcode()) {
    default:
      llvm_unreachable("Unexpected Opcode!");
    case X86::AND16mi8:
    case X86::AND32mi8:
    case X86::AND64mi8:
    case X86::OR16mi8:
    case X86::OR32mi8:
    case X86::OR64mi8:
    case X86::MOV32mi:
    case X86::MOV64mi32:
      PushOpcode = Is64Bit ? X86::PUSH64i32 : X86::PUSHi32;
      // If the operand is a small (8-bit) immediate, we can use a
      // PUSH instruction with a shorter encoding.
      // Note that isImm() may fail even though this is a MOVmi, because
      // the operand can also be a symbol.
      if (PushOp.isImm()) {
        int64_t Val = PushOp.getImm();
        if (isInt<8>(Val))
          PushOpcode = Is64Bit ? X86::PUSH64i8 : X86::PUSH32i8;
      }
      Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode)).add(PushOp);
      break;
    case X86::MOV32mr:
    case X86::MOV64mr: {
      unsigned int Reg = PushOp.getReg();

      // If storing a 32-bit vreg on 64-bit targets, extend to a 64-bit vreg
      // in preparation for the PUSH64. The upper 32 bits can be undef.
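      // (Schematically: the 32-bit source %x becomes
      //   %u:gr64 = IMPLICIT_DEF
      //   %r:gr64 = INSERT_SUBREG %u, %x, %subreg.sub_32bit
      // and %r is what ends up being pushed.)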
      if (Is64Bit && Store->getOpcode() == X86::MOV32mr) {
        unsigned UndefReg = MRI->createVirtualRegister(&X86::GR64RegClass);
        Reg = MRI->createVirtualRegister(&X86::GR64RegClass);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg)
            .addReg(UndefReg)
            .add(PushOp)
            .addImm(X86::sub_32bit);
      }

      // If PUSHrmm is not slow on this target, try to fold the source of the
      // push into the instruction.
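      // (e.g. "movl 8(%ebx), %eax; pushl %eax" can fold to "pushl 8(%ebx)".)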
      bool SlowPUSHrmm = STI->isAtom() || STI->isSLM();

      // Check that this is legal to fold. Right now, we're extremely
      // conservative about that.
      MachineInstr *DefMov = nullptr;
      if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {
        PushOpcode = Is64Bit ? X86::PUSH64rmm : X86::PUSH32rmm;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode));

        unsigned NumOps = DefMov->getDesc().getNumOperands();
        for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
          Push->addOperand(DefMov->getOperand(i));

        DefMov->eraseFromParent();
      } else {
        PushOpcode = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
                   .addReg(Reg)
                   .getInstr();
      }
      break;
    }
    }

    // For debugging, when using SP-based CFA, we need to adjust the CFA
    // offset after each push.
    // TODO: This is needed only if we require precise CFA.
    if (!TFL->hasFP(MF))
      TFL->BuildCFI(
          MBB, std::next(Push), DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, SlotSize));

    MBB.erase(Store);
  }

  // The stack-pointer copy is no longer used in the call sequences.
  // There should not be any other users, but we can't commit to that, so:
  if (Context.SPCopy && MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
    Context.SPCopy->eraseFromParent();

  // Once we've done this, we need to make sure PEI doesn't assume a reserved
  // frame.
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setHasPushSequences(true);
}

MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
    MachineBasicBlock::iterator FrameSetup, unsigned Reg) {
  // Do an extremely restricted form of load folding.
  // ISel will often create patterns like:
  // movl 4(%edi), %eax
  // movl 8(%edi), %ecx
  // movl 12(%edi), %edx
  // movl %edx, 8(%esp)
  // movl %ecx, 4(%esp)
  // movl %eax, (%esp)
  // call
  // Get rid of those with prejudice.
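  // (After the transformation, each load/push pair becomes a single
  // "pushl k(%edi)", freeing the scratch register.)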
  if (!TargetRegisterInfo::isVirtualRegister(Reg))
    return nullptr;

  // Make sure this is the only use of Reg.
  if (!MRI->hasOneNonDBGUse(Reg))
    return nullptr;

  MachineInstr &DefMI = *MRI->getVRegDef(Reg);

  // Make sure the def is a MOV from memory.
  // If the def is in another block, give up.
  if ((DefMI.getOpcode() != X86::MOV32rm &&
       DefMI.getOpcode() != X86::MOV64rm) ||
      DefMI.getParent() != FrameSetup->getParent())
    return nullptr;

  // Make sure we don't have any instructions between DefMI and the
  // push that make folding the load illegal.
  for (MachineBasicBlock::iterator I = DefMI; I != FrameSetup; ++I)
    if (I->isLoadFoldBarrier())
      return nullptr;

  return &DefMI;
}

FunctionPass *llvm::createX86CallFrameOptimization() {
  return new X86CallFrameOptimization();
}