//===- X86OptimizeLEAs.cpp - optimize usage of LEA instructions ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the pass that performs some optimizations with LEA
// instructions in order to improve performance and code size.
// Currently, it does two things:
// 1) If there are two LEA instructions calculating addresses which only differ
//    by displacement inside a basic block, one of them is removed.
// 2) Address calculations in load and store instructions are replaced by
//    existing LEA def registers where possible.
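//
// An illustrative sketch of both transformations in schematic MIR-like
// notation (virtual registers, opcodes and displacements are made up for the
// example; transformation (2) is only applied when optimizing for size):
//
//   (1) %3 = LEA64r %1, 1, %2, 16, %noreg
//       %4 = LEA64r %1, 1, %2, 32, %noreg      ; removed as redundant
//       %5 = MOV64rm %4, 1, %noreg, 0, %noreg
//     becomes
//       %3 = LEA64r %1, 1, %2, 16, %noreg
//       %5 = MOV64rm %3, 1, %noreg, 16, %noreg ; disp adjusted by +16
//
//   (2) %3 = LEA64r %1, 1, %2, 16, %noreg
//       %5 = MOV64rm %1, 1, %2, 24, %noreg
//     becomes
//       %3 = LEA64r %1, 1, %2, 16, %noreg
//       %5 = MOV64rm %3, 1, %noreg, 8, %noreg  ; address reuses the LEA def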
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "x86-optimize-LEAs"

static cl::opt<bool>
    DisableX86LEAOpt("disable-x86-lea-opt", cl::Hidden,
                     cl::desc("X86: Disable LEA optimizations."),
                     cl::init(false));

STATISTIC(NumSubstLEAs, "Number of LEA instruction substitutions");
STATISTIC(NumRedundantLEAs, "Number of redundant LEA instructions removed");

/// Returns true if two machine operands are identical and they are not
/// physical registers.
static inline bool isIdenticalOp(const MachineOperand &MO1,
                                 const MachineOperand &MO2);

/// Returns true if two address displacement operands are of the same
/// type and use the same symbol/index/address regardless of the offset.
static bool isSimilarDispOp(const MachineOperand &MO1,
                            const MachineOperand &MO2);

/// Returns true if the instruction is LEA.
static inline bool isLEA(const MachineInstr &MI);

namespace {

/// A key based on instruction's memory operands.
class MemOpKey {
public:
  MemOpKey(const MachineOperand *Base, const MachineOperand *Scale,
           const MachineOperand *Index, const MachineOperand *Segment,
           const MachineOperand *Disp)
      : Disp(Disp) {
    Operands[0] = Base;
    Operands[1] = Scale;
    Operands[2] = Index;
    Operands[3] = Segment;
  }

  bool operator==(const MemOpKey &Other) const {
    // Addresses' bases, scales, indices and segments must be identical.
    for (int i = 0; i < 4; ++i)
      if (!isIdenticalOp(*Operands[i], *Other.Operands[i]))
        return false;

    // Addresses' displacements don't have to be exactly the same. It only
    // matters that they use the same symbol/index/address. Immediates' or
    // offsets' differences will be taken care of during instruction
    // substitution.
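    // For example (illustrative, using virtual registers %0 and %1), the keys
    // for the addresses (%0 + 4*%1 + 8) and (%0 + 4*%1 + 40) compare equal,
    // while (%0 + 8*%1 + 8) produces a different key because the scale
    // differs.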
    return isSimilarDispOp(*Disp, *Other.Disp);
  }

  // Address' base, scale, index and segment operands.
  const MachineOperand *Operands[4];

  // Address' displacement operand.
  const MachineOperand *Disp;
};

} // end anonymous namespace

/// Provide DenseMapInfo for MemOpKey.
namespace llvm {

template <> struct DenseMapInfo<MemOpKey> {
  using PtrInfo = DenseMapInfo<const MachineOperand *>;

  static inline MemOpKey getEmptyKey() {
    return MemOpKey(PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
                    PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
                    PtrInfo::getEmptyKey());
  }

  static inline MemOpKey getTombstoneKey() {
    return MemOpKey(PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
                    PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
                    PtrInfo::getTombstoneKey());
  }

  static unsigned getHashValue(const MemOpKey &Val) {
    // Checking any field of MemOpKey is enough to determine if the key is
    // empty or tombstone.
    assert(Val.Disp != PtrInfo::getEmptyKey() && "Cannot hash the empty key");
    assert(Val.Disp != PtrInfo::getTombstoneKey() &&
           "Cannot hash the tombstone key");

    hash_code Hash = hash_combine(*Val.Operands[0], *Val.Operands[1],
                                  *Val.Operands[2], *Val.Operands[3]);

    // If the address displacement is an immediate, it should not affect the
    // hash so that memory operands which differ only by immediate displacement
    // would have the same hash. If the address displacement is something else,
    // we should reflect symbol/index/address in the hash.
    switch (Val.Disp->getType()) {
    case MachineOperand::MO_Immediate:
      break;
    case MachineOperand::MO_ConstantPoolIndex:
    case MachineOperand::MO_JumpTableIndex:
      Hash = hash_combine(Hash, Val.Disp->getIndex());
      break;
    case MachineOperand::MO_ExternalSymbol:
      Hash = hash_combine(Hash, Val.Disp->getSymbolName());
      break;
    case MachineOperand::MO_GlobalAddress:
      Hash = hash_combine(Hash, Val.Disp->getGlobal());
      break;
    case MachineOperand::MO_BlockAddress:
      Hash = hash_combine(Hash, Val.Disp->getBlockAddress());
      break;
    case MachineOperand::MO_MCSymbol:
      Hash = hash_combine(Hash, Val.Disp->getMCSymbol());
      break;
    case MachineOperand::MO_MachineBasicBlock:
      Hash = hash_combine(Hash, Val.Disp->getMBB());
      break;
    default:
      llvm_unreachable("Invalid address displacement operand");
    }

    return (unsigned)Hash;
  }

  static bool isEqual(const MemOpKey &LHS, const MemOpKey &RHS) {
    // Checking any field of MemOpKey is enough to determine if the key is
    // empty or tombstone.
    if (RHS.Disp == PtrInfo::getEmptyKey())
      return LHS.Disp == PtrInfo::getEmptyKey();
    if (RHS.Disp == PtrInfo::getTombstoneKey())
      return LHS.Disp == PtrInfo::getTombstoneKey();
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Returns a hash table key based on memory operands of \p MI. The
/// number of the first memory operand of \p MI is specified through \p N.
static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N) {
  assert((isLEA(MI) || MI.mayLoadOrStore()) &&
         "The instruction must be a LEA, a load or a store");
  return MemOpKey(&MI.getOperand(N + X86::AddrBaseReg),
                  &MI.getOperand(N + X86::AddrScaleAmt),
                  &MI.getOperand(N + X86::AddrIndexReg),
                  &MI.getOperand(N + X86::AddrSegmentReg),
                  &MI.getOperand(N + X86::AddrDisp));
}

static inline bool isIdenticalOp(const MachineOperand &MO1,
                                 const MachineOperand &MO2) {
  return MO1.isIdenticalTo(MO2) &&
         (!MO1.isReg() ||
          !TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
}

#ifndef NDEBUG
static bool isValidDispOp(const MachineOperand &MO) {
  return MO.isImm() || MO.isCPI() || MO.isJTI() || MO.isSymbol() ||
         MO.isGlobal() || MO.isBlockAddress() || MO.isMCSymbol() || MO.isMBB();
}
#endif

static bool isSimilarDispOp(const MachineOperand &MO1,
                            const MachineOperand &MO2) {
  assert(isValidDispOp(MO1) && isValidDispOp(MO2) &&
         "Address displacement operand is not valid");
  return (MO1.isImm() && MO2.isImm()) ||
         (MO1.isCPI() && MO2.isCPI() && MO1.getIndex() == MO2.getIndex()) ||
         (MO1.isJTI() && MO2.isJTI() && MO1.getIndex() == MO2.getIndex()) ||
         (MO1.isSymbol() && MO2.isSymbol() &&
          MO1.getSymbolName() == MO2.getSymbolName()) ||
         (MO1.isGlobal() && MO2.isGlobal() &&
          MO1.getGlobal() == MO2.getGlobal()) ||
         (MO1.isBlockAddress() && MO2.isBlockAddress() &&
          MO1.getBlockAddress() == MO2.getBlockAddress()) ||
         (MO1.isMCSymbol() && MO2.isMCSymbol() &&
          MO1.getMCSymbol() == MO2.getMCSymbol()) ||
         (MO1.isMBB() && MO2.isMBB() && MO1.getMBB() == MO2.getMBB());
}

static inline bool isLEA(const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  return Opcode == X86::LEA16r || Opcode == X86::LEA32r ||
         Opcode == X86::LEA64r || Opcode == X86::LEA64_32r;
}

namespace {

class OptimizeLEAPass : public MachineFunctionPass {
public:
  OptimizeLEAPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 LEA Optimize"; }

  /// Loop over all of the basic blocks, replacing address
  /// calculations in load and store instructions, if it's already
  /// been calculated by LEA. Also, remove redundant LEAs.
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  using MemOpMap = DenseMap<MemOpKey, SmallVector<MachineInstr *, 16>>;

  /// Returns a distance between two instructions inside one basic block.
  /// A negative result means that the instructions occur in reverse order.
  int calcInstrDist(const MachineInstr &First, const MachineInstr &Last);

  /// Choose the best \p LEA instruction from the \p List to replace
  /// address calculation in \p MI instruction. Return the address displacement
  /// and the distance between \p MI and the chosen \p BestLEA in
  /// \p AddrDispShift and \p Dist.
  bool chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
                     const MachineInstr &MI, MachineInstr *&BestLEA,
                     int64_t &AddrDispShift, int &Dist);

  /// Returns the difference between addresses' displacements of \p MI1
  /// and \p MI2. The numbers of the first memory operands for the instructions
  /// are specified through \p N1 and \p N2.
  int64_t getAddrDispShift(const MachineInstr &MI1, unsigned N1,
                           const MachineInstr &MI2, unsigned N2) const;

  /// Returns true if the \p Last LEA instruction can be replaced by the
  /// \p First. The difference between displacements of the addresses calculated
  /// by these LEAs is returned in \p AddrDispShift. It'll be used for proper
  /// replacement of the \p Last LEA's uses with the \p First's def register.
  bool isReplaceable(const MachineInstr &First, const MachineInstr &Last,
                     int64_t &AddrDispShift) const;

  /// Find all LEA instructions in the basic block. Also, assign position
  /// numbers to all instructions in the basic block to speed up calculation of
  /// distance between them.
  void findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs);

  /// Removes redundant address calculations.
  bool removeRedundantAddrCalc(MemOpMap &LEAs);

  /// Replace debug value MI with a new debug value instruction using register
  /// VReg with an appropriate offset and DIExpression to incorporate the
  /// address displacement AddrDispShift. Return new debug value instruction.
  MachineInstr *replaceDebugValue(MachineInstr &MI, unsigned VReg,
                                  int64_t AddrDispShift);

  /// Removes LEAs which calculate similar addresses.
  bool removeRedundantLEAs(MemOpMap &LEAs);

  DenseMap<const MachineInstr *, unsigned> InstrPos;

  MachineRegisterInfo *MRI;
  const X86InstrInfo *TII;
  const X86RegisterInfo *TRI;

  static char ID;
};

} // end anonymous namespace

char OptimizeLEAPass::ID = 0;

FunctionPass *llvm::createX86OptimizeLEAs() { return new OptimizeLEAPass(); }

int OptimizeLEAPass::calcInstrDist(const MachineInstr &First,
                                   const MachineInstr &Last) {
  // Both instructions must be in the same basic block and they must be
  // present in InstrPos.
  assert(Last.getParent() == First.getParent() &&
         "Instructions are in different basic blocks");
  assert(InstrPos.find(&First) != InstrPos.end() &&
         InstrPos.find(&Last) != InstrPos.end() &&
         "Instructions' positions are undefined");

  return InstrPos[&Last] - InstrPos[&First];
}

// Find the best LEA instruction in the List to replace address recalculation in
// MI. Such LEA must meet these requirements:
// 1) The address calculated by the LEA differs only by the displacement from
//    the address used in MI.
// 2) The register class of the definition of the LEA is compatible with the
//    register class of the address base register of MI.
// 3) Displacement of the new memory operand should fit in 1 byte if possible.
// 4) The LEA should be as close to MI as possible, and prior to it if
//    possible.
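// For example (illustrative only, register choices are arbitrary): if MI loads
// from (%1 + 130) and the list contains LEAs of (%1 + 0) and (%1 + 128), the
// latter is preferred since the resulting displacement 2 fits in 1 byte while
// 130 does not.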
bool OptimizeLEAPass::chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
                                    const MachineInstr &MI,
                                    MachineInstr *&BestLEA,
                                    int64_t &AddrDispShift, int &Dist) {
  const MachineFunction *MF = MI.getParent()->getParent();
  const MCInstrDesc &Desc = MI.getDesc();
  int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags) +
                X86II::getOperandBias(Desc);

  BestLEA = nullptr;

  // Loop over all LEA instructions.
  for (auto DefMI : List) {
    // Get new address displacement.
    int64_t AddrDispShiftTemp = getAddrDispShift(MI, MemOpNo, *DefMI, 1);

    // Make sure address displacement fits 4 bytes.
    if (!isInt<32>(AddrDispShiftTemp))
      continue;

    // Check that LEA def register can be used as MI address base. Some
    // instructions can use a limited set of registers as address base, for
    // example MOV8mr_NOREX. We could constrain the register class of the LEA
    // def to suit MI, however since this case is very rare and hard to
    // reproduce in a test it's just more reliable to skip the LEA.
    if (TII->getRegClass(Desc, MemOpNo + X86::AddrBaseReg, TRI, *MF) !=
        MRI->getRegClass(DefMI->getOperand(0).getReg()))
      continue;

    // Choose the closest LEA instruction from the list, prior to MI if
    // possible. Note that we took into account resulting address displacement
    // as well. Also note that the list is sorted by the order in which the LEAs
    // occur, so the break condition is pretty simple.
    int DistTemp = calcInstrDist(*DefMI, MI);
    assert(DistTemp != 0 &&
           "The distance between two different instructions cannot be zero");
    if (DistTemp > 0 || BestLEA == nullptr) {
      // Do not update the best LEA if the current one provides a displacement
      // that fits in 1 byte, while the new candidate does not.
      if (BestLEA != nullptr && !isInt<8>(AddrDispShiftTemp) &&
          isInt<8>(AddrDispShift))
        continue;

      BestLEA = DefMI;
      AddrDispShift = AddrDispShiftTemp;
      Dist = DistTemp;
    }

    // FIXME: Maybe we should not always stop at the first LEA after MI.
    if (DistTemp < 0)
      break;
  }

  return BestLEA != nullptr;
}

// Get the difference between the addresses' displacements of the two
// instructions \p MI1 and \p MI2. The numbers of the first memory operands are
// passed through \p N1 and \p N2.
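// For example (illustrative), if MI1's displacement is a global 'g' with
// offset 16 and MI2's is 'g' with offset 4, the returned shift is 12.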
int64_t OptimizeLEAPass::getAddrDispShift(const MachineInstr &MI1, unsigned N1,
                                          const MachineInstr &MI2,
                                          unsigned N2) const {
  const MachineOperand &Op1 = MI1.getOperand(N1 + X86::AddrDisp);
  const MachineOperand &Op2 = MI2.getOperand(N2 + X86::AddrDisp);

  assert(isSimilarDispOp(Op1, Op2) &&
         "Address displacement operands are not compatible");

  // After the assert above we can be sure that both operands are of the same
  // valid type and use the same symbol/index/address, thus displacement shift
  // calculation is rather simple.
  if (Op1.isJTI())
    return 0;
  return Op1.isImm() ? Op1.getImm() - Op2.getImm()
                     : Op1.getOffset() - Op2.getOffset();
}

// Check that the Last LEA can be replaced by the First LEA. To be so,
// these requirements must be met:
// 1) Addresses calculated by LEAs differ only by displacement.
// 2) Def registers of LEAs belong to the same class.
// 3) All uses of the Last LEA def register are replaceable, thus the
//    register is used only as address base.
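// For example (illustrative only, register choices are arbitrary), requirement
// 3 is violated if the Last LEA's def register is stored to memory as data:
//   %2 = LEA64r %0, 1, %noreg, 8, %noreg
//   MOV64mr %1, 1, %noreg, 0, %noreg, %2
// Here %2 is not used as an address base, so folding the displacement
// difference into the store would change the stored value.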
bool OptimizeLEAPass::isReplaceable(const MachineInstr &First,
                                    const MachineInstr &Last,
                                    int64_t &AddrDispShift) const {
  assert(isLEA(First) && isLEA(Last) &&
         "The function works only with LEA instructions");

  // Make sure that LEA def registers belong to the same class. There may be
  // instructions (like MOV8mr_NOREX) which allow a limited set of registers to
  // be used as their operands, so we must be sure that replacing one LEA
  // with another won't lead to putting a wrong register in the instruction.
  if (MRI->getRegClass(First.getOperand(0).getReg()) !=
      MRI->getRegClass(Last.getOperand(0).getReg()))
    return false;

  // Get new address displacement.
  AddrDispShift = getAddrDispShift(Last, 1, First, 1);

  // Loop over all uses of the Last LEA to check that its def register is
  // used only as address base for memory accesses. If so, it can be
  // replaced; otherwise it cannot.
  for (auto &MO : MRI->use_nodbg_operands(Last.getOperand(0).getReg())) {
    MachineInstr &MI = *MO.getParent();

    // Get the number of the first memory operand.
    const MCInstrDesc &Desc = MI.getDesc();
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);

    // If the use instruction has no memory operand - the LEA is not
    // replaceable.
    if (MemOpNo < 0)
      return false;

    MemOpNo += X86II::getOperandBias(Desc);

    // If the address base of the use instruction is not the LEA def register -
    // the LEA is not replaceable.
    if (!isIdenticalOp(MI.getOperand(MemOpNo + X86::AddrBaseReg), MO))
      return false;

    // If the LEA def register is used as any other operand of the use
    // instruction - the LEA is not replaceable.
    for (unsigned i = 0; i < MI.getNumOperands(); i++)
      if (i != (unsigned)(MemOpNo + X86::AddrBaseReg) &&
          isIdenticalOp(MI.getOperand(i), MO))
        return false;

    // Check that the new address displacement will fit 4 bytes.
    if (MI.getOperand(MemOpNo + X86::AddrDisp).isImm() &&
        !isInt<32>(MI.getOperand(MemOpNo + X86::AddrDisp).getImm() +
                   AddrDispShift))
      return false;
  }

  return true;
}

void OptimizeLEAPass::findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs) {
  unsigned Pos = 0;
  for (auto &MI : MBB) {
    // Assign the position number to the instruction. Note that we are going to
    // move some instructions during the optimization; however, there will
    // never be a need to move two instructions before any selected
    // instruction. So, to avoid multiple position updates during moves, we
    // just increase the position counter by two, leaving free space for
    // instructions that will be moved.
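    // For example (illustrative), instructions get positions 2, 4, 6, ...; an
    // LEA hoisted above the instruction at position N is given position N - 1,
    // which is still greater than that of the preceding instruction.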
    InstrPos[&MI] = Pos += 2;

    if (isLEA(MI))
      LEAs[getMemOpKey(MI, 1)].push_back(const_cast<MachineInstr *>(&MI));
  }
}

// Try to find load and store instructions which recalculate addresses already
// calculated by some LEA and replace their memory operands with its def
// register.
bool OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
  bool Changed = false;

  assert(!LEAs.empty());
  MachineBasicBlock *MBB = (*LEAs.begin()->second.begin())->getParent();

  // Process all instructions in basic block.
  for (auto I = MBB->begin(), E = MBB->end(); I != E;) {
    MachineInstr &MI = *I++;

    // Instruction must be load or store.
    if (!MI.mayLoadOrStore())
      continue;

    // Get the number of the first memory operand.
    const MCInstrDesc &Desc = MI.getDesc();
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);

    // If instruction has no memory operand - skip it.
    if (MemOpNo < 0)
      continue;

    MemOpNo += X86II::getOperandBias(Desc);

    // Get the best LEA instruction to replace address calculation.
    MachineInstr *DefMI;
    int64_t AddrDispShift;
    int Dist;
    if (!chooseBestLEA(LEAs[getMemOpKey(MI, MemOpNo)], MI, DefMI, AddrDispShift,
                       Dist))
      continue;

    // If the LEA occurs before the current instruction, we can freely replace
    // the instruction. If the LEA occurs after, we can hoist it above the
    // instruction so that it can be replaced. Since the LEA and the
    // instruction have similar memory operands (and thus the same def
    // instructions for those operands), we can always do that without
    // worrying about using registers before their defs.
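    // For example (illustrative only, virtual registers are arbitrary), when
    // optimizing for size,
    //   %2 = MOV64rm %0, 1, %1, 24, %noreg
    //   %3 = LEA64r  %0, 1, %1, 16, %noreg
    // becomes
    //   %3 = LEA64r  %0, 1, %1, 16, %noreg
    //   %2 = MOV64rm %3, 1, %noreg, 8, %noreg
    // after the LEA is hoisted above the load.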
    if (Dist < 0) {
      DefMI->removeFromParent();
      MBB->insert(MachineBasicBlock::iterator(&MI), DefMI);
      InstrPos[DefMI] = InstrPos[&MI] - 1;

      // Make sure the instructions' position numbers are sane.
      assert(((InstrPos[DefMI] == 1 &&
               MachineBasicBlock::iterator(DefMI) == MBB->begin()) ||
              InstrPos[DefMI] >
                  InstrPos[&*std::prev(MachineBasicBlock::iterator(DefMI))]) &&
             "Instruction positioning is broken");
    }

    // Since we can possibly extend register lifetime, clear kill flags.
    MRI->clearKillFlags(DefMI->getOperand(0).getReg());

    ++NumSubstLEAs;
    LLVM_DEBUG(dbgs() << "OptimizeLEAs: Candidate to replace: "; MI.dump(););

    // Change instruction operands.
    MI.getOperand(MemOpNo + X86::AddrBaseReg)
        .ChangeToRegister(DefMI->getOperand(0).getReg(), false);
    MI.getOperand(MemOpNo + X86::AddrScaleAmt).ChangeToImmediate(1);
    MI.getOperand(MemOpNo + X86::AddrIndexReg)
        .ChangeToRegister(X86::NoRegister, false);
    MI.getOperand(MemOpNo + X86::AddrDisp).ChangeToImmediate(AddrDispShift);
    MI.getOperand(MemOpNo + X86::AddrSegmentReg)
        .ChangeToRegister(X86::NoRegister, false);

    LLVM_DEBUG(dbgs() << "OptimizeLEAs: Replaced by: "; MI.dump(););

    Changed = true;
  }

  return Changed;
}

MachineInstr *OptimizeLEAPass::replaceDebugValue(MachineInstr &MI,
                                                 unsigned VReg,
                                                 int64_t AddrDispShift) {
  DIExpression *Expr = const_cast<DIExpression *>(MI.getDebugExpression());

  if (AddrDispShift != 0)
    Expr = DIExpression::prepend(Expr, DIExpression::NoDeref, AddrDispShift,
                                 DIExpression::NoDeref,
                                 DIExpression::WithStackValue);

  // Replace DBG_VALUE instruction with modified version.
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  bool IsIndirect = MI.isIndirectDebugValue();
  const MDNode *Var = MI.getDebugVariable();
  if (IsIndirect)
    assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
  return BuildMI(*MBB, MBB->erase(&MI), DL, TII->get(TargetOpcode::DBG_VALUE),
                 IsIndirect, VReg, Var, Expr);
}

// Try to find similar LEAs in the list and replace one with another.
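// For example (illustrative only), when
//   %2 = LEA64r %0, 1, %noreg, 32, %noreg
// is removed in favour of
//   %1 = LEA64r %0, 1, %noreg, 16, %noreg
// every use of %2 is rewritten to use %1 with the displacement difference of
// 16 folded in, and a DBG_VALUE of %2 is rewritten via replaceDebugValue to
// describe %1 with the same difference added to its DIExpression.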
bool OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
  bool Changed = false;

  // Loop over all entries in the table.
  for (auto &E : LEAs) {
    auto &List = E.second;

    // Loop over all LEA pairs.
    auto I1 = List.begin();
    while (I1 != List.end()) {
      MachineInstr &First = **I1;
      auto I2 = std::next(I1);
      while (I2 != List.end()) {
        MachineInstr &Last = **I2;
        int64_t AddrDispShift;

        // LEAs should be in occurrence order in the list, so we can freely
        // replace later LEAs with earlier ones.
        assert(calcInstrDist(First, Last) > 0 &&
               "LEAs must be in occurrence order in the list");

        // Check that the Last LEA instruction can be replaced by the First.
        if (!isReplaceable(First, Last, AddrDispShift)) {
          ++I2;
          continue;
        }

        // Loop over all uses of the Last LEA and update their operands. Note
        // that the correctness of this has already been checked in the
        // isReplaceable function.
        unsigned FirstVReg = First.getOperand(0).getReg();
        unsigned LastVReg = Last.getOperand(0).getReg();
        for (auto UI = MRI->use_begin(LastVReg), UE = MRI->use_end();
             UI != UE;) {
          MachineOperand &MO = *UI++;
          MachineInstr &MI = *MO.getParent();

          if (MI.isDebugValue()) {
            // Replace DBG_VALUE instruction with modified version using the
            // register from the replacing LEA and the address displacement
            // between the LEA instructions.
            replaceDebugValue(MI, FirstVReg, AddrDispShift);
            continue;
          }

          // Get the number of the first memory operand.
          const MCInstrDesc &Desc = MI.getDesc();
          int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags) +
                        X86II::getOperandBias(Desc);

          // Update address base.
          MO.setReg(FirstVReg);

          // Update address disp.
          MachineOperand &Op = MI.getOperand(MemOpNo + X86::AddrDisp);
          if (Op.isImm())
            Op.setImm(Op.getImm() + AddrDispShift);
          else if (!Op.isJTI())
            Op.setOffset(Op.getOffset() + AddrDispShift);
        }

        // Since we can possibly extend register lifetime, clear kill flags.
        MRI->clearKillFlags(FirstVReg);

        ++NumRedundantLEAs;
        LLVM_DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: ";
                   Last.dump(););

        // By this moment, all of the Last LEA's uses must be replaced. So we
        // can freely remove it.
        assert(MRI->use_empty(LastVReg) &&
               "The LEA's def register must have no uses");
        Last.eraseFromParent();

        // Erase removed LEA from the list.
        I2 = List.erase(I2);

        Changed = true;
      }
      ++I1;
    }
  }

  return Changed;
}

bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;

  if (DisableX86LEAOpt || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();

  // Process all basic blocks.
  for (auto &MBB : MF) {
    MemOpMap LEAs;
    InstrPos.clear();

    // Find all LEA instructions in basic block.
    findLEAs(MBB, LEAs);

    // If current basic block has no LEAs, move on to the next one.
    if (LEAs.empty())
      continue;

    // Remove redundant LEA instructions.
    Changed |= removeRedundantLEAs(LEAs);

    // Remove redundant address calculations. Do it only for -Os/-Oz since only
    // a code size gain is expected from this part of the pass.
    if (MF.getFunction().optForSize())
      Changed |= removeRedundantAddrCalc(LEAs);
  }

  return Changed;
}