1//===- X86OptimizeLEAs.cpp - optimize usage of LEA instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the pass that performs some optimizations with LEA
10// instructions in order to improve performance and code size.
11// Currently, it does two things:
12// 1) If there are two LEA instructions calculating addresses which only differ
13// by displacement inside a basic block, one of them is removed.
14// 2) Address calculations in load and store instructions are replaced by
15// existing LEA def registers where possible.
16//
17//===----------------------------------------------------------------------===//
18
19#include "MCTargetDesc/X86BaseInfo.h"
20#include "X86.h"
21#include "X86InstrInfo.h"
22#include "X86Subtarget.h"
23#include "llvm/ADT/DenseMap.h"
24#include "llvm/ADT/DenseMapInfo.h"
25#include "llvm/ADT/Hashing.h"
26#include "llvm/ADT/SmallVector.h"
27#include "llvm/ADT/Statistic.h"
28#include "llvm/Analysis/ProfileSummaryInfo.h"
29#include "llvm/CodeGen/LazyMachineBlockFrequencyInfo.h"
30#include "llvm/CodeGen/MachineBasicBlock.h"
31#include "llvm/CodeGen/MachineFunction.h"
32#include "llvm/CodeGen/MachineFunctionPass.h"
33#include "llvm/CodeGen/MachineInstr.h"
34#include "llvm/CodeGen/MachineInstrBuilder.h"
35#include "llvm/CodeGen/MachineOperand.h"
36#include "llvm/CodeGen/MachineRegisterInfo.h"
37#include "llvm/CodeGen/MachineSizeOpts.h"
38#include "llvm/CodeGen/TargetOpcodes.h"
39#include "llvm/CodeGen/TargetRegisterInfo.h"
40#include "llvm/IR/DebugInfoMetadata.h"
41#include "llvm/IR/DebugLoc.h"
42#include "llvm/IR/Function.h"
43#include "llvm/MC/MCInstrDesc.h"
44#include "llvm/Support/CommandLine.h"
45#include "llvm/Support/Debug.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/MathExtras.h"
48#include "llvm/Support/raw_ostream.h"
49#include <cassert>
50#include <cstdint>
51#include <iterator>
52
53using namespace llvm;
54
55#define DEBUG_TYPE "x86-optimize-leas"
56
// Command-line switch that turns the whole pass into a no-op; useful for
// debugging and for bisecting miscompiles down to this optimization.
static cl::opt<bool>
    DisableX86LEAOpt("disable-x86-lea-opt", cl::Hidden,
                     cl::desc("X86: Disable LEA optimizations."),
                     cl::init(false));
61
62STATISTIC(NumSubstLEAs, "Number of LEA instruction substitutions");
63STATISTIC(NumRedundantLEAs, "Number of redundant LEA instructions removed");
64
65/// Returns true if two machine operands are identical and they are not
66/// physical registers.
67static inline bool isIdenticalOp(const MachineOperand &MO1,
68 const MachineOperand &MO2);
69
70/// Returns true if two address displacement operands are of the same
71/// type and use the same symbol/index/address regardless of the offset.
72static bool isSimilarDispOp(const MachineOperand &MO1,
73 const MachineOperand &MO2);
74
75/// Returns true if the instruction is LEA.
76static inline bool isLEA(const MachineInstr &MI);
77
78namespace {
79
80/// A key based on instruction's memory operands.
81class MemOpKey {
82public:
83 MemOpKey(const MachineOperand *Base, const MachineOperand *Scale,
84 const MachineOperand *Index, const MachineOperand *Segment,
85 const MachineOperand *Disp)
86 : Disp(Disp) {
87 Operands[0] = Base;
88 Operands[1] = Scale;
89 Operands[2] = Index;
90 Operands[3] = Segment;
91 }
92
93 bool operator==(const MemOpKey &Other) const {
94 // Addresses' bases, scales, indices and segments must be identical.
95 for (int i = 0; i < 4; ++i)
96 if (!isIdenticalOp(MO1: *Operands[i], MO2: *Other.Operands[i]))
97 return false;
98
99 // Addresses' displacements don't have to be exactly the same. It only
100 // matters that they use the same symbol/index/address. Immediates' or
101 // offsets' differences will be taken care of during instruction
102 // substitution.
103 return isSimilarDispOp(MO1: *Disp, MO2: *Other.Disp);
104 }
105
106 // Address' base, scale, index and segment operands.
107 const MachineOperand *Operands[4];
108
109 // Address' displacement operand.
110 const MachineOperand *Disp;
111};
112
113} // end anonymous namespace
114
115namespace llvm {
116
117/// Provide DenseMapInfo for MemOpKey.
118template <> struct DenseMapInfo<MemOpKey> {
119 using PtrInfo = DenseMapInfo<const MachineOperand *>;
120
121 static inline MemOpKey getEmptyKey() {
122 return MemOpKey(PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
123 PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
124 PtrInfo::getEmptyKey());
125 }
126
127 static inline MemOpKey getTombstoneKey() {
128 return MemOpKey(PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
129 PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
130 PtrInfo::getTombstoneKey());
131 }
132
133 static unsigned getHashValue(const MemOpKey &Val) {
134 // Checking any field of MemOpKey is enough to determine if the key is
135 // empty or tombstone.
136 assert(Val.Disp != PtrInfo::getEmptyKey() && "Cannot hash the empty key");
137 assert(Val.Disp != PtrInfo::getTombstoneKey() &&
138 "Cannot hash the tombstone key");
139
140 hash_code Hash = hash_combine(args: *Val.Operands[0], args: *Val.Operands[1],
141 args: *Val.Operands[2], args: *Val.Operands[3]);
142
143 // If the address displacement is an immediate, it should not affect the
144 // hash so that memory operands which differ only be immediate displacement
145 // would have the same hash. If the address displacement is something else,
146 // we should reflect symbol/index/address in the hash.
147 switch (Val.Disp->getType()) {
148 case MachineOperand::MO_Immediate:
149 break;
150 case MachineOperand::MO_ConstantPoolIndex:
151 case MachineOperand::MO_JumpTableIndex:
152 Hash = hash_combine(args: Hash, args: Val.Disp->getIndex());
153 break;
154 case MachineOperand::MO_ExternalSymbol:
155 Hash = hash_combine(args: Hash, args: Val.Disp->getSymbolName());
156 break;
157 case MachineOperand::MO_GlobalAddress:
158 Hash = hash_combine(args: Hash, args: Val.Disp->getGlobal());
159 break;
160 case MachineOperand::MO_BlockAddress:
161 Hash = hash_combine(args: Hash, args: Val.Disp->getBlockAddress());
162 break;
163 case MachineOperand::MO_MCSymbol:
164 Hash = hash_combine(args: Hash, args: Val.Disp->getMCSymbol());
165 break;
166 case MachineOperand::MO_MachineBasicBlock:
167 Hash = hash_combine(args: Hash, args: Val.Disp->getMBB());
168 break;
169 default:
170 llvm_unreachable("Invalid address displacement operand");
171 }
172
173 return (unsigned)Hash;
174 }
175
176 static bool isEqual(const MemOpKey &LHS, const MemOpKey &RHS) {
177 // Checking any field of MemOpKey is enough to determine if the key is
178 // empty or tombstone.
179 if (RHS.Disp == PtrInfo::getEmptyKey())
180 return LHS.Disp == PtrInfo::getEmptyKey();
181 if (RHS.Disp == PtrInfo::getTombstoneKey())
182 return LHS.Disp == PtrInfo::getTombstoneKey();
183 return LHS == RHS;
184 }
185};
186
187} // end namespace llvm
188
/// Returns a hash table key based on memory operands of \p MI. The
/// number of the first memory operand of \p MI is specified through \p N.
/// Only meaningful for LEAs, loads and stores (asserted below).
static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N) {
  assert((isLEA(MI) || MI.mayLoadOrStore()) &&
         "The instruction must be a LEA, a load or a store");
  // The five address components sit at fixed offsets from N.
  return MemOpKey(&MI.getOperand(N + X86::AddrBaseReg),
                  &MI.getOperand(N + X86::AddrScaleAmt),
                  &MI.getOperand(N + X86::AddrIndexReg),
                  &MI.getOperand(N + X86::AddrSegmentReg),
                  &MI.getOperand(N + X86::AddrDisp));
}
200
201static inline bool isIdenticalOp(const MachineOperand &MO1,
202 const MachineOperand &MO2) {
203 return MO1.isIdenticalTo(Other: MO2) && (!MO1.isReg() || !MO1.getReg().isPhysical());
204}
205
206#ifndef NDEBUG
207static bool isValidDispOp(const MachineOperand &MO) {
208 return MO.isImm() || MO.isCPI() || MO.isJTI() || MO.isSymbol() ||
209 MO.isGlobal() || MO.isBlockAddress() || MO.isMCSymbol() || MO.isMBB();
210}
211#endif
212
213static bool isSimilarDispOp(const MachineOperand &MO1,
214 const MachineOperand &MO2) {
215 assert(isValidDispOp(MO1) && isValidDispOp(MO2) &&
216 "Address displacement operand is not valid");
217 return (MO1.isImm() && MO2.isImm()) ||
218 (MO1.isCPI() && MO2.isCPI() && MO1.getIndex() == MO2.getIndex()) ||
219 (MO1.isJTI() && MO2.isJTI() && MO1.getIndex() == MO2.getIndex()) ||
220 (MO1.isSymbol() && MO2.isSymbol() &&
221 MO1.getSymbolName() == MO2.getSymbolName()) ||
222 (MO1.isGlobal() && MO2.isGlobal() &&
223 MO1.getGlobal() == MO2.getGlobal()) ||
224 (MO1.isBlockAddress() && MO2.isBlockAddress() &&
225 MO1.getBlockAddress() == MO2.getBlockAddress()) ||
226 (MO1.isMCSymbol() && MO2.isMCSymbol() &&
227 MO1.getMCSymbol() == MO2.getMCSymbol()) ||
228 (MO1.isMBB() && MO2.isMBB() && MO1.getMBB() == MO2.getMBB());
229}
230
231static inline bool isLEA(const MachineInstr &MI) {
232 unsigned Opcode = MI.getOpcode();
233 return Opcode == X86::LEA16r || Opcode == X86::LEA32r ||
234 Opcode == X86::LEA64r || Opcode == X86::LEA64_32r;
235}
236
237namespace {
238
/// Implementation shared by the legacy-PM and new-PM pass wrappers below.
class X86OptimizeLEAsImpl {
public:
  /// Runs the optimization over \p MF. \p PSI and \p MBFI (either may be
  /// null) drive the optimize-for-size decision. Returns true if the
  /// function was changed.
  bool runOnMachineFunction(MachineFunction &MF, ProfileSummaryInfo *PSI,
                            MachineBlockFrequencyInfo *MBFI);

private:
  /// Map from a memory-operand key to all LEAs with a similar address.
  using MemOpMap = DenseMap<MemOpKey, SmallVector<MachineInstr *, 16>>;

  /// Returns a distance between two instructions inside one basic block.
  /// Negative result means, that instructions occur in reverse order.
  int calcInstrDist(const MachineInstr &First, const MachineInstr &Last);

  /// Choose the best \p LEA instruction from the \p List to replace
  /// address calculation in \p MI instruction. Return the address displacement
  /// and the distance between \p MI and the chosen \p BestLEA in
  /// \p AddrDispShift and \p Dist.
  bool chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
                     const MachineInstr &MI, MachineInstr *&BestLEA,
                     int64_t &AddrDispShift, int &Dist);

  /// Returns the difference between addresses' displacements of \p MI1
  /// and \p MI2. The numbers of the first memory operands for the instructions
  /// are specified through \p N1 and \p N2.
  int64_t getAddrDispShift(const MachineInstr &MI1, unsigned N1,
                           const MachineInstr &MI2, unsigned N2) const;

  /// Returns true if the \p Last LEA instruction can be replaced by the
  /// \p First. The difference between displacements of the addresses calculated
  /// by these LEAs is returned in \p AddrDispShift. It'll be used for proper
  /// replacement of the \p Last LEA's uses with the \p First's def register.
  bool isReplaceable(const MachineInstr &First, const MachineInstr &Last,
                     int64_t &AddrDispShift) const;

  /// Find all LEA instructions in the basic block. Also, assign position
  /// numbers to all instructions in the basic block to speed up calculation of
  /// distance between them.
  void findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs);

  /// Removes redundant address calculations.
  bool removeRedundantAddrCalc(MemOpMap &LEAs);

  /// Replace debug value MI with a new debug value instruction using register
  /// VReg with an appropriate offset and DIExpression to incorporate the
  /// address displacement AddrDispShift. Return new debug value instruction.
  MachineInstr *replaceDebugValue(MachineInstr &MI, Register OldReg,
                                  Register NewReg, int64_t AddrDispShift);

  /// Removes LEAs which calculate similar addresses.
  bool removeRedundantLEAs(MemOpMap &LEAs);

  /// Position of each instruction within its basic block; filled by findLEAs
  /// in steps of two so a moved instruction can slot in between (see
  /// removeRedundantAddrCalc).
  DenseMap<const MachineInstr *, unsigned> InstrPos;

  // Per-function state, cached in runOnMachineFunction.
  MachineRegisterInfo *MRI = nullptr;
  const X86InstrInfo *TII = nullptr;
  const X86RegisterInfo *TRI = nullptr;
};
295
/// Legacy pass-manager wrapper around X86OptimizeLEAsImpl.
class X86OptimizeLEAsLegacy : public MachineFunctionPass {
public:
  X86OptimizeLEAsLegacy() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 LEA Optimize"; }

  /// Loop over all of the basic blocks, replacing address
  /// calculations in load and store instructions, if it's already
  /// been calculated by LEA. Also, remove redundant LEAs.
  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;

  // Declare the analyses consulted for the optimize-for-size decision.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<LazyMachineBlockFrequencyInfoPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
315
316} // end anonymous namespace
317
char X86OptimizeLEAsLegacy::ID = 0;

/// Factory used by the X86 target to create the legacy-PM pass instance.
FunctionPass *llvm::createX86OptimizeLEAsLegacyPass() {
  return new X86OptimizeLEAsLegacy();
}
INITIALIZE_PASS(X86OptimizeLEAsLegacy, DEBUG_TYPE, "X86 optimize LEA pass",
                false, false)
325
326int X86OptimizeLEAsImpl::calcInstrDist(const MachineInstr &First,
327 const MachineInstr &Last) {
328 // Both instructions must be in the same basic block and they must be
329 // presented in InstrPos.
330 assert(Last.getParent() == First.getParent() &&
331 "Instructions are in different basic blocks");
332 assert(InstrPos.contains(&First) && InstrPos.contains(&Last) &&
333 "Instructions' positions are undefined");
334
335 return InstrPos[&Last] - InstrPos[&First];
336}
337
338// Find the best LEA instruction in the List to replace address recalculation in
339// MI. Such LEA must meet these requirements:
340// 1) The address calculated by the LEA differs only by the displacement from
341// the address used in MI.
342// 2) The register class of the definition of the LEA is compatible with the
343// register class of the address base register of MI.
344// 3) Displacement of the new memory operand should fit in 1 byte if possible.
345// 4) The LEA should be as close to MI as possible, and prior to it if
346// possible.
347bool X86OptimizeLEAsImpl::chooseBestLEA(
348 const SmallVectorImpl<MachineInstr *> &List, const MachineInstr &MI,
349 MachineInstr *&BestLEA, int64_t &AddrDispShift, int &Dist) {
350 const MCInstrDesc &Desc = MI.getDesc();
351 int MemOpNo = X86II::getMemoryOperandNo(TSFlags: Desc.TSFlags) +
352 X86II::getOperandBias(Desc);
353
354 BestLEA = nullptr;
355
356 // Loop over all LEA instructions.
357 for (auto *DefMI : List) {
358 // Get new address displacement.
359 int64_t AddrDispShiftTemp = getAddrDispShift(MI1: MI, N1: MemOpNo, MI2: *DefMI, N2: 1);
360
361 // Make sure address displacement fits 4 bytes.
362 if (!isInt<32>(x: AddrDispShiftTemp))
363 continue;
364
365 // Check that LEA def register can be used as MI address base. Some
366 // instructions can use a limited set of registers as address base, for
367 // example MOV8mr_NOREX. We could constrain the register class of the LEA
368 // def to suit MI, however since this case is very rare and hard to
369 // reproduce in a test it's just more reliable to skip the LEA.
370 if (TII->getRegClass(MCID: Desc, OpNum: MemOpNo + X86::AddrBaseReg) !=
371 MRI->getRegClass(Reg: DefMI->getOperand(i: 0).getReg()))
372 continue;
373
374 // Choose the closest LEA instruction from the list, prior to MI if
375 // possible. Note that we took into account resulting address displacement
376 // as well. Also note that the list is sorted by the order in which the LEAs
377 // occur, so the break condition is pretty simple.
378 int DistTemp = calcInstrDist(First: *DefMI, Last: MI);
379 assert(DistTemp != 0 &&
380 "The distance between two different instructions cannot be zero");
381 if (DistTemp > 0 || BestLEA == nullptr) {
382 // Do not update return LEA, if the current one provides a displacement
383 // which fits in 1 byte, while the new candidate does not.
384 if (BestLEA != nullptr && !isInt<8>(x: AddrDispShiftTemp) &&
385 isInt<8>(x: AddrDispShift))
386 continue;
387
388 BestLEA = DefMI;
389 AddrDispShift = AddrDispShiftTemp;
390 Dist = DistTemp;
391 }
392
393 // FIXME: Maybe we should not always stop at the first LEA after MI.
394 if (DistTemp < 0)
395 break;
396 }
397
398 return BestLEA != nullptr;
399}
400
401// Get the difference between the addresses' displacements of the two
402// instructions \p MI1 and \p MI2. The numbers of the first memory operands are
403// passed through \p N1 and \p N2.
404int64_t X86OptimizeLEAsImpl::getAddrDispShift(const MachineInstr &MI1,
405 unsigned N1,
406 const MachineInstr &MI2,
407 unsigned N2) const {
408 const MachineOperand &Op1 = MI1.getOperand(i: N1 + X86::AddrDisp);
409 const MachineOperand &Op2 = MI2.getOperand(i: N2 + X86::AddrDisp);
410
411 assert(isSimilarDispOp(Op1, Op2) &&
412 "Address displacement operands are not compatible");
413
414 // After the assert above we can be sure that both operands are of the same
415 // valid type and use the same symbol/index/address, thus displacement shift
416 // calculation is rather simple.
417 if (Op1.isJTI())
418 return 0;
419 return Op1.isImm() ? Op1.getImm() - Op2.getImm()
420 : Op1.getOffset() - Op2.getOffset();
421}
422
423// Check that the Last LEA can be replaced by the First LEA. To be so,
424// these requirements must be met:
425// 1) Addresses calculated by LEAs differ only by displacement.
426// 2) Def registers of LEAs belong to the same class.
427// 3) All uses of the Last LEA def register are replaceable, thus the
428// register is used only as address base.
429bool X86OptimizeLEAsImpl::isReplaceable(const MachineInstr &First,
430 const MachineInstr &Last,
431 int64_t &AddrDispShift) const {
432 assert(isLEA(First) && isLEA(Last) &&
433 "The function works only with LEA instructions");
434
435 // Make sure that LEA def registers belong to the same class. There may be
436 // instructions (like MOV8mr_NOREX) which allow a limited set of registers to
437 // be used as their operands, so we must be sure that replacing one LEA
438 // with another won't lead to putting a wrong register in the instruction.
439 if (MRI->getRegClass(Reg: First.getOperand(i: 0).getReg()) !=
440 MRI->getRegClass(Reg: Last.getOperand(i: 0).getReg()))
441 return false;
442
443 // Get new address displacement.
444 AddrDispShift = getAddrDispShift(MI1: Last, N1: 1, MI2: First, N2: 1);
445
446 // Loop over all uses of the Last LEA to check that its def register is
447 // used only as address base for memory accesses. If so, it can be
448 // replaced, otherwise - no.
449 for (auto &MO : MRI->use_nodbg_operands(Reg: Last.getOperand(i: 0).getReg())) {
450 MachineInstr &MI = *MO.getParent();
451
452 // Get the number of the first memory operand.
453 const MCInstrDesc &Desc = MI.getDesc();
454 int MemOpNo = X86II::getMemoryOperandNo(TSFlags: Desc.TSFlags);
455
456 // If the use instruction has no memory operand - the LEA is not
457 // replaceable.
458 if (MemOpNo < 0)
459 return false;
460
461 MemOpNo += X86II::getOperandBias(Desc);
462
463 // If the address base of the use instruction is not the LEA def register -
464 // the LEA is not replaceable.
465 if (!isIdenticalOp(MO1: MI.getOperand(i: MemOpNo + X86::AddrBaseReg), MO2: MO))
466 return false;
467
468 // If the LEA def register is used as any other operand of the use
469 // instruction - the LEA is not replaceable.
470 for (unsigned i = 0; i < MI.getNumOperands(); i++)
471 if (i != (unsigned)(MemOpNo + X86::AddrBaseReg) &&
472 isIdenticalOp(MO1: MI.getOperand(i), MO2: MO))
473 return false;
474
475 // Check that the new address displacement will fit 4 bytes.
476 if (MI.getOperand(i: MemOpNo + X86::AddrDisp).isImm() &&
477 !isInt<32>(x: MI.getOperand(i: MemOpNo + X86::AddrDisp).getImm() +
478 AddrDispShift))
479 return false;
480 }
481
482 return true;
483}
484
485void X86OptimizeLEAsImpl::findLEAs(const MachineBasicBlock &MBB,
486 MemOpMap &LEAs) {
487 unsigned Pos = 0;
488 for (auto &MI : MBB) {
489 // Assign the position number to the instruction. Note that we are going to
490 // move some instructions during the optimization however there will never
491 // be a need to move two instructions before any selected instruction. So to
492 // avoid multiple positions' updates during moves we just increase position
493 // counter by two leaving a free space for instructions which will be moved.
494 InstrPos[&MI] = Pos += 2;
495
496 if (isLEA(MI))
497 LEAs[getMemOpKey(MI, N: 1)].push_back(Elt: const_cast<MachineInstr *>(&MI));
498 }
499}
500
// Try to find load and store instructions which recalculate addresses already
// calculated by some LEA and replace their memory operands with its def
// register.
bool X86OptimizeLEAsImpl::removeRedundantAddrCalc(MemOpMap &LEAs) {
  bool Changed = false;

  assert(!LEAs.empty());
  // The map is rebuilt per basic block, so any entry identifies the block
  // currently being processed.
  MachineBasicBlock *MBB = (*LEAs.begin()->second.begin())->getParent();

  // Process all instructions in basic block.
  for (MachineInstr &MI : llvm::make_early_inc_range(*MBB)) {
    // Instruction must be load or store.
    if (!MI.mayLoadOrStore())
      continue;

    // Get the number of the first memory operand.
    const MCInstrDesc &Desc = MI.getDesc();
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);

    // If instruction has no memory operand - skip it.
    if (MemOpNo < 0)
      continue;

    MemOpNo += X86II::getOperandBias(Desc);

    // Do not call chooseBestLEA if there was no matching LEA
    auto Insns = LEAs.find(getMemOpKey(MI, MemOpNo));
    if (Insns == LEAs.end())
      continue;

    // Get the best LEA instruction to replace address calculation.
    MachineInstr *DefMI;
    int64_t AddrDispShift;
    int Dist;
    if (!chooseBestLEA(Insns->second, MI, DefMI, AddrDispShift, Dist))
      continue;

    // If LEA occurs before current instruction, we can freely replace
    // the instruction. If LEA occurs after, we can lift LEA above the
    // instruction and this way to be able to replace it. Since LEA and the
    // instruction have similar memory operands (thus, the same def
    // instructions for these operands), we can always do that, without
    // worries of using registers before their defs.
    if (Dist < 0) {
      DefMI->removeFromParent();
      MBB->insert(MachineBasicBlock::iterator(&MI), DefMI);
      // Positions are assigned in steps of two (see findLEAs), so the slot
      // directly before MI is guaranteed to be free for the moved LEA.
      InstrPos[DefMI] = InstrPos[&MI] - 1;

      // Make sure the instructions' position numbers are sane.
      assert(((InstrPos[DefMI] == 1 &&
               MachineBasicBlock::iterator(DefMI) == MBB->begin()) ||
              InstrPos[DefMI] >
                  InstrPos[&*std::prev(MachineBasicBlock::iterator(DefMI))]) &&
             "Instruction positioning is broken");
    }

    // Since we can possibly extend register lifetime, clear kill flags.
    MRI->clearKillFlags(DefMI->getOperand(0).getReg());

    ++NumSubstLEAs;
    LLVM_DEBUG(dbgs() << "OptimizeLEAs: Candidate to replace: "; MI.dump(););

    // Change instruction operands: rewrite the address to
    // (LEA def) + AddrDispShift with no scale, index or segment.
    MI.getOperand(MemOpNo + X86::AddrBaseReg)
        .ChangeToRegister(DefMI->getOperand(0).getReg(), false);
    MI.getOperand(MemOpNo + X86::AddrScaleAmt).ChangeToImmediate(1);
    MI.getOperand(MemOpNo + X86::AddrIndexReg)
        .ChangeToRegister(X86::NoRegister, false);
    MI.getOperand(MemOpNo + X86::AddrDisp).ChangeToImmediate(AddrDispShift);
    MI.getOperand(MemOpNo + X86::AddrSegmentReg)
        .ChangeToRegister(X86::NoRegister, false);

    LLVM_DEBUG(dbgs() << "OptimizeLEAs: Replaced by: "; MI.dump(););

    Changed = true;
  }

  return Changed;
}
580
581MachineInstr *X86OptimizeLEAsImpl::replaceDebugValue(MachineInstr &MI,
582 Register OldReg,
583 Register NewReg,
584 int64_t AddrDispShift) {
585 const DIExpression *Expr = MI.getDebugExpression();
586 if (AddrDispShift != 0) {
587 if (MI.isNonListDebugValue()) {
588 Expr =
589 DIExpression::prepend(Expr, Flags: DIExpression::StackValue, Offset: AddrDispShift);
590 } else {
591 // Update the Expression, appending an offset of `AddrDispShift` to the
592 // Op corresponding to `OldReg`.
593 SmallVector<uint64_t, 3> Ops;
594 DIExpression::appendOffset(Ops, Offset: AddrDispShift);
595 for (MachineOperand &Op : MI.getDebugOperandsForReg(Reg: OldReg)) {
596 unsigned OpIdx = MI.getDebugOperandIndex(Op: &Op);
597 Expr = DIExpression::appendOpsToArg(Expr, Ops, ArgNo: OpIdx);
598 }
599 }
600 }
601
602 // Replace DBG_VALUE instruction with modified version.
603 MachineBasicBlock *MBB = MI.getParent();
604 DebugLoc DL = MI.getDebugLoc();
605 bool IsIndirect = MI.isIndirectDebugValue();
606 const MDNode *Var = MI.getDebugVariable();
607 unsigned Opcode = MI.isNonListDebugValue() ? TargetOpcode::DBG_VALUE
608 : TargetOpcode::DBG_VALUE_LIST;
609 if (IsIndirect)
610 assert(MI.getDebugOffset().getImm() == 0 &&
611 "DBG_VALUE with nonzero offset");
612 SmallVector<MachineOperand, 4> NewOps;
613 // If we encounter an operand using the old register, replace it with an
614 // operand that uses the new register; otherwise keep the old operand.
615 auto replaceOldReg = [OldReg, NewReg](const MachineOperand &Op) {
616 if (Op.isReg() && Op.getReg() == OldReg)
617 return MachineOperand::CreateReg(Reg: NewReg, isDef: false, isImp: false, isKill: false, isDead: false,
618 isUndef: false, isEarlyClobber: false, SubReg: false, isDebug: false, isInternalRead: false,
619 /*IsRenamable*/ isRenamable: true);
620 return Op;
621 };
622 for (const MachineOperand &Op : MI.debug_operands())
623 NewOps.push_back(Elt: replaceOldReg(Op));
624 return BuildMI(BB&: *MBB, I: MBB->erase(I: &MI), DL, MCID: TII->get(Opcode), IsIndirect,
625 MOs: NewOps, Variable: Var, Expr);
626}
627
// Try to find similar LEAs in the list and replace one with another.
bool X86OptimizeLEAsImpl::removeRedundantLEAs(MemOpMap &LEAs) {
  bool Changed = false;

  // Loop over all entries in the table.
  for (auto &E : LEAs) {
    auto &List = E.second;

    // Loop over all LEA pairs.
    auto I1 = List.begin();
    while (I1 != List.end()) {
      MachineInstr &First = **I1;
      auto I2 = std::next(I1);
      while (I2 != List.end()) {
        MachineInstr &Last = **I2;
        int64_t AddrDispShift;

        // LEAs should be in occurrence order in the list, so we can freely
        // replace later LEAs with earlier ones.
        assert(calcInstrDist(First, Last) > 0 &&
               "LEAs must be in occurrence order in the list");

        // Check that the Last LEA instruction can be replaced by the First.
        if (!isReplaceable(First, Last, AddrDispShift)) {
          ++I2;
          continue;
        }

        // Loop over all uses of the Last LEA and update their operands. Note
        // that the correctness of this has already been checked in the
        // isReplaceable function.
        Register FirstVReg = First.getOperand(0).getReg();
        Register LastVReg = Last.getOperand(0).getReg();
        // We use MRI->use_empty here instead of the combination of
        // llvm::make_early_inc_range and MRI->use_operands because we could
        // replace two or more uses in a debug instruction in one iteration, and
        // that would deeply confuse llvm::make_early_inc_range.
        while (!MRI->use_empty(LastVReg)) {
          MachineOperand &MO = *MRI->use_begin(LastVReg);
          MachineInstr &MI = *MO.getParent();

          if (MI.isDebugValue()) {
            // Replace DBG_VALUE instruction with modified version using the
            // register from the replacing LEA and the address displacement
            // between the LEA instructions. This rewrites (and thus removes)
            // every use of LastVReg in MI at once, keeping the loop finite.
            replaceDebugValue(MI, LastVReg, FirstVReg, AddrDispShift);
            continue;
          }

          // Get the number of the first memory operand.
          const MCInstrDesc &Desc = MI.getDesc();
          int MemOpNo =
              X86II::getMemoryOperandNo(Desc.TSFlags) +
              X86II::getOperandBias(Desc);

          // Update address base.
          MO.setReg(FirstVReg);

          // Update address disp. Jump-table displacements carry no offset
          // (see getAddrDispShift), so they stay untouched.
          MachineOperand &Op = MI.getOperand(MemOpNo + X86::AddrDisp);
          if (Op.isImm())
            Op.setImm(Op.getImm() + AddrDispShift);
          else if (!Op.isJTI())
            Op.setOffset(Op.getOffset() + AddrDispShift);
        }

        // Since we can possibly extend register lifetime, clear kill flags.
        MRI->clearKillFlags(FirstVReg);

        ++NumRedundantLEAs;
        LLVM_DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: ";
                   Last.dump(););

        // By this moment, all of the Last LEA's uses must be replaced. So we
        // can freely remove it.
        assert(MRI->use_empty(LastVReg) &&
               "The LEA's def register must have no uses");
        Last.eraseFromParent();

        // Erase removed LEA from the list; erase() returns the iterator to
        // the next candidate, so I2 is not advanced here.
        I2 = List.erase(I2);

        Changed = true;
      }
      ++I1;
    }
  }

  return Changed;
}
718
719bool X86OptimizeLEAsImpl::runOnMachineFunction(
720 MachineFunction &MF, ProfileSummaryInfo *PSI,
721 MachineBlockFrequencyInfo *MBFI) {
722 bool Changed = false;
723
724 if (DisableX86LEAOpt)
725 return false;
726
727 MRI = &MF.getRegInfo();
728 TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
729 TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();
730
731 // Process all basic blocks.
732 for (auto &MBB : MF) {
733 MemOpMap LEAs;
734 InstrPos.clear();
735
736 // Find all LEA instructions in basic block.
737 findLEAs(MBB, LEAs);
738
739 // If current basic block has no LEAs, move on to the next one.
740 if (LEAs.empty())
741 continue;
742
743 // Remove redundant LEA instructions.
744 Changed |= removeRedundantLEAs(LEAs);
745
746 // Remove redundant address calculations. Do it only for -Os/-Oz since only
747 // a code size gain is expected from this part of the pass.
748 if (llvm::shouldOptimizeForSize(MBB: &MBB, PSI, MBFI))
749 Changed |= removeRedundantAddrCalc(LEAs);
750 }
751
752 return Changed;
753}
754
755bool X86OptimizeLEAsLegacy::runOnMachineFunction(MachineFunction &MF) {
756 if (skipFunction(F: MF.getFunction()))
757 return false;
758 ProfileSummaryInfo *PSI =
759 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
760 MachineBlockFrequencyInfo *MBFI =
761 (PSI && PSI->hasProfileSummary())
762 ? &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI()
763 : nullptr;
764 X86OptimizeLEAsImpl PassImpl;
765 return PassImpl.runOnMachineFunction(MF, PSI, MBFI);
766}
767
768PreservedAnalyses
769X86OptimizeLEAsPass::run(MachineFunction &MF,
770 MachineFunctionAnalysisManager &MFAM) {
771 ProfileSummaryInfo *PSI =
772 MFAM.getResult<ModuleAnalysisManagerMachineFunctionProxy>(IR&: MF)
773 .getCachedResult<ProfileSummaryAnalysis>(
774 IR&: *MF.getFunction().getParent());
775 MachineBlockFrequencyInfo *MBFI =
776 (PSI && PSI->hasProfileSummary())
777 ? &MFAM.getResult<MachineBlockFrequencyAnalysis>(IR&: MF)
778 : nullptr;
779 X86OptimizeLEAsImpl PassImpl;
780 bool Changed = PassImpl.runOnMachineFunction(MF, PSI, MBFI);
781 if (!Changed)
782 return PreservedAnalyses::all();
783 return getMachineFunctionPassPreservedAnalyses().preserveSet<CFGAnalyses>();
784}
785