//===- X86OptimizeLEAs.cpp - optimize usage of LEA instructions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the pass that performs some optimizations with LEA
// instructions in order to improve performance and code size.
// Currently, it does two things:
// 1) If there are two LEA instructions calculating addresses which only differ
//    by displacement inside a basic block, one of them is removed.
// 2) Address calculations in load and store instructions are replaced by
//    existing LEA def registers where possible.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "x86-optimize-LEAs"

static cl::opt<bool>
    DisableX86LEAOpt("disable-x86-lea-opt", cl::Hidden,
                     cl::desc("X86: Disable LEA optimizations."),
                     cl::init(false));

STATISTIC(NumSubstLEAs, "Number of LEA instruction substitutions");
STATISTIC(NumRedundantLEAs, "Number of redundant LEA instructions removed");

/// Returns true if two machine operands are identical and they are not
/// physical registers.
static inline bool isIdenticalOp(const MachineOperand &MO1,
                                 const MachineOperand &MO2);

/// Returns true if two address displacement operands are of the same
/// type and use the same symbol/index/address regardless of the offset.
static bool isSimilarDispOp(const MachineOperand &MO1,
                            const MachineOperand &MO2);

/// Returns true if the instruction is LEA.
static inline bool isLEA(const MachineInstr &MI);

namespace {

/// A key based on instruction's memory operands.
class MemOpKey {
public:
  MemOpKey(const MachineOperand *Base, const MachineOperand *Scale,
           const MachineOperand *Index, const MachineOperand *Segment,
           const MachineOperand *Disp)
      : Disp(Disp) {
    Operands[0] = Base;
    Operands[1] = Scale;
    Operands[2] = Index;
    Operands[3] = Segment;
  }

  bool operator==(const MemOpKey &Other) const {
    // Addresses' bases, scales, indices and segments must be identical.
    for (int i = 0; i < 4; ++i)
      if (!isIdenticalOp(*Operands[i], *Other.Operands[i]))
        return false;

    // Addresses' displacements don't have to be exactly the same. It only
    // matters that they use the same symbol/index/address. Immediates' or
    // offsets' differences will be taken care of during instruction
    // substitution.
    return isSimilarDispOp(*Disp, *Other.Disp);
  }

  // Address' base, scale, index and segment operands.
  const MachineOperand *Operands[4];

  // Address' displacement operand.
  const MachineOperand *Disp;
};

} // end anonymous namespace

/// Provide DenseMapInfo for MemOpKey.
namespace llvm {

template <> struct DenseMapInfo<MemOpKey> {
  using PtrInfo = DenseMapInfo<const MachineOperand *>;

  static inline MemOpKey getEmptyKey() {
    return MemOpKey(PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
                    PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
                    PtrInfo::getEmptyKey());
  }

  static inline MemOpKey getTombstoneKey() {
    return MemOpKey(PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
                    PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
                    PtrInfo::getTombstoneKey());
  }

  static unsigned getHashValue(const MemOpKey &Val) {
    // Checking any field of MemOpKey is enough to determine if the key is
    // empty or tombstone.
    assert(Val.Disp != PtrInfo::getEmptyKey() && "Cannot hash the empty key");
    assert(Val.Disp != PtrInfo::getTombstoneKey() &&
           "Cannot hash the tombstone key");

    hash_code Hash = hash_combine(*Val.Operands[0], *Val.Operands[1],
                                  *Val.Operands[2], *Val.Operands[3]);

    // If the address displacement is an immediate, it should not affect the
    // hash so that memory operands which differ only by immediate displacement
    // would have the same hash. If the address displacement is something else,
    // we should reflect the symbol/index/address in the hash.
    switch (Val.Disp->getType()) {
    case MachineOperand::MO_Immediate:
      break;
    case MachineOperand::MO_ConstantPoolIndex:
    case MachineOperand::MO_JumpTableIndex:
      Hash = hash_combine(Hash, Val.Disp->getIndex());
      break;
    case MachineOperand::MO_ExternalSymbol:
      Hash = hash_combine(Hash, Val.Disp->getSymbolName());
      break;
    case MachineOperand::MO_GlobalAddress:
      Hash = hash_combine(Hash, Val.Disp->getGlobal());
      break;
    case MachineOperand::MO_BlockAddress:
      Hash = hash_combine(Hash, Val.Disp->getBlockAddress());
      break;
    case MachineOperand::MO_MCSymbol:
      Hash = hash_combine(Hash, Val.Disp->getMCSymbol());
      break;
    case MachineOperand::MO_MachineBasicBlock:
      Hash = hash_combine(Hash, Val.Disp->getMBB());
      break;
    default:
      llvm_unreachable("Invalid address displacement operand");
    }

    return (unsigned)Hash;
  }

  static bool isEqual(const MemOpKey &LHS, const MemOpKey &RHS) {
    // Checking any field of MemOpKey is enough to determine if the key is
    // empty or tombstone.
    if (RHS.Disp == PtrInfo::getEmptyKey())
      return LHS.Disp == PtrInfo::getEmptyKey();
    if (RHS.Disp == PtrInfo::getTombstoneKey())
      return LHS.Disp == PtrInfo::getTombstoneKey();
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Returns a hash table key based on memory operands of \p MI. The
/// number of the first memory operand of \p MI is specified through \p N.
static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N) {
  assert((isLEA(MI) || MI.mayLoadOrStore()) &&
         "The instruction must be a LEA, a load or a store");
  return MemOpKey(&MI.getOperand(N + X86::AddrBaseReg),
                  &MI.getOperand(N + X86::AddrScaleAmt),
                  &MI.getOperand(N + X86::AddrIndexReg),
                  &MI.getOperand(N + X86::AddrSegmentReg),
                  &MI.getOperand(N + X86::AddrDisp));
}

static inline bool isIdenticalOp(const MachineOperand &MO1,
                                 const MachineOperand &MO2) {
  return MO1.isIdenticalTo(MO2) &&
         (!MO1.isReg() || !Register::isPhysicalRegister(MO1.getReg()));
}

#ifndef NDEBUG
static bool isValidDispOp(const MachineOperand &MO) {
  return MO.isImm() || MO.isCPI() || MO.isJTI() || MO.isSymbol() ||
         MO.isGlobal() || MO.isBlockAddress() || MO.isMCSymbol() || MO.isMBB();
}
#endif

static bool isSimilarDispOp(const MachineOperand &MO1,
                            const MachineOperand &MO2) {
  assert(isValidDispOp(MO1) && isValidDispOp(MO2) &&
         "Address displacement operand is not valid");
  return (MO1.isImm() && MO2.isImm()) ||
         (MO1.isCPI() && MO2.isCPI() && MO1.getIndex() == MO2.getIndex()) ||
         (MO1.isJTI() && MO2.isJTI() && MO1.getIndex() == MO2.getIndex()) ||
         (MO1.isSymbol() && MO2.isSymbol() &&
          MO1.getSymbolName() == MO2.getSymbolName()) ||
         (MO1.isGlobal() && MO2.isGlobal() &&
          MO1.getGlobal() == MO2.getGlobal()) ||
         (MO1.isBlockAddress() && MO2.isBlockAddress() &&
          MO1.getBlockAddress() == MO2.getBlockAddress()) ||
         (MO1.isMCSymbol() && MO2.isMCSymbol() &&
          MO1.getMCSymbol() == MO2.getMCSymbol()) ||
         (MO1.isMBB() && MO2.isMBB() && MO1.getMBB() == MO2.getMBB());
}

static inline bool isLEA(const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  return Opcode == X86::LEA16r || Opcode == X86::LEA32r ||
         Opcode == X86::LEA64r || Opcode == X86::LEA64_32r;
}

namespace {

class X86OptimizeLEAPass : public MachineFunctionPass {
public:
  X86OptimizeLEAPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 LEA Optimize"; }

  /// Loop over all of the basic blocks, replacing address calculations in
  /// load and store instructions when the address has already been calculated
  /// by a LEA. Also, remove redundant LEAs.
  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;

private:
  using MemOpMap = DenseMap<MemOpKey, SmallVector<MachineInstr *, 16>>;

  /// Returns the distance between two instructions inside one basic block.
  /// A negative result means that the instructions occur in reverse order.
  int calcInstrDist(const MachineInstr &First, const MachineInstr &Last);

  /// Choose the best \p LEA instruction from the \p List to replace the
  /// address calculation in \p MI. Return the address displacement and the
  /// distance between \p MI and the chosen \p BestLEA in \p AddrDispShift
  /// and \p Dist.
  bool chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
                     const MachineInstr &MI, MachineInstr *&BestLEA,
                     int64_t &AddrDispShift, int &Dist);

  /// Returns the difference between addresses' displacements of \p MI1
  /// and \p MI2. The numbers of the first memory operands for the instructions
  /// are specified through \p N1 and \p N2.
  int64_t getAddrDispShift(const MachineInstr &MI1, unsigned N1,
                           const MachineInstr &MI2, unsigned N2) const;

  /// Returns true if the \p Last LEA instruction can be replaced by the
  /// \p First. The difference between the displacements of the addresses
  /// calculated by these LEAs is returned in \p AddrDispShift. It'll be used
  /// for proper replacement of the \p Last LEA's uses with the \p First's def
  /// register.
  bool isReplaceable(const MachineInstr &First, const MachineInstr &Last,
                     int64_t &AddrDispShift) const;

  /// Find all LEA instructions in the basic block. Also, assign position
  /// numbers to all instructions in the basic block to speed up calculation of
  /// the distance between them.
  void findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs);

  /// Removes redundant address calculations.
  bool removeRedundantAddrCalc(MemOpMap &LEAs);

  /// Replace debug value MI with a new debug value instruction using register
  /// VReg with an appropriate offset and DIExpression to incorporate the
  /// address displacement AddrDispShift. Return the new debug value
  /// instruction.
  MachineInstr *replaceDebugValue(MachineInstr &MI, unsigned VReg,
                                  int64_t AddrDispShift);

  /// Removes LEAs which calculate similar addresses.
  bool removeRedundantLEAs(MemOpMap &LEAs);

  DenseMap<const MachineInstr *, unsigned> InstrPos;

  MachineRegisterInfo *MRI;
  const X86InstrInfo *TII;
  const X86RegisterInfo *TRI;
};

} // end anonymous namespace

char X86OptimizeLEAPass::ID = 0;

FunctionPass *llvm::createX86OptimizeLEAs() { return new X86OptimizeLEAPass(); }

INITIALIZE_PASS(X86OptimizeLEAPass, DEBUG_TYPE, "X86 optimize LEA pass", false,
                false)

int X86OptimizeLEAPass::calcInstrDist(const MachineInstr &First,
                                      const MachineInstr &Last) {
  // Both instructions must be in the same basic block and they must be
  // present in InstrPos.
  assert(Last.getParent() == First.getParent() &&
         "Instructions are in different basic blocks");
  assert(InstrPos.find(&First) != InstrPos.end() &&
         InstrPos.find(&Last) != InstrPos.end() &&
         "Instructions' positions are undefined");

  return InstrPos[&Last] - InstrPos[&First];
}

// Find the best LEA instruction in the List to replace the address
// recalculation in MI. Such a LEA must meet these requirements:
// 1) The address calculated by the LEA differs only by the displacement from
//    the address used in MI.
// 2) The register class of the definition of the LEA is compatible with the
//    register class of the address base register of MI.
// 3) Displacement of the new memory operand should fit in 1 byte if possible.
// 4) The LEA should be as close to MI as possible, and prior to it if
//    possible.
bool X86OptimizeLEAPass::chooseBestLEA(
    const SmallVectorImpl<MachineInstr *> &List, const MachineInstr &MI,
    MachineInstr *&BestLEA, int64_t &AddrDispShift, int &Dist) {
  const MachineFunction *MF = MI.getParent()->getParent();
  const MCInstrDesc &Desc = MI.getDesc();
  int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags) +
                X86II::getOperandBias(Desc);

  BestLEA = nullptr;

  // Loop over all LEA instructions.
  for (auto DefMI : List) {
    // Get the new address displacement.
    int64_t AddrDispShiftTemp = getAddrDispShift(MI, MemOpNo, *DefMI, 1);

    // Make sure the address displacement fits in 4 bytes.
    if (!isInt<32>(AddrDispShiftTemp))
      continue;

    // Check that the LEA def register can be used as the MI address base.
    // Some instructions can use a limited set of registers as the address
    // base, for example MOV8mr_NOREX. We could constrain the register class
    // of the LEA def to suit MI, however since this case is very rare and
    // hard to reproduce in a test it's just more reliable to skip the LEA.
    if (TII->getRegClass(Desc, MemOpNo + X86::AddrBaseReg, TRI, *MF) !=
        MRI->getRegClass(DefMI->getOperand(0).getReg()))
      continue;

    // Choose the closest LEA instruction from the list, prior to MI if
    // possible. Note that we take the resulting address displacement into
    // account as well. Also note that the list is sorted by the order in which
    // the LEAs occur, so the break condition is pretty simple.
    int DistTemp = calcInstrDist(*DefMI, MI);
    assert(DistTemp != 0 &&
           "The distance between two different instructions cannot be zero");
    if (DistTemp > 0 || BestLEA == nullptr) {
      // Do not update the best LEA if the current one provides a displacement
      // which fits in 1 byte, while the new candidate does not.
      if (BestLEA != nullptr && !isInt<8>(AddrDispShiftTemp) &&
          isInt<8>(AddrDispShift))
        continue;

      BestLEA = DefMI;
      AddrDispShift = AddrDispShiftTemp;
      Dist = DistTemp;
    }

    // FIXME: Maybe we should not always stop at the first LEA after MI.
    if (DistTemp < 0)
      break;
  }

  return BestLEA != nullptr;
}

// Get the difference between the addresses' displacements of the two
// instructions \p MI1 and \p MI2. The numbers of the first memory operands are
// passed through \p N1 and \p N2.
int64_t X86OptimizeLEAPass::getAddrDispShift(const MachineInstr &MI1,
                                             unsigned N1,
                                             const MachineInstr &MI2,
                                             unsigned N2) const {
  const MachineOperand &Op1 = MI1.getOperand(N1 + X86::AddrDisp);
  const MachineOperand &Op2 = MI2.getOperand(N2 + X86::AddrDisp);

  assert(isSimilarDispOp(Op1, Op2) &&
         "Address displacement operands are not compatible");

  // After the assert above we can be sure that both operands are of the same
  // valid type and use the same symbol/index/address, thus displacement shift
  // calculation is rather simple.
  if (Op1.isJTI())
    return 0;
  return Op1.isImm() ? Op1.getImm() - Op2.getImm()
                     : Op1.getOffset() - Op2.getOffset();
}

// Check that the Last LEA can be replaced by the First LEA. For that to be
// possible, these requirements must be met:
// 1) Addresses calculated by the LEAs differ only by displacement.
// 2) Def registers of the LEAs belong to the same class.
// 3) All uses of the Last LEA def register are replaceable, i.e. the
//    register is used only as an address base.
bool X86OptimizeLEAPass::isReplaceable(const MachineInstr &First,
                                       const MachineInstr &Last,
                                       int64_t &AddrDispShift) const {
  assert(isLEA(First) && isLEA(Last) &&
         "The function works only with LEA instructions");

  // Make sure that the LEA def registers belong to the same class. There may
  // be instructions (like MOV8mr_NOREX) which allow only a limited set of
  // registers to be used as their operands, so we must be sure that replacing
  // one LEA with another won't lead to putting a wrong register in the
  // instruction.
  if (MRI->getRegClass(First.getOperand(0).getReg()) !=
      MRI->getRegClass(Last.getOperand(0).getReg()))
    return false;

  // Get the new address displacement.
  AddrDispShift = getAddrDispShift(Last, 1, First, 1);

  // Loop over all uses of the Last LEA to check that its def register is
  // used only as an address base for memory accesses. If so, it can be
  // replaced; otherwise it cannot.
  for (auto &MO : MRI->use_nodbg_operands(Last.getOperand(0).getReg())) {
    MachineInstr &MI = *MO.getParent();

    // Get the number of the first memory operand.
    const MCInstrDesc &Desc = MI.getDesc();
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);

    // If the use instruction has no memory operand - the LEA is not
    // replaceable.
    if (MemOpNo < 0)
      return false;

    MemOpNo += X86II::getOperandBias(Desc);

    // If the address base of the use instruction is not the LEA def register -
    // the LEA is not replaceable.
    if (!isIdenticalOp(MI.getOperand(MemOpNo + X86::AddrBaseReg), MO))
      return false;

    // If the LEA def register is used as any other operand of the use
    // instruction - the LEA is not replaceable.
    for (unsigned i = 0; i < MI.getNumOperands(); i++)
      if (i != (unsigned)(MemOpNo + X86::AddrBaseReg) &&
          isIdenticalOp(MI.getOperand(i), MO))
        return false;

    // Check that the new address displacement will fit in 4 bytes.
    if (MI.getOperand(MemOpNo + X86::AddrDisp).isImm() &&
        !isInt<32>(MI.getOperand(MemOpNo + X86::AddrDisp).getImm() +
                   AddrDispShift))
      return false;
  }

  return true;
}

void X86OptimizeLEAPass::findLEAs(const MachineBasicBlock &MBB,
                                  MemOpMap &LEAs) {
  unsigned Pos = 0;
  for (auto &MI : MBB) {
    // Assign a position number to the instruction. Note that we are going to
    // move some instructions during the optimization; however, there will
    // never be a need to move two instructions before any selected
    // instruction. So to avoid multiple position updates during moves, we
    // just increase the position counter by two, leaving a free slot for
    // instructions which will be moved.
    InstrPos[&MI] = Pos += 2;

    if (isLEA(MI))
      LEAs[getMemOpKey(MI, 1)].push_back(const_cast<MachineInstr *>(&MI));
  }
}

// Try to find load and store instructions which recalculate addresses already
// calculated by some LEA and replace their memory operands with its def
// register.
bool X86OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
  bool Changed = false;

  assert(!LEAs.empty());
  MachineBasicBlock *MBB = (*LEAs.begin()->second.begin())->getParent();

  // Process all instructions in the basic block.
  for (auto I = MBB->begin(), E = MBB->end(); I != E;) {
    MachineInstr &MI = *I++;

    // The instruction must be a load or a store.
    if (!MI.mayLoadOrStore())
      continue;

    // Get the number of the first memory operand.
    const MCInstrDesc &Desc = MI.getDesc();
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);

    // If the instruction has no memory operand, skip it.
    if (MemOpNo < 0)
      continue;

    MemOpNo += X86II::getOperandBias(Desc);

    // Do not call chooseBestLEA if there is no matching LEA.
    auto Insns = LEAs.find(getMemOpKey(MI, MemOpNo));
    if (Insns == LEAs.end())
      continue;

    // Get the best LEA instruction to replace the address calculation.
    MachineInstr *DefMI;
    int64_t AddrDispShift;
    int Dist;
    if (!chooseBestLEA(Insns->second, MI, DefMI, AddrDispShift, Dist))
      continue;

    // If the LEA occurs before the current instruction, we can freely replace
    // the instruction. If the LEA occurs after it, we can hoist the LEA above
    // the instruction and thereby make the replacement possible. Since the LEA
    // and the instruction have similar memory operands (and thus the same def
    // instructions for those operands), we can always do that without worrying
    // about using registers before their defs.
    if (Dist < 0) {
      DefMI->removeFromParent();
      MBB->insert(MachineBasicBlock::iterator(&MI), DefMI);
      InstrPos[DefMI] = InstrPos[&MI] - 1;

      // Make sure the instructions' position numbers are sane.
      assert(((InstrPos[DefMI] == 1 &&
               MachineBasicBlock::iterator(DefMI) == MBB->begin()) ||
              InstrPos[DefMI] >
                  InstrPos[&*std::prev(MachineBasicBlock::iterator(DefMI))]) &&
             "Instruction positioning is broken");
    }

    // Since we can possibly extend register lifetime, clear kill flags.
    MRI->clearKillFlags(DefMI->getOperand(0).getReg());

    ++NumSubstLEAs;
    LLVM_DEBUG(dbgs() << "OptimizeLEAs: Candidate to replace: "; MI.dump(););

    // Change the instruction operands.
    MI.getOperand(MemOpNo + X86::AddrBaseReg)
        .ChangeToRegister(DefMI->getOperand(0).getReg(), false);
    MI.getOperand(MemOpNo + X86::AddrScaleAmt).ChangeToImmediate(1);
    MI.getOperand(MemOpNo + X86::AddrIndexReg)
        .ChangeToRegister(X86::NoRegister, false);
    MI.getOperand(MemOpNo + X86::AddrDisp).ChangeToImmediate(AddrDispShift);
    MI.getOperand(MemOpNo + X86::AddrSegmentReg)
        .ChangeToRegister(X86::NoRegister, false);

    LLVM_DEBUG(dbgs() << "OptimizeLEAs: Replaced by: "; MI.dump(););

    Changed = true;
  }

  return Changed;
}

MachineInstr *X86OptimizeLEAPass::replaceDebugValue(MachineInstr &MI,
                                                    unsigned VReg,
                                                    int64_t AddrDispShift) {
  DIExpression *Expr = const_cast<DIExpression *>(MI.getDebugExpression());
  if (AddrDispShift != 0)
    Expr = DIExpression::prepend(Expr, DIExpression::StackValue, AddrDispShift);

  // Replace the DBG_VALUE instruction with a modified version.
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  bool IsIndirect = MI.isIndirectDebugValue();
  const MDNode *Var = MI.getDebugVariable();
  if (IsIndirect)
    assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
  return BuildMI(*MBB, MBB->erase(&MI), DL, TII->get(TargetOpcode::DBG_VALUE),
                 IsIndirect, VReg, Var, Expr);
}

// Try to find similar LEAs in the list and replace one with another.
bool X86OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
  bool Changed = false;

  // Loop over all entries in the table.
  for (auto &E : LEAs) {
    auto &List = E.second;

    // Loop over all LEA pairs.
    auto I1 = List.begin();
    while (I1 != List.end()) {
      MachineInstr &First = **I1;
      auto I2 = std::next(I1);
      while (I2 != List.end()) {
        MachineInstr &Last = **I2;
        int64_t AddrDispShift;

        // LEAs should be in occurrence order in the list, so we can freely
        // replace later LEAs with earlier ones.
        assert(calcInstrDist(First, Last) > 0 &&
               "LEAs must be in occurrence order in the list");

        // Check that the Last LEA instruction can be replaced by the First.
        if (!isReplaceable(First, Last, AddrDispShift)) {
          ++I2;
          continue;
        }

        // Loop over all uses of the Last LEA and update their operands. Note
        // that the correctness of this has already been checked in the
        // isReplaceable function.
        Register FirstVReg = First.getOperand(0).getReg();
        Register LastVReg = Last.getOperand(0).getReg();
        for (auto UI = MRI->use_begin(LastVReg), UE = MRI->use_end();
             UI != UE;) {
          MachineOperand &MO = *UI++;
          MachineInstr &MI = *MO.getParent();

          if (MI.isDebugValue()) {
            // Replace DBG_VALUE instruction with modified version using the
            // register from the replacing LEA and the address displacement
            // between the LEA instructions.
            replaceDebugValue(MI, FirstVReg, AddrDispShift);
            continue;
          }

          // Get the number of the first memory operand.
          const MCInstrDesc &Desc = MI.getDesc();
          int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags) +
                        X86II::getOperandBias(Desc);

          // Update address base.
          MO.setReg(FirstVReg);

          // Update address disp.
          MachineOperand &Op = MI.getOperand(MemOpNo + X86::AddrDisp);
          if (Op.isImm())
            Op.setImm(Op.getImm() + AddrDispShift);
          else if (!Op.isJTI())
            Op.setOffset(Op.getOffset() + AddrDispShift);
        }

        // Since we can possibly extend register lifetime, clear kill flags.
        MRI->clearKillFlags(FirstVReg);

        ++NumRedundantLEAs;
        LLVM_DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: ";
                   Last.dump(););

        // By this moment, all of the Last LEA's uses must be replaced. So we
        // can freely remove it.
        assert(MRI->use_empty(LastVReg) &&
               "The LEA's def register must have no uses");
        Last.eraseFromParent();

        // Erase removed LEA from the list.
        I2 = List.erase(I2);

        Changed = true;
      }
      ++I1;
    }
  }

  return Changed;
}

bool X86OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;

  if (DisableX86LEAOpt || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();

  // Process all basic blocks.
  for (auto &MBB : MF) {
    MemOpMap LEAs;
    InstrPos.clear();

    // Find all LEA instructions in the basic block.
    findLEAs(MBB, LEAs);

    // If the current basic block has no LEAs, move on to the next one.
    if (LEAs.empty())
      continue;

    // Remove redundant LEA instructions.
    Changed |= removeRedundantLEAs(LEAs);

    // Remove redundant address calculations. Do it only for -Os/-Oz since only
    // a code size gain is expected from this part of the pass.
    if (MF.getFunction().hasOptSize())
      Changed |= removeRedundantAddrCalc(LEAs);
  }

  return Changed;
}