//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() = default;

const TargetRegisterClass *
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.operands()[OpNum].RegClass;
  if (MCID.operands()[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str, const MCAsmInfo &MAI,
    const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}
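// A minimal illustration (not exercised by this file): assuming a target
// whose MaxInstLength is 4 and whose comment string is "#", the function
// above would measure
//
//   "nop"          as 4   (one instruction, assumed worst-case length)
//   "nop\nnop"     as 8   (newline-separated instructions)
//   "# comment"    as 0   (comments are skipped)
//   ".space 512"   as 512 (the restricted .space form described above)
//
// Every instruction is charged MaxInstLength bytes except the special-cased
// .space directive, which contributes exactly its byte argument.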
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If NewDest isn't immediately after MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its
    // own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable =
      Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
  bool Reg2IsRenamable =
      Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Reg1.isPhysical())
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Reg2.isPhysical())
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}
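// A small worked example of the resolution above (operand indices are
// hypothetical): for an instruction whose commutable operands are 1 and 2,
//
//   fixCommutedOpIndices(Idx1 = CommuteAnyOperandIndex,
//                        Idx2 = CommuteAnyOperandIndex, 1, 2)
//     -> true, Idx1 = 1, Idx2 = 2   (both wildcards resolved)
//   fixCommutedOpIndices(Idx1 = CommuteAnyOperandIndex, Idx2 = 1, 1, 2)
//     -> true, Idx1 = 2             (wildcard bound to the other operand)
//   fixCommutedOpIndices(Idx1 = 0, Idx2 = 2, 1, 2)
//     -> false                      (operand 0 is not commutable here)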
bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.operands()[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}
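// Worked example (numbers are hypothetical): for a 16-byte register class and
// a sub-register index covering bits [64, 128), getStackSlotRange computes
// Size = 128 / 8 - 64 / 8 = 8 and Offset = 64 / 8 = 8. On a big-endian target
// the offset is then mirrored within the slot, Offset = 16 - (8 + 8) = 0,
// because the high part of the register lives at the low memory address.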
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(FoldReg.isVirtual() && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (LiveOp.getReg().isPhysical())
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange asserts if MI is not a stackmap, patchpoint,
  // or statepoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, and the function
  // arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}
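// Sketch of the rewrite performed above (register and frame index names are
// made up): a folded live value
//
//   STACKMAP 0, 0, ..., %vreg, ...
//
// becomes an indirect memory reference against the spill slot
//
//   STACKMAP 0, 0, ..., IndirectMemRefOp, 8, %fixed-stack.0, 0, ...
//
// i.e. the register operand is replaced by the four operands
// <IndirectMemRefOp, SpillSize, FrameIndex, SpillOffset> that StackMaps
// lowering understands.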
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy them from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
                        Register());
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());
  return &*--Pos;
}
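// The COPY fallback above turns a copy whose other side is being spilled or
// reloaded into a direct stack access. Schematically (virtual register and
// stack slot names are illustrative):
//
//   %1 = COPY %0     ; fold operand 0 (the def) to %stack.0
//     -> store %0 to %stack.0         (storeRegToStackSlot)
//   %1 = COPY %0     ; fold operand 1 (the use) from %stack.0
//     -> %1 = load from %stack.0      (loadRegFromStackSlot)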
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
/// replacement instructions immediately precede it. Copy any implicit
/// operands from MI to the replacement instruction.
static void transferImplicitOperands(MachineInstr *MI,
                                     const TargetRegisterInfo *TRI) {
  MachineBasicBlock::iterator CopyMI = MI;
  --CopyMI;

  Register DstReg = MI->getOperand(0).getReg();
  for (const MachineOperand &MO : MI->implicit_operands()) {
    CopyMI->addOperand(MO);

    // Be conservative about preserving kills when subregister defs are
    // involved. If there was an implicit kill of a super-register overlapping
    // the copy result, we would kill subregisters that previous copies
    // defined.
    if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
      CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
  }
}

void TargetInstrInfo::lowerCopy(MachineInstr *MI,
                                const TargetRegisterInfo *TRI) const {
  if (MI->allDefsAreDead()) {
    MI->setDesc(get(TargetOpcode::KILL));
    return;
  }

  MachineOperand &DstMO = MI->getOperand(0);
  MachineOperand &SrcMO = MI->getOperand(1);

  bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
  if (IdentityCopy || SrcMO.isUndef()) {
    // No need to insert an identity copy instruction, but replace with a KILL
    // if liveness is changed.
    if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
      // We must make sure the super-register gets killed. Replace the
      // instruction with KILL.
      MI->setDesc(get(TargetOpcode::KILL));
      return;
    }
    // Vanilla identity copy.
    MI->eraseFromParent();
    return;
  }

  copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
              SrcMO.getReg(), SrcMO.isKill());

  if (MI->getNumOperands() > 2)
    transferImplicitOperands(MI, TRI);
  MI->eraseFromParent();
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Op1.getReg().isVirtual())
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Op2.getReg().isVirtual())
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And at least one operand must be defined in MBB.
  return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
}

bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
                                               unsigned Opcode2) const {
  return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned Opcode = Inst.getOpcode();

  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
             areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative or be the
  //    inverse of such an operation (this can be different even for
  //    instructions with the same opcode if traits like fast-math-flags are
  //    included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
         (isAssociativeAndCommutative(*MI1) ||
          isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative or be the inverse of
//    such an operation.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return (isAssociativeAndCommutative(Inst) ||
          isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
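// Naming convention for the patterns pushed below, as consumed by
// genAlternativeCodeSequence and reassociateOps: the letters before the
// underscore give the order of Prev's two source operands, and the letters
// after it give the order of Root's, where B is Prev's result. So
// REASSOC_AX_BY matches
//
//   B = A op X   (Prev: A first, X second)
//   C = B op Y   (Root: B first, Y second)
//
// and REASSOC_XA_YB matches the fully swapped form
//
//   B = X op A
//   C = Y op B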
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getReassociationOpcodes(MachineCombinerPattern Pattern,
                                         const MachineInstr &Root,
                                         const MachineInstr &Prev) const {
  bool AssocCommutRoot = isAssociativeAndCommutative(Root);
  bool AssocCommutPrev = isAssociativeAndCommutative(Prev);

  // Early exit if both opcodes are associative and commutative. It's a trivial
  // reassociation when we only change the order of operands. In this case the
  // opcodes are not required to have inverse versions.
  if (AssocCommutRoot && AssocCommutPrev) {
    assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
    return std::make_pair(Root.getOpcode(), Root.getOpcode());
  }

  // At least one instruction is not associative or commutative.
  // Since we have matched one of the reassociation patterns, we expect that
  // the instructions' opcodes are equal or one of them is the inversion of
  // the other.
  assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
         "Incorrectly matched pattern");
  unsigned AssocCommutOpcode = Root.getOpcode();
  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
  if (!AssocCommutRoot)
    std::swap(AssocCommutOpcode, InverseOpcode);

  // The transformation rule (`+` is any associative and commutative binary
  // operation, `-` is the inverse):
  // REASSOC_AX_BY:
  //   (A + X) + Y => A + (X + Y)
  //   (A + X) - Y => A + (X - Y)
  //   (A - X) + Y => A - (X - Y)
  //   (A - X) - Y => A - (X + Y)
  // REASSOC_XA_BY:
  //   (X + A) + Y => (X + Y) + A
  //   (X + A) - Y => (X - Y) + A
  //   (X - A) + Y => (X + Y) - A
  //   (X - A) - Y => (X - Y) - A
  // REASSOC_AX_YB:
  //   Y + (A + X) => (Y + X) + A
  //   Y - (A + X) => (Y - X) - A
  //   Y + (A - X) => (Y - X) + A
  //   Y - (A - X) => (Y + X) - A
  // REASSOC_XA_YB:
  //   Y + (X + A) => (Y + X) + A
  //   Y - (X + A) => (Y - X) - A
  //   Y + (X - A) => (Y + X) - A
  //   Y - (X - A) => (Y - X) + A
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    break;
  }
  llvm_unreachable("Unhandled combination");
}

// Return a pair of boolean flags showing if the new root and new prev operands
// must be swapped. See visual example of the rule in
// TargetInstrInfo::getReassociationOpcodes.
static std::pair<bool, bool> mustSwapOperands(MachineCombinerPattern Pattern) {
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    return {false, false};
  case MachineCombinerPattern::REASSOC_XA_BY:
    return {true, false};
  case MachineCombinerPattern::REASSOC_AX_YB:
    return {true, true};
  case MachineCombinerPattern::REASSOC_XA_YB:
    return {true, true};
  }
}
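// A quick arithmetic sanity check of one REASSOC_AX_BY rule with concrete
// values (A = 5, X = 3, Y = 2, with `+`/`-` being ordinary addition and
// subtraction):
//
//   (A - X) + Y = (5 - 3) + 2 = 4
//   A - (X - Y) = 5 - (3 - 2) = 4
//
// This matches the third REASSOC_AX_BY line above: when Root is `+` and Prev
// is the inverse `-`, the rewritten Root becomes `-` (computing A - NewVR)
// and the rewritten Prev becomes `-` (computing NewVR = X - Y).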
/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (RegA.isVirtual())
    MRI.constrainRegClass(RegA, RC);
  if (RegB.isVirtual())
    MRI.constrainRegClass(RegB, RC);
  if (RegX.isVirtual())
    MRI.constrainRegClass(RegX, RC);
  if (RegY.isVirtual())
    MRI.constrainRegClass(RegY, RC);
  if (RegC.isVirtual())
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();
  bool KillNewVR = true;

  auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);

  if (SwapPrevOperands) {
    std::swap(RegX, RegY);
    std::swap(KillX, KillY);
  }

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY))
          .setMIFlags(Prev.getFlags());

  if (SwapRootOperands) {
    std::swap(RegA, NewVR);
    std::swap(KillA, KillNewVR);
  }

  MachineInstrBuilder MIB2 =
      BuildMI(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(KillNewVR))
          .setMIFlags(Root.getFlags());

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);

  // We transformed:
  // B = A op X (Prev)
  // C = B op Y (Root)
  // Into:
  // B = X op Y (MIB1)
  // C = A op B (MIB2)
  // C has the same value as before, B doesn't; as such, keep the debug number
  // of C but not of B.
  if (unsigned OldRootNum = Root.peekDebugInstrNum())
    MIB2.getInstr()->setDebugInstrNum(OldRootNum);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    llvm_unreachable("Unknown pattern for machine combiner");
  }

  // Don't reassociate if Prev and Root are in different blocks.
  if (Prev->getParent() != Root.getParent())
    return;

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const {
  return MachineTraceStrategy::TS_MinInstrCount;
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
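// Sign convention illustrated (the frame size of 24 is hypothetical): on a
// target whose stack grows down, a call-frame-setup instruction with a frame
// size of 24 yields SPAdjust = +24 (24 bytes were allocated), and the
// matching call-frame-destroy yields -24. On a stack that grows up the signs
// flip.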
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  unsigned Width;
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

std::optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;
  bool OffsetIsScalable;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    // If the copy destination is the forwarding reg, describe the forwarding
    // reg using the copy source as the backup location. Example:
    //
    //   x0 = MOV x7
    //   call callee(x0)      ; x0 described as x7
    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // If the target's hook couldn't describe this copy, give up.
    return std::nullopt;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    // Only describe memory which provably does not escape the function. As
    // described in llvm.org/PR43343, escaped memory may be clobbered by the
    // callee (or by another thread).
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
    if (!PSV || PSV->mayAlias(&MFI))
      return std::nullopt;

    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                      TRI))
      return std::nullopt;

    // FIXME: Scalable offsets are not yet handled in the offset code below.
    if (OffsetIsScalable)
      return std::nullopt;

    // TODO: Can currently only handle mem instructions with a single define.
    // An example from the x86 target:
    //   ...
    //   DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax,
    //          implicit-def $rdx
    //   ...
    //
    if (MI.getNumExplicitDefs() != 1)
      return std::nullopt;

    // TODO: In what way do we need to take Reg into consideration here?

    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize());
    Expr = DIExpression::prependOpcodes(Expr, Ops);
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return std::nullopt;
}
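// Illustration of the expression built in the memory case above (offset and
// size are made up): for a load of 8 bytes from [BaseReg + 16], the returned
// location is BaseReg with the DWARF expression
//
//   DW_OP_plus_uconst 16, DW_OP_deref_size 8
//
// i.e. "add 16 to the base register, then dereference 8 bytes there".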
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  //   Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  //   Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  //   Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
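// Example of what these three queries return (virtual registers and
// sub-register indices are illustrative):
//
//   %d = REG_SEQUENCE %a, %subreg.sub0, %b, %subreg.sub1
//     -> InputRegs = { (%a, 0, sub0), (%b, 0, sub1) }
//   %d = EXTRACT_SUBREG %a.sub1, sub0
//     -> InputReg = (%a, sub1, sub0)
//   %d = INSERT_SUBREG %a, %b, sub0
//     -> BaseReg = (%a, 0), InsertedReg = (%b, 0, sub0)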
  unsigned Flag = Op.getImm();
  unsigned Kind = InlineAsm::getKind(Flag);
  OS << InlineAsm::getKindName(Kind);

  unsigned RCID = 0;
  if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID)) {
    if (TRI) {
      OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
    } else
      OS << ":RC" << RCID;
  }

  if (InlineAsm::isMemKind(Flag)) {
    unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
    OS << ":" << InlineAsm::getMemConstraintName(MCID);
  }

  unsigned TiedTo = 0;
  if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
    OS << " tiedto:$" << TiedTo;

  return OS.str();
}

TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;

void TargetInstrInfo::mergeOutliningCandidateAttributes(
    Function &F, std::vector<outliner::Candidate> &Candidates) const {
  // Include target features from an arbitrary candidate for the outlined
  // function. This makes sure the outlined function knows what kinds of
  // instructions are going into it. This is fine, since all parent functions
  // must necessarily support the instructions that are in the outlined region.
  outliner::Candidate &FirstCand = Candidates.front();
  const Function &ParentFn = FirstCand.getMF()->getFunction();
  if (ParentFn.hasFnAttribute("target-features"))
    F.addFnAttr(ParentFn.getFnAttribute("target-features"));
  if (ParentFn.hasFnAttribute("target-cpu"))
    F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));

  // Set nounwind, so we don't generate eh_frame.
  if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
        return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
      }))
    F.addFnAttr(Attribute::NoUnwind);
}
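// A sketch of the decision order implemented below: CFI instructions are
// handed straight to the target hook, inline asm and labels are Illegal,
// debug instructions are Invisible, and operands that cannot safely be moved
// to another function (e.g. basic block references or jump table indices)
// force Illegal before finally deferring to getOutliningTypeImpl.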
outliner::InstrType TargetInstrInfo::getOutliningType(
    MachineBasicBlock::iterator &MIT, unsigned Flags) const {
  MachineInstr &MI = *MIT;

  // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
  // have support for outlining those. Special-case that here.
  if (MI.isCFIInstruction())
    // Just go right to the target implementation.
    return getOutliningTypeImpl(MIT, Flags);

  // Be conservative about inline assembly.
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // Labels generally can't safely be outlined.
  if (MI.isLabel())
    return outliner::InstrType::Illegal;

  // Don't let debug instructions impact analysis.
  if (MI.isDebugInstr())
    return outliner::InstrType::Invisible;

  // Some other special cases.
  switch (MI.getOpcode()) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::LIFETIME_START:
  case TargetOpcode::LIFETIME_END:
    return outliner::InstrType::Invisible;
  default:
    break;
  }

  // Is this a terminator for a basic block?
  if (MI.isTerminator()) {
    // If this is a branch to another block, we can't outline it.
    if (!MI.getParent()->succ_empty())
      return outliner::InstrType::Illegal;

    // Don't outline if the branch is not unconditional.
    if (isPredicated(MI))
      return outliner::InstrType::Illegal;
  }

  // Make sure none of the operands of this instruction do anything that
  // might break if they're moved outside their current function.
  // This includes MachineBasicBlock references, BlockAddresses,
  // constant pool indices, and jump table indices.
  //
  // A quick note on MO_TargetIndex:
  // This doesn't seem to be used in any of the architectures that the
  // MachineOutliner supports, but it was still filtered out in all of them.
  // There was one exception (RISC-V), but MO_TargetIndex also isn't used
  // there. As such, this check is removed both here and in the
  // target-specific implementations. Instead, we assert to make sure this
  // doesn't catch anyone off-guard somewhere down the line.
  for (const MachineOperand &MOP : MI.operands()) {
    // If you hit this assertion, please remove it and adjust
    // `getOutliningTypeImpl` for your target appropriately if necessary.
    // Adding the assertion back to other supported architectures
    // would be nice too :)
    assert(!MOP.isTargetIndex() && "This isn't used quite yet!");

    // CFI instructions should already have been filtered out at this point.
    assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");

    // PrologEpilogInserter should've already run at this point.
    assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");

    if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
      return outliner::InstrType::Illegal;
  }

  // If we don't know, delegate to the target-specific hook.
  return getOutliningTypeImpl(MIT, Flags);
}

bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                             unsigned &Flags) const {
  // Some instrumentations create special target opcodes at the start of the
  // block which expand to code sequences that must be present.
  auto First = MBB.getFirstNonDebugInstr();
  if (First == MBB.end())
    return true;

  if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
      First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
    return false;

  // Some instrumentations create special pseudo-instructions at or just
  // before the end that must be present (e.g. XRay's PATCHABLE_RET).
  // The block is known to be non-empty here, so Last is a valid iterator.
  auto Last = MBB.getLastNonDebugInstr();
  if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
      Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
    return false;

  if (Last != First && Last->isReturn()) {
    --Last;
    if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
        Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
      return false;
  }
  return true;
}