//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {}

const TargetRegisterClass *
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str, const MCAsmInfo &MAI,
    const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}
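// For illustration, assuming a target whose MaxInstLength is 4 and whose
// comment string is "#", the inline asm string
//
//   "insn1\n\tinsn2 # comment\n\t.space 512"
//
// measures as 4 + 4 + 512 = 520 bytes: two maximum-length instructions plus
// the literal byte count parsed from the restricted .space form above.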
/// ReplaceTailWithBranchTo - Delete Tail and everything after it in its
/// basic block, replacing the removed code with an unconditional branch to
/// NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. The target should implement
    // its own commuteInstructionImpl.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Register::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Register::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}
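// For illustration, with a hypothetical two-address opcode ADD whose first
// source is tied to the def, commuting
//
//   %a = ADD %a (tied-def 0), %b
//
// also rewrites the def so the TIED_TO constraint still holds afterwards:
//
//   %b = ADD %b (tied-def 0), %a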
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}
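// For illustration: if an instruction's commutable operands are at indices
// 1 and 2, then fixCommutedOpIndices resolves the request
// {CommuteAnyOperandIndex, 2} to {1, 2} and returns true, while the request
// {1, 3} returns false because {1, 3} is not the commutable pair {1, 2}.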
bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
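// For illustration (ARM-style predication, hypothetical operand layout): an
// always-executed instruction carrying the predicate operands
// {14 /* AL */, $noreg} can be predicated with Pred = {0 /* EQ */, $cpsr},
// which rewrites the immediate and register predicate operands in place.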
bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}
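// For illustration: for an 8-byte register class with a 4-byte subregister
// at bit offset 0, getStackSlotRange yields Size = 4 and Offset = 0 on a
// little-endian target; on a big-endian target the subregister occupies the
// other end of the spill slot, so Offset becomes 8 - (0 + 4) = 4.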
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (Register::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable.
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}
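// For illustration: the pair returned above is (NumDefs, StartIdx). In a
// STACKMAP such as
//
//   STACKMAP <id>, <shadow-bytes>, %live0, %live1, ...
//
// the operands before getVarIdx() (the ID and shadow size) must stay as-is,
// while the live values from StartIdx onward may be folded to stack slots.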
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange asserts if MI is not a patchpoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function
  // arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}
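// For illustration: folding a 4-byte live value of a STACKMAP into frame
// index 2 replaces its single register operand with the four-operand
// indirect form
//
//   StackMaps::IndirectMemRefOp, 4 /* size */, %fixed-stack.2, 0 /* offset */
//
// which the StackMaps emitter later encodes as an indirect location.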
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) || NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) || NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy them from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // A straight COPY may fold as a load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}
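// For illustration: asking to fold operand 0 (the def) of "%dst = COPY %src"
// into frame index FI turns the COPY into a spill of %src via
// storeRegToStackSlot; folding operand 1 (the use) instead produces a reload
// of %dst via loadRegFromStackSlot.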
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}
bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative (this
  //    can be different even for instructions with the same opcode if traits
  //    like fast-math-flags are included).
  // 3. The previous instruction must have virtual register definitions for
  //    its operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode && isAssociativeAndCommutative(*MI1) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
//   A = ? op ?
//   B = A op X (Prev)
//   C = B op Y (Root)
// -->
//   A = ? op ?
//   B = X op Y
//   C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each
// other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't
//    match that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}
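// For illustration, the pattern names encode where the operands sit (see the
// OpIdx table in reassociateOps below): REASSOC_AX_BY means Prev computes
// A op X with A at operand 1, and Root computes B op Y with B (Prev's
// result) at operand 1; the other variants are the commuted placements.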
/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (Register::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (Register::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (Register::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (Register::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (Register::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}
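// For illustration (hypothetical opcode FADD, pattern REASSOC_AX_BY): given
//
//   %b = FADD %a, %x    ; Prev
//   %c = FADD %b, %y    ; Root
//
// reassociateOps queues up
//
//   %new = FADD %x, %y
//   %c   = FADD %a, %new
//
// with %new being the fresh register created above for the critical-path
// recomputation.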
void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AAResults *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Register::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
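// For illustration: a side-effect-free constant materialization such as the
// hypothetical "%x = MOVimm 42" passes the generic checks above (one virtual
// def, no virtual-register uses, no loads or stores), whereas an instruction
// that reads another virtual register fails the "no virtual-register uses"
// test and must be handled by a target-specific override if remat is wanted.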
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
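// For illustration: on a target whose stack grows down, a call-frame setup
// pseudo of size 16 yields SPAdj = +16, and the matching frame-destroy
// yields -16; when the stack grows up, the signs are flipped.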
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block.
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that
// targets may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  unsigned Width;
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}
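// For illustration: with a scheduling model where LoadLatency is 4 and
// HighLatency is 10, defaultDefLatency returns 0 for a transient COPY, 4 for
// a load, 10 for an opcode the target marks as high-latency, and 1 otherwise.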
unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

Optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;
  bool OffsetIsScalable;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    // If the copy destination is the forwarding reg, describe the forwarding
    // reg using the copy source as the backup location. Example:
    //
    //   x0 = MOV x7
    //   call callee(x0)      ; x0 described as x7
    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // Cases where super- or sub-registers need to be described should
    // be handled by the target's hook implementation.
    assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
           "TargetInstrInfo::describeLoadedValue can't describe super- or "
           "sub-regs for copy instructions");
    return None;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    // Only describe memory which provably does not escape the function. As
    // described in llvm.org/PR43343, escaped memory may be clobbered by the
    // callee (or by another thread).
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
    if (!PSV || PSV->mayAlias(&MFI))
      return None;

    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                      TRI))
      return None;

    // FIXME: Scalable offsets are not yet handled in the offset code below.
    if (OffsetIsScalable)
      return None;

    // TODO: Can currently only handle mem instructions with a single define.
    // An example from the x86 target:
    //   ...
    //   DIV64m $rsp, 1, $noreg, 24, $noreg,
    //       implicit-def dead $rax, implicit-def $rdx
    //   ...
    //
    if (MI.getNumExplicitDefs() != 1)
      return None;

    // TODO: In what way do we need to take Reg into consideration here?

    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize());
    Expr = DIExpression::prependOpcodes(Expr, Ops);
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return None;
}
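// For illustration: for a single-def instruction loading 8 bytes from
// [base + 24], the memory case above produces the base operand with the
// expression {DW_OP_plus_uconst 24, DW_OP_deref_size 8}, i.e. "dereference
// 8 bytes at offset 24 past the base register".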
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  //   Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  //   Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  //   Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
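// For illustration: querying
//
//   %d = REG_SEQUENCE %v0, %subreg.sub0, %v1, %subreg.sub1
//
// with getRegSequenceInputs(MI, /*DefIdx=*/0, InputRegs) fills InputRegs
// with (%v0, no-subreg, sub0) and (%v1, no-subreg, sub1); the EXTRACT_SUBREG
// and INSERT_SUBREG queries above decompose their operands analogously.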
// Returns a MIRPrinter comment for this machine operand.
std::string TargetInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  if (!MI.isInlineAsm())
    return "";

  std::string Flags;
  raw_string_ostream OS(Flags);

  if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack.
    unsigned ExtraInfo = Op.getImm();
    bool First = true;
    for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
      if (!First)
        OS << " ";
      First = false;
      OS << Info;
    }

    return OS.str();
  }

  int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
    return "";

  assert(Op.isImm() && "Expected flag operand to be an immediate");
  // Pretty print the inline asm operand descriptor.
  unsigned Flag = Op.getImm();
  unsigned Kind = InlineAsm::getKind(Flag);
  OS << InlineAsm::getKindName(Kind);

  unsigned RCID = 0;
  if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID)) {
    if (TRI) {
      OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
    } else
      OS << ":RC" << RCID;
  }

  if (InlineAsm::isMemKind(Flag)) {
    unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
    OS << ":" << InlineAsm::getMemConstraintName(MCID);
  }

  unsigned TiedTo = 0;
  if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
    OS << " tiedto:$" << TiedTo;

  return OS.str();
}
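// For illustration: for an inline asm flag operand describing a register use
// constrained to a class named GR32 and tied to output operand 0, the
// comment produced above reads "reguse:GR32 tiedto:$0" (class names are
// target-specific).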
TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() {}

void TargetInstrInfo::mergeOutliningCandidateAttributes(
    Function &F, std::vector<outliner::Candidate> &Candidates) const {
  // Include target features from an arbitrary candidate for the outlined
  // function. This makes sure the outlined function knows what kinds of
  // instructions are going into it. This is fine, since all parent functions
  // must necessarily support the instructions that are in the outlined
  // region.
  outliner::Candidate &FirstCand = Candidates.front();
  const Function &ParentFn = FirstCand.getMF()->getFunction();
  if (ParentFn.hasFnAttribute("target-features"))
    F.addFnAttr(ParentFn.getFnAttribute("target-features"));

  // Set nounwind, so we don't generate eh_frame.
  if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
        return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
      }))
    F.addFnAttr(Attribute::NoUnwind);
}

bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                             unsigned &Flags) const {
  // Some instrumentations create special TargetOpcodes at the start of the
  // block which expand to code sequences that must be present.
  auto First = MBB.getFirstNonDebugInstr();
  if (First != MBB.end() &&
      (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
       First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER))
    return false;

  return true;
}