//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cctype>
#include <cstdlib>
#include <cstring>

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden by the target to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value.
/// This is primarily used for creating arbitrary sized inline asm blocks for
/// testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
  const char *Str,
  const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything after
/// it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->isCall())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its
    // own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Register::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Register::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
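  // Illustrative example (not from the original source): for a hypothetical
  // commutable instruction "%dst = ADD %a, %b", the call
  //   TII->commuteInstruction(MI, /*NewMI=*/false, 1, CommuteAnyOperandIndex)
  // lets findCommutedOpIndices() resolve the sentinel to the pair (1, 2),
  // producing "%dst = ADD %b, %a" in place.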
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
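  // A conditional branch is a terminator but not a barrier: control may fall
  // through, so it is reported as unpredicated even when the target could in
  // principle predicate it.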
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
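  // A sub-register slice is only addressable in memory if it spans a whole
  // number of bytes. Illustrative example (not from the original source):
  // spilling a 32-bit sub-register of a 64-bit register yields Size = 4 and
  // Offset = 0 or 4, depending on the sub-register offset and the endianness
  // adjustment below.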
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (Register::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoop(MCInst &NopInst) const {
  llvm_unreachable("Not implemented");
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP: {
    // StackMapLiveValues are foldable.
    StartIdx = StackMapOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    StartIdx = PatchPointOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::STATEPOINT: {
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    StartIdx = StatepointOpers(&MI).getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (is_contained(Ops, i)) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.add(MO);
  }
  return NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
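    // The asserts below check that the target honored the request: folding a
    // def must produce a store and folding a use must produce a load.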
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
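  // ("Trace" refers to the instruction trace that the MachineCombiner
  // examines via MachineTraceMetrics; restricting both defs to this block
  // guarantees they have depth information in that trace.)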
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
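/// The default implementation conservatively returns false; targets whose
/// combiner patterns optimize for throughput rather than critical path
/// length are expected to override this.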
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (Register::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (Register::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (Register::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (Register::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (Register::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
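  // The caller (the MachineCombiner) evaluates both sequences and commits the
  // replacement only if it is considered profitable, e.g. if it shortens the
  // critical path.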
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AAResults *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Register::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
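        // (Re-executing the def at another program point could clobber a
        // value that is live in that physical register there.)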
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

Optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const MachineOperand *Op = nullptr;
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  const MachineOperand *SrcRegOp, *DestRegOp;

  if (isCopyInstr(MI, SrcRegOp, DestRegOp)) {
    Op = SrcRegOp;
    return ParamLoadedValue(*Op, Expr);
  } else if (MI.isMoveImmediate()) {
    Op = &MI.getOperand(1);
    return ParamLoadedValue(*Op, Expr);
  }

  return None;
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  //   Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindexes of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
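    // Illustrative example (not from the original source): for
    //   %d = REG_SEQUENCE %a, sub0, %b, sub1
    // this loop records (%a, 0, sub0) and (%b, 0, sub1), skipping any input
    // operand that is undef.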
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  //   Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  //   Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() {}