//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the target machine instruction set to the code generator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETINSTRINFO_H
#define LLVM_CODEGEN_TARGETINSTRINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Uniformity.h"
#include "llvm/CodeGen/MIRFormatter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineCycleAnalysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOutliner.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TypeSize.h"
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

namespace llvm {

class DFAPacketizer;
class InstrItineraryData;
class LiveIntervals;
class LiveVariables;
class MachineLoop;
class MachineMemOperand;
class MachineModuleInfo;
class MachineRegisterInfo;
class MCAsmInfo;
class MCInst;
struct MCSchedModel;
class Module;
class ScheduleDAG;
class ScheduleDAGMI;
class ScheduleHazardRecognizer;
class SDNode;
class SelectionDAG;
class SMSchedule;
class SwingSchedulerDAG;
class RegScavenger;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetSchedModel;
class TargetSubtargetInfo;
enum class MachineTraceStrategy;

template <class T> class SmallVectorImpl;

using ParamLoadedValue = std::pair<MachineOperand, DIExpression *>;

struct DestSourcePair {
  const MachineOperand *Destination;
  const MachineOperand *Source;

  DestSourcePair(const MachineOperand &Dest, const MachineOperand &Src)
      : Destination(&Dest), Source(&Src) {}
};

/// Used to describe a register and immediate addition.
struct RegImmPair {
  Register Reg;
  int64_t Imm;

  RegImmPair(Register Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {}
};

/// Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
/// It holds the register values, the scale value and the displacement.
/// It also holds a descriptor for the expression used to calculate the address
/// from the operands.
struct ExtAddrMode {
  enum class Formula {
    Basic = 0,         // BaseReg + ScaledReg * Scale + Displacement
    SExtScaledReg = 1, // BaseReg + sext(ScaledReg) * Scale + Displacement
    ZExtScaledReg = 2  // BaseReg + zext(ScaledReg) * Scale + Displacement
  };

  Register BaseReg;
  Register ScaledReg;
  int64_t Scale = 0;
  int64_t Displacement = 0;
  Formula Form = Formula::Basic;
  ExtAddrMode() = default;
};
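// Illustrative sketch (not part of the API): how the three ExtAddrMode
// formulas would evaluate, assuming hypothetical integer values Base and
// Scaled already read out of BaseReg/ScaledReg, with a 32-bit scaled source.
//
//   int64_t evalExtAddrMode(const ExtAddrMode &AM, int64_t Base,
//                           int64_t Scaled) {
//     int64_t S = Scaled;
//     if (AM.Form == ExtAddrMode::Formula::SExtScaledReg)
//       S = (int64_t)(int32_t)Scaled;  // sext(ScaledReg)
//     else if (AM.Form == ExtAddrMode::Formula::ZExtScaledReg)
//       S = (int64_t)(uint32_t)Scaled; // zext(ScaledReg)
//     return Base + S * AM.Scale + AM.Displacement;
//   }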
//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
///
class LLVM_ABI TargetInstrInfo : public MCInstrInfo {
public:
  TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
                  unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
      : CallFrameSetupOpcode(CFSetupOpcode),
        CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
        ReturnOpcode(ReturnOpcode) {}
  TargetInstrInfo(const TargetInstrInfo &) = delete;
  TargetInstrInfo &operator=(const TargetInstrInfo &) = delete;
  virtual ~TargetInstrInfo();

  static bool isGenericOpcode(unsigned Opc) {
    return Opc <= TargetOpcode::GENERIC_OP_END;
  }

  static bool isGenericAtomicRMWOpcode(unsigned Opc) {
    return Opc >= TargetOpcode::GENERIC_ATOMICRMW_OP_START &&
           Opc <= TargetOpcode::GENERIC_ATOMICRMW_OP_END;
  }

  /// Given a machine instruction descriptor, returns the register
  /// class constraint for OpNum, or NULL.
  virtual const TargetRegisterClass *
  getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
              const TargetRegisterInfo *TRI, const MachineFunction &MF) const;

  /// Returns true if MI is an instruction we are unable to reason about
  /// (like a call or something with unmodeled side effects).
  virtual bool isGlobalMemoryObject(const MachineInstr *MI) const;

  /// Return true if the instruction is trivially rematerializable, meaning it
  /// has no side effects and requires no operands that aren't always
  /// available. This means the only allowed uses are constants and
  /// unallocatable physical registers, so that the instruction's result is
  /// independent of its location in the function.
  bool isTriviallyReMaterializable(const MachineInstr &MI) const {
    return (MI.getOpcode() == TargetOpcode::IMPLICIT_DEF &&
            MI.getNumOperands() == 1) ||
           (MI.getDesc().isRematerializable() &&
            isReallyTriviallyReMaterializable(MI));
  }
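  // Illustrative sketch (not part of the API): a pass would typically gate
  // cloning a value's defining instruction on this predicate, assuming MRI
  // is the function's MachineRegisterInfo and Reg is a virtual register.
  //
  //   if (MachineInstr *Def = MRI.getUniqueVRegDef(Reg))
  //     if (TII.isTriviallyReMaterializable(*Def))
  //       ; // safe to re-emit Def at the use point instead of copying Reg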
  /// Given \p MO is a PhysReg use, return whether it can be ignored for the
  /// purpose of instruction rematerialization or sinking.
  virtual bool isIgnorableUse(const MachineOperand &MO) const { return false; }

  virtual bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo,
                            MachineCycleInfo *CI) const {
    return true;
  }

  /// For a "cheap" instruction which doesn't enable additional sinking,
  /// should MachineSink break a critical edge to sink it anyway?
  virtual bool shouldBreakCriticalEdgeToSink(MachineInstr &MI) const {
    return false;
  }

protected:
  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
  /// set, this hook lets the target specify whether the instruction is
  /// actually trivially rematerializable, taking into consideration its
  /// operands. This predicate must return false if the instruction has any
  /// side effects other than producing a value, or if it requires any address
  /// registers that are not always available.
  virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const;

  /// This method commutes the operands of the given machine instruction MI.
  /// The operands to be commuted are specified by their indices OpIdx1 and
  /// OpIdx2.
  ///
  /// If a target has any instructions that are commutable but require
  /// converting to different instructions or making non-trivial changes
  /// to commute them, this method can be overridden to do that.
  /// The default implementation simply swaps the commutable operands.
  ///
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned.
  ///
  /// Do not call this method for a non-commutable instruction.
  /// Even though the instruction is commutable, the method may still fail to
  /// commute the operands; a null pointer is returned in such cases.
  virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                               unsigned OpIdx1,
                                               unsigned OpIdx2) const;

  /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
  /// operand indices to (ResultIdx1, ResultIdx2).
  /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
  /// predefined to some indices or be undefined (designated by the special
  /// value 'CommuteAnyOperandIndex').
  /// The predefined result indices cannot be re-defined.
  /// The function returns true iff after the result pair redefinition
  /// the fixed result pair is equal to or equivalent to the source pair of
  /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
  /// the pairs (x,y) and (y,x) are equivalent.
  static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
                                   unsigned CommutableOpIdx1,
                                   unsigned CommutableOpIdx2);
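  // Illustrative sketch (not part of the API): a target override of
  // findCommutedOpIndices (declared below) commonly funnels its candidate
  // indices through fixCommutedOpIndices; here operands 1 and 2 are assumed
  // to be the commutable pair, and MyTII is a hypothetical target class.
  //
  //   bool MyTII::findCommutedOpIndices(const MachineInstr &MI,
  //                                     unsigned &SrcOpIdx1,
  //                                     unsigned &SrcOpIdx2) const {
  //     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  //   }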
public:
  /// These methods return the opcode of the frame setup/destroy instructions
  /// if they exist (-1 otherwise). Some targets use pseudo instructions in
  /// order to abstract away the difference between operating with a frame
  /// pointer and operating without, through the use of these two instructions.
  /// A FrameSetup MI in MF implies MFI::AdjustsStack.
  unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
  unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }

  /// Returns true if the argument is a frame pseudo instruction.
  bool isFrameInstr(const MachineInstr &I) const {
    return I.getOpcode() == getCallFrameSetupOpcode() ||
           I.getOpcode() == getCallFrameDestroyOpcode();
  }

  /// Returns true if the argument is a frame setup pseudo instruction.
  bool isFrameSetup(const MachineInstr &I) const {
    return I.getOpcode() == getCallFrameSetupOpcode();
  }

  /// Returns the size of the frame associated with the given frame
  /// instruction. For a frame setup instruction this is the frame space set
  /// up after the instruction. For a frame destroy instruction this is the
  /// frame freed by the caller.
  /// Note, in some cases a call frame (or a part of it) may be prepared prior
  /// to the frame setup instruction. It occurs in calls that involve inalloca
  /// arguments. This function reports only the size of the frame part that is
  /// set up between the frame setup and destroy pseudo instructions.
  int64_t getFrameSize(const MachineInstr &I) const {
    assert(isFrameInstr(I) && "Not a frame instruction");
    assert(I.getOperand(0).getImm() >= 0);
    return I.getOperand(0).getImm();
  }

  /// Returns the total frame size, which is made up of the space set up
  /// inside the pair of frame start-stop instructions and the space that is
  /// set up prior to the pair.
  int64_t getFrameTotalSize(const MachineInstr &I) const {
    if (isFrameSetup(I)) {
      assert(I.getOperand(1).getImm() >= 0 &&
             "Frame size must not be negative");
      return getFrameSize(I) + I.getOperand(1).getImm();
    }
    return getFrameSize(I);
  }
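  // Illustrative sketch (not part of the API): summing the stack space set
  // up by the call sequences in a block, assuming TII is this instruction
  // info object and MBB a MachineBasicBlock.
  //
  //   int64_t CallFrameBytes = 0;
  //   for (const MachineInstr &MI : MBB)
  //     if (TII.isFrameSetup(MI))
  //       CallFrameBytes += TII.getFrameTotalSize(MI);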
  unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
  unsigned getReturnOpcode() const { return ReturnOpcode; }

  /// Returns the actual stack pointer adjustment made by an instruction
  /// as part of a call sequence. By default, only call frame setup/destroy
  /// instructions adjust the stack, but targets may want to override this
  /// to enable more fine-grained adjustment, or adjust by a different value.
  virtual int getSPAdjust(const MachineInstr &MI) const;

  /// Return true if the instruction is a "coalescable" extension instruction.
  /// That is, it's like a copy where it's legal for the source to overlap the
  /// destination, e.g. X86::MOVSX64rr32. If this returns true, then it's
  /// expected that the pre-extension value is available as a subreg of the
  /// result register. This also returns the sub-register index in SubIdx.
  virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                                     Register &DstReg, unsigned &SubIdx) const {
    return false;
  }

  /// If the specified machine instruction is a direct
  /// load from a stack slot, return the virtual or physical register number
  /// of the destination along with the FrameIndex of the loaded stack slot.
  /// If not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than loading from the stack slot.
  virtual Register isLoadFromStackSlot(const MachineInstr &MI,
                                       int &FrameIndex) const {
    return 0;
  }

  /// Optional extension of isLoadFromStackSlot that returns the number of
  /// bytes loaded from the stack. This must be implemented if a backend
  /// supports partial stack slot spills/loads to further disambiguate
  /// what the load does.
  virtual Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex,
                                       TypeSize &MemBytes) const {
    MemBytes = TypeSize::getZero();
    return isLoadFromStackSlot(MI, FrameIndex);
  }

  /// Check for post-frame ptr elimination stack locations as well.
  /// This uses a heuristic, so it isn't reliable for correctness.
  virtual Register isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                             int &FrameIndex) const {
    return 0;
  }

  /// If the specified machine instruction has a load from a stack slot,
  /// return true along with the FrameIndices of the loaded stack slot and the
  /// machine mem operands containing the reference.
  /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
  /// any instruction that loads from the stack. This is just a hint, as some
  /// cases may be missed.
  virtual bool hasLoadFromStackSlot(
      const MachineInstr &MI,
      SmallVectorImpl<const MachineMemOperand *> &Accesses) const;

  /// If the specified machine instruction is a direct
  /// store to a stack slot, return the virtual or physical register number of
  /// the source reg along with the FrameIndex of the stack slot stored to.
  /// If not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than storing to the stack slot.
  virtual Register isStoreToStackSlot(const MachineInstr &MI,
                                      int &FrameIndex) const {
    return 0;
  }

  /// Optional extension of isStoreToStackSlot that returns the number of
  /// bytes stored to the stack. This must be implemented if a backend
  /// supports partial stack slot spills/loads to further disambiguate
  /// what the store does.
  virtual Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex,
                                      TypeSize &MemBytes) const {
    MemBytes = TypeSize::getZero();
    return isStoreToStackSlot(MI, FrameIndex);
  }

  /// Check for post-frame ptr elimination stack locations as well.
  /// This uses a heuristic, so it isn't reliable for correctness.
  virtual Register isStoreToStackSlotPostFE(const MachineInstr &MI,
                                            int &FrameIndex) const {
    return 0;
  }

  /// If the specified machine instruction has a store to a stack slot,
  /// return true along with the FrameIndices of the stored-to stack slot and
  /// the machine mem operands containing the reference.
  /// If not, return false. Unlike isStoreToStackSlot,
  /// this returns true for any instruction that stores to the
  /// stack. This is just a hint, as some cases may be missed.
  virtual bool hasStoreToStackSlot(
      const MachineInstr &MI,
      SmallVectorImpl<const MachineMemOperand *> &Accesses) const;
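  // Illustrative sketch (not part of the API): recognizing a reload of a
  // frame index FI of interest so a pass can reuse the spilled value.
  //
  //   int FrameIndex;
  //   if (Register DstReg = TII.isLoadFromStackSlot(MI, FrameIndex))
  //     if (FrameIndex == FI)
  //       ; // MI reloads slot FI into DstReg and has no other side effects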
  /// Return true if the specified machine instruction
  /// is a copy of one stack slot to another and has no other effect.
  /// Provide the identity of the two frame indices.
  virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
                               int &SrcFrameIndex) const {
    return false;
  }

  /// Compute the size in bytes and offset within a stack slot of a spilled
  /// register or subregister.
  ///
  /// \param [out] Size in bytes of the spilled value.
  /// \param [out] Offset in bytes within the stack slot.
  /// \returns true if both Size and Offset are successfully computed.
  ///
  /// Not all subregisters have computable spill slots. For example,
  /// subregisters may not be byte-sized, and a pair of discontiguous
  /// subregisters has no single offset.
  ///
  /// Targets with nontrivial big-endian implementations may need to override
  /// this, particularly to support spilled vector registers.
  virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
                                 unsigned &Size, unsigned &Offset,
                                 const MachineFunction &MF) const;

  /// Return true if the given instruction is a terminator that is
  /// unspillable, according to isUnspillableTerminatorImpl.
  bool isUnspillableTerminator(const MachineInstr *MI) const {
    return MI->isTerminator() && isUnspillableTerminatorImpl(MI);
  }

  /// Returns the size in bytes of the specified MachineInstr, or ~0U
  /// when this function is not implemented by a target.
  virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
    return ~0U;
  }

  /// Return true if the instruction is as cheap as a move instruction.
  ///
  /// Targets for different archs need to override this, and the answer can
  /// be tuned further for particular micro-architectures.
  virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
    return MI.isAsCheapAsAMove();
  }

  /// Return true if the instruction should be sunk by MachineSink.
  ///
  /// MachineSink determines on its own whether the instruction is safe to
  /// sink; this gives the target a hook to override the default behavior
  /// with regards to which instructions should be sunk.
  virtual bool shouldSink(const MachineInstr &MI) const { return true; }

  /// Return false if the instruction should not be hoisted by MachineLICM.
  ///
  /// MachineLICM determines on its own whether the instruction is safe to
  /// hoist; this gives the target a hook to extend this assessment and
  /// prevent an instruction being hoisted from a given loop for target
  /// specific reasons.
  virtual bool shouldHoist(const MachineInstr &MI,
                           const MachineLoop *FromLoop) const {
    return true;
  }

  /// Re-issue the specified 'original' instruction at the
  /// specific location targeting a new destination register.
  /// The register in Orig->getOperand(0).getReg() will be substituted by
  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
  /// SubIdx.
  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI, Register DestReg,
                             unsigned SubIdx, const MachineInstr &Orig,
                             const TargetRegisterInfo &TRI) const;

  /// Clones the instruction or the whole instruction bundle \p Orig and
  /// inserts it into \p MBB before \p InsertBefore. The target may update
  /// operands that are required to be unique.
  ///
  /// \p Orig must not return true for MachineInstr::isNotDuplicable().
  virtual MachineInstr &duplicate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator InsertBefore,
                                  const MachineInstr &Orig) const;
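  // Illustrative sketch (not part of the API): callers must check
  // isNotDuplicable() before cloning, e.g. in a tail-duplication style
  // transform.
  //
  //   if (!Orig.isNotDuplicable())
  //     MachineInstr &Clone = TII.duplicate(MBB, MBB.end(), Orig);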
  /// This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
  /// may be able to convert a two-address instruction into one or more true
  /// three-address instructions on demand. This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if
  /// they would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the last new instruction.
  ///
  /// If \p LIS is not nullptr, the LiveIntervals info should be updated for
  /// replacing \p MI with new instructions, even though this function does
  /// not remove MI.
  virtual MachineInstr *convertToThreeAddress(MachineInstr &MI,
                                              LiveVariables *LV,
                                              LiveIntervals *LIS) const {
    return nullptr;
  }

  // This constant can be used as an input value of operand index passed to
  // the method findCommutedOpIndices() to tell the method that the
  // corresponding operand index is not pre-defined and that the method
  // can pick any commutable operand.
  static const unsigned CommuteAnyOperandIndex = ~0U;

  /// This method commutes the operands of the given machine instruction MI.
  ///
  /// The operands to be commuted are specified by their indices OpIdx1 and
  /// OpIdx2. The OpIdx1 and OpIdx2 arguments may be set to the special value
  /// 'CommuteAnyOperandIndex', which means that the method is free to choose
  /// any commutable operand. If both arguments are set to
  /// 'CommuteAnyOperandIndex' then the method looks for two different
  /// commutable operands and commutes them if such operands could be found.
  ///
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned.
  ///
  /// Do not call this method for a non-commutable instruction or
  /// for non-commutable operands.
  /// Even though the instruction is commutable, the method may still fail to
  /// commute the operands; a null pointer is returned in such cases.
  MachineInstr *
  commuteInstruction(MachineInstr &MI, bool NewMI = false,
                     unsigned OpIdx1 = CommuteAnyOperandIndex,
                     unsigned OpIdx2 = CommuteAnyOperandIndex) const;
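  // Illustrative sketch (not part of the API): commuting in place with
  // operands chosen by the target, then creating a commuted copy of MI with
  // operands 1 and 2 swapped explicitly.
  //
  //   if (MachineInstr *C = TII.commuteInstruction(MI))
  //     ; // MI was commuted in place, C == &MI
  //   if (MachineInstr *C = TII.commuteInstruction(MI, /*NewMI=*/true, 1, 2))
  //     ; // C is a new instruction; MI is left untouched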
  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
  /// If any of the INPUT values is set to the special value
  /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
  /// operand, then returns its index in the corresponding argument.
  /// If both of the INPUT values are set to 'CommuteAnyOperandIndex' then the
  /// method looks for two commutable operands.
  /// If the INPUT values refer to some operands of MI, then the method simply
  /// returns true if the corresponding operands are commutable and returns
  /// false otherwise.
  ///
  /// For example, calling this method this way:
  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///     findCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with operand #1.
  virtual bool findCommutedOpIndices(const MachineInstr &MI,
                                     unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const;

  /// Returns true if the target has a preference on the operand order of the
  /// given machine instruction, and specifies in \p Commute whether a commute
  /// is required to get the desired operand order.
  virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const {
    return false;
  }

  /// If possible, converts the instruction to a simplified/canonical form.
  /// Returns true if the instruction was modified.
  ///
  /// This function is only called after register allocation. The MI will be
  /// modified in place. This is called by passes such as
  /// MachineCopyPropagation, where their mutation of the MI operands may
  /// expose opportunities to convert the instruction to a simpler form (e.g.
  /// a load of 0).
  virtual bool simplifyInstruction(MachineInstr &MI) const { return false; }

  /// A pair composed of a register and a sub-register index.
  /// Used to give some type checking when modeling Reg:SubReg.
  struct RegSubRegPair {
    Register Reg;
    unsigned SubReg;

    RegSubRegPair(Register Reg = Register(), unsigned SubReg = 0)
        : Reg(Reg), SubReg(SubReg) {}

    bool operator==(const RegSubRegPair &P) const {
      return Reg == P.Reg && SubReg == P.SubReg;
    }
    bool operator!=(const RegSubRegPair &P) const { return !(*this == P); }
  };

  /// A pair composed of a pair of a register and a sub-register index,
  /// and another sub-register index.
  /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
  struct RegSubRegPairAndIdx : RegSubRegPair {
    unsigned SubIdx;

    RegSubRegPairAndIdx(Register Reg = Register(), unsigned SubReg = 0,
                        unsigned SubIdx = 0)
        : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
  };

  /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
  /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
  /// flag are not added to this list.
  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
  /// two elements:
  /// - %1:sub1, sub0
  /// - %2<:0>, sub1
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isRegSequenceLike(). In other words, one has to override
  /// getRegSequenceLikeInputs for target specific instructions.
  bool
  getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
                       SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
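  // Illustrative sketch (not part of the API): recovering the inputs of a
  // REG_SEQUENCE-like def at operand index 0.
  //
  //   SmallVector<RegSubRegPairAndIdx, 4> Inputs;
  //   if (TII.getRegSequenceInputs(MI, /*DefIdx=*/0, Inputs))
  //     for (const RegSubRegPairAndIdx &In : Inputs)
  //       ; // In.Reg:In.SubReg is placed at sub-register index In.SubIdx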
  /// Build the equivalent inputs of an EXTRACT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
  /// - %1:sub1, sub0
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
  /// False otherwise.
  ///
  /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isExtractSubregLike(). In other words, one has to override
  /// getExtractSubregLikeInputs for target specific instructions.
  bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                              RegSubRegPairAndIdx &InputReg) const;

  /// Build the equivalent inputs of an INSERT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] BaseReg and \p [out] InsertedReg contain
  /// the equivalent inputs of INSERT_SUBREG.
  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
  /// - BaseReg: %0:sub0
  /// - InsertedReg: %1:sub1, sub3
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
  /// False otherwise.
  ///
  /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isInsertSubregLike(). In other words, one has to override
  /// getInsertSubregLikeInputs for target specific instructions.
  bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                             RegSubRegPair &BaseReg,
                             RegSubRegPairAndIdx &InsertedReg) const;

  /// Return true if two machine instructions would produce identical values.
  /// By default, this is only true when the two instructions
  /// are deemed identical except for defs. If this function is called when
  /// the IR is still in SSA form, the caller can pass the
  /// MachineRegisterInfo for aggressive checks.
  virtual bool produceSameValue(const MachineInstr &MI0,
                                const MachineInstr &MI1,
                                const MachineRegisterInfo *MRI = nullptr) const;

  /// \returns true if a branch from an instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  virtual bool isBranchOffsetInRange(unsigned BranchOpc,
                                     int64_t BrOffset) const {
    llvm_unreachable("target did not implement");
  }

  /// \returns The block that branch instruction \p MI jumps to.
  virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const {
    llvm_unreachable("target did not implement");
  }

  /// Insert an unconditional indirect branch at the end of \p MBB to \p
  /// NewDestBB. Optionally, insert the clobbered register restoring in \p
  /// RestoreBB. \p BrOffset indicates the offset of \p NewDestBB relative to
  /// the offset of the position to insert the new branch.
  virtual void insertIndirectBranch(MachineBasicBlock &MBB,
                                    MachineBasicBlock &NewDestBB,
                                    MachineBasicBlock &RestoreBB,
                                    const DebugLoc &DL, int64_t BrOffset = 0,
                                    RegScavenger *RS = nullptr) const {
    llvm_unreachable("target did not implement");
  }
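  // Illustrative sketch (not part of the API): the shape of a branch
  // relaxation query, assuming DestOffset and BrOffset are code offsets the
  // caller has already computed.
  //
  //   if (!TII.isBranchOffsetInRange(MI.getOpcode(), DestOffset - BrOffset))
  //     ; // out of range: rewrite via insertIndirectBranch and a RestoreBB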
  /// Analyze the branching code at the end of MBB, returning
  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
  /// implemented for a target). Upon success, this returns false and returns
  /// with the following information in various cases:
  ///
  /// 1. If this block ends with no branches (it just falls through to its
  ///    succ), just return false, leaving TBB/FBB null.
  /// 2. If this block ends with only an unconditional branch, it sets TBB to
  ///    be the destination block.
  /// 3. If this block ends with a conditional branch and it falls through to
  ///    a successor block, it sets TBB to be the branch destination block and
  ///    a list of operands that evaluate the condition. These operands can be
  ///    passed to other TargetInstrInfo methods to create new branches.
  /// 4. If this block ends with a conditional branch followed by an
  ///    unconditional branch, it returns the 'true' destination in TBB, the
  ///    'false' destination in FBB, and a list of operands that evaluate the
  ///    condition. These operands can be passed to other TargetInstrInfo
  ///    methods to create new branches.
  ///
  /// Note that removeBranch and insertBranch must be implemented to support
  /// cases where this method returns success.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
  /// before calling this function.
  virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify = false) const {
    return true;
  }
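  // Illustrative sketch (not part of the API): the canonical analyzeBranch
  // consumer pattern, mapping the four documented cases.
  //
  //   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  //   SmallVector<MachineOperand, 4> Cond;
  //   if (!TII.analyzeBranch(MBB, TBB, FBB, Cond)) {
  //     if (!TBB)
  //       ; // case 1: pure fall-through
  //     else if (Cond.empty())
  //       ; // case 2: unconditional branch to TBB
  //     else if (!FBB)
  //       ; // case 3: conditional branch to TBB, falls through otherwise
  //     else
  //       ; // case 4: conditional to TBB, unconditional to FBB
  //   }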
  /// Represents a predicate at the MachineFunction level. The control flow a
  /// MachineBranchPredicate represents is:
  ///
  ///  Reg = LHS `Predicate` RHS           == ConditionDef
  ///  if Reg then goto TrueDest else goto FalseDest
  ///
  struct MachineBranchPredicate {
    enum ComparePredicate {
      PRED_EQ,     // True if two values are equal
      PRED_NE,     // True if two values are not equal
      PRED_INVALID // Sentinel value
    };

    ComparePredicate Predicate = PRED_INVALID;
    MachineOperand LHS = MachineOperand::CreateImm(0);
    MachineOperand RHS = MachineOperand::CreateImm(0);
    MachineBasicBlock *TrueDest = nullptr;
    MachineBasicBlock *FalseDest = nullptr;
    MachineInstr *ConditionDef = nullptr;

    /// SingleUseCondition is true if ConditionDef is dead except for the
    /// branch(es) at the end of the basic block.
    ///
    bool SingleUseCondition = false;

    explicit MachineBranchPredicate() = default;
  };

  /// Analyze the branching code at the end of MBB and parse it into the
  /// MachineBranchPredicate structure if possible. Returns false on success
  /// and true on failure.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                                      MachineBranchPredicate &MBP,
                                      bool AllowModify = false) const {
    return true;
  }

  /// Remove the branching code at the end of the specific MBB.
  /// This is only invoked in cases where analyzeBranch returns success. It
  /// returns the number of instructions that were removed.
  /// If \p BytesRemoved is non-null, report the change in code size from the
  /// removed instructions.
  virtual unsigned removeBranch(MachineBasicBlock &MBB,
                                int *BytesRemoved = nullptr) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
  }

  /// Insert branch code into the end of the specified MachineBasicBlock. The
  /// operands to this method are the same as those returned by analyzeBranch.
  /// This is only invoked in cases where analyzeBranch returns success. It
  /// returns the number of instructions inserted. If \p BytesAdded is
  /// non-null, report the change in code size from the added instructions.
  ///
  /// It is also invoked by tail merging to add unconditional branches in
  /// cases where analyzeBranch doesn't apply because there was no original
  /// branch to analyze. At least this much must be implemented, else tail
  /// merging needs to be disabled.
  ///
  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
  /// before calling this function.
  virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                ArrayRef<MachineOperand> Cond,
                                const DebugLoc &DL,
                                int *BytesAdded = nullptr) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
  }

  unsigned insertUnconditionalBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *DestBB,
                                     const DebugLoc &DL,
                                     int *BytesAdded = nullptr) const {
    return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
                        BytesAdded);
  }
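  // Illustrative sketch (not part of the API): retargeting a terminator that
  // analyzeBranch understood, reusing the Cond operands it produced.
  //
  //   TII.removeBranch(MBB);
  //   TII.insertBranch(MBB, NewTBB, NewFBB, Cond, DL);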
  /// Object returned by analyzeLoopForPipelining. Allows software pipelining
  /// implementations to query attributes of the loop being pipelined and to
  /// apply target-specific updates to the loop once pipelining is complete.
  class LLVM_ABI PipelinerLoopInfo {
  public:
    virtual ~PipelinerLoopInfo();

    /// Return true if the given instruction should not be pipelined and
    /// should be ignored. An example could be a loop comparison, or an
    /// induction variable update with no users being pipelined.
    virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0;

    /// Return true if the proposed schedule should be used. Otherwise return
    /// false to not pipeline the loop. This function should be used to ensure
    /// that pipelined loops meet target-specific quality heuristics.
    virtual bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS) {
      return true;
    }

    /// Create a condition to determine if the trip count of the loop is
    /// greater than TC, where TC is always one more than for the previous
    /// prologue or 0 if this is being called for the outermost prologue.
    ///
    /// If the trip count is statically known to be greater than TC, return
    /// true. If the trip count is statically known to be not greater than TC,
    /// return false. Otherwise return nullopt and fill out Cond with the test
    /// condition.
    ///
    /// Note: This hook is guaranteed to be called from the innermost to the
    /// outermost prologue of the loop being software pipelined.
    virtual std::optional<bool>
    createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
                                    SmallVectorImpl<MachineOperand> &Cond) = 0;

    /// Create a condition to determine if the remaining trip count for a
    /// phase is greater than TC. Some instructions such as comparisons may be
    /// inserted at the bottom of MBB. All instructions expanded for the
    /// phase must be inserted in MBB before calling this function.
    /// LastStage0Insts is the map from the original instructions scheduled at
    /// stage #0 to the expanded instructions for the last iteration of the
    /// kernel. LastStage0Insts is intended to obtain the instruction that
    /// refers to the latest loop counter value.
    ///
    /// MBB can also be a predecessor of the prologue block. Then
    /// LastStage0Insts must be empty and the compared value is the initial
    /// value of the trip count.
    virtual void createRemainingIterationsGreaterCondition(
        int TC, MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &Cond,
        DenseMap<MachineInstr *, MachineInstr *> &LastStage0Insts) {
      llvm_unreachable(
          "Target didn't implement "
          "PipelinerLoopInfo::createRemainingIterationsGreaterCondition!");
    }

    /// Modify the loop such that the trip count is
    /// OriginalTC + TripCountAdjust.
    virtual void adjustTripCount(int TripCountAdjust) = 0;

    /// Called when the loop's preheader has been modified to NewPreheader.
    virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0;

    /// Called when the loop is being removed. Any instructions in the
    /// preheader should be removed.
    ///
    /// Once this function is called, no other functions on this object are
    /// valid; the loop has been removed.
    virtual void disposed(LiveIntervals *LIS = nullptr) {}

    /// Return true if the target can expand a pipelined schedule with modulo
    /// variable expansion.
    virtual bool isMVEExpanderSupported() { return false; }
  };

  /// Analyze loop L, which must be a single-basic-block loop, and if the
  /// conditions can be understood enough, produce a PipelinerLoopInfo object.
  virtual std::unique_ptr<PipelinerLoopInfo>
  analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
    return nullptr;
  }

  /// Analyze the loop code, return true if it cannot be understood. Upon
  /// success, this function returns false and returns information about the
  /// induction variable and compare instruction used at the end.
  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
                           MachineInstr *&CmpInst) const {
    return true;
  }
  /// Generate code to reduce the loop iteration by one and check if the loop
  /// is finished. Return the value/register of the new loop count. We need
  /// this function when peeling off one or more iterations of a loop. This
  /// function assumes the nth iteration is peeled first.
  virtual unsigned reduceLoopCount(MachineBasicBlock &MBB,
                                   MachineBasicBlock &PreHeader,
                                   MachineInstr *IndVar, MachineInstr &Cmp,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   SmallVectorImpl<MachineInstr *> &PrevInsts,
                                   unsigned Iter, unsigned MaxIter) const {
    llvm_unreachable("Target didn't implement ReduceLoopCount");
  }

  /// Delete the instruction OldInst and everything after it, replacing it
  /// with an unconditional branch to NewDest. This is used by the tail
  /// merging pass.
  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                       MachineBasicBlock *NewDest) const;

  /// Return true if it's legal to split the given basic
  /// block at the specified instruction (i.e. the instruction would be the
  /// start of a new basic block).
  virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI) const {
    return true;
  }

  /// Return true if it's profitable to predicate
  /// instructions with accumulated instruction latency of "NumCycles"
  /// of the specified basic block, where the probability of the instructions
  /// being executed is given by Probability, and Confidence is a measure
  /// of our confidence that they will be properly predicted.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
    return false;
  }

  /// Second variant of isProfitableToIfCvt. This one
  /// checks for the case where two basic blocks from the true and false
  /// paths of an if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB, unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
    return false;
  }

  /// Return true if it's profitable for the if-converter to duplicate
  /// instructions of the specified accumulated instruction latencies in the
  /// specified MBB to enable if-conversion.
  /// The probability of the instructions being executed is given by
  /// Probability, and Confidence is a measure of our confidence that they
  /// will be properly predicted.
  virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         BranchProbability Probability) const {
    return false;
  }
  /// Return the increase in code size needed to predicate a contiguous run of
  /// NumInsts instructions.
  virtual unsigned extraSizeToPredicateInstructions(const MachineFunction &MF,
                                                    unsigned NumInsts) const {
    return 0;
  }

  /// Return an estimate for the code size reduction (in bytes) which will be
  /// caused by removing the given branch instruction during if-conversion.
  virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const {
    return getInstSizeInBytes(MI);
  }

  /// Return true if it's profitable to unpredicate
  /// one side of a 'diamond', i.e. two sides of an if-else predicated on
  /// mutually exclusive predicates.
  /// e.g.
  ///   subeq r0, r1, #1
  ///   addne r0, r1, #1
  /// =>
  ///   sub   r0, r1, #1
  ///   addne r0, r1, #1
  ///
  /// This may be profitable if conditional instructions are always executed.
  virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
    return false;
  }

  /// Return true if it is possible to insert a select
  /// instruction that chooses between TrueReg and FalseReg based on the
  /// condition code in Cond.
  ///
  /// When successful, also return the latency in cycles from TrueReg,
  /// FalseReg, and Cond to the destination register. In most cases, a select
  /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
  ///
  /// Some x86 implementations have 2-cycle cmov instructions.
  ///
  /// @param MBB         Block where the select instruction would be inserted.
  /// @param Cond        Condition returned by analyzeBranch.
  /// @param DstReg      Virtual dest register that the result should write to.
  /// @param TrueReg     Virtual register to select when Cond is true.
  /// @param FalseReg    Virtual register to select when Cond is false.
  /// @param CondCycles  Latency from Cond+Branch to select output.
  /// @param TrueCycles  Latency from TrueReg to select output.
  /// @param FalseCycles Latency from FalseReg to select output.
  virtual bool canInsertSelect(const MachineBasicBlock &MBB,
                               ArrayRef<MachineOperand> Cond, Register DstReg,
                               Register TrueReg, Register FalseReg,
                               int &CondCycles, int &TrueCycles,
                               int &FalseCycles) const {
    return false;
  }
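  // Illustrative sketch (not part of the API): probing canInsertSelect
  // before committing to insertSelect (declared below).
  //
  //   int CondCycles, TrueCycles, FalseCycles;
  //   if (TII.canInsertSelect(MBB, Cond, DstReg, TrueReg, FalseReg,
  //                           CondCycles, TrueCycles, FalseCycles))
  //     TII.insertSelect(MBB, InsertPt, DL, DstReg, Cond, TrueReg, FalseReg);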
  /// Insert a select instruction into MBB before I that will copy TrueReg to
  /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
  ///
  /// This function can only be called after canInsertSelect() returned true.
  /// The condition in Cond comes from analyzeBranch, and it can be assumed
  /// that the same flags or registers required by Cond are available at the
  /// insertion point.
  ///
  /// @param MBB      Block where the select instruction should be inserted.
  /// @param I        Insertion point.
  /// @param DL       Source location for debugging.
  /// @param DstReg   Virtual register to be defined by the select instruction.
  /// @param Cond     Condition as computed by analyzeBranch.
  /// @param TrueReg  Virtual register to copy when Cond is true.
  /// @param FalseReg Virtual register to copy when Cond is false.
  virtual void insertSelect(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            Register DstReg, ArrayRef<MachineOperand> Cond,
                            Register TrueReg, Register FalseReg) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
  }

  /// Analyze the given select instruction, returning true if
  /// it cannot be understood. It is assumed that MI->isSelect() is true.
  ///
  /// When successful, return the controlling condition and the operands that
  /// determine the true and false result values.
  ///
  ///   Result = SELECT Cond, TrueOp, FalseOp
  ///
  /// Some targets can optimize select instructions, for example by
  /// predicating the instruction defining one of the operands. Such targets
  /// should set Optimizable.
  ///
  /// @param MI          Select instruction to analyze.
  /// @param Cond        Condition controlling the select.
  /// @param TrueOp      Operand number of the value selected when Cond is
  ///                    true.
  /// @param FalseOp     Operand number of the value selected when Cond is
  ///                    false.
  /// @param Optimizable Returned as true if MI is optimizable.
  /// @returns False on success.
  virtual bool analyzeSelect(const MachineInstr &MI,
                             SmallVectorImpl<MachineOperand> &Cond,
                             unsigned &TrueOp, unsigned &FalseOp,
                             bool &Optimizable) const {
    assert(MI.getDesc().isSelect() && "MI must be a select instruction");
    return true;
  }

  /// Given a select instruction that was understood by
  /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
  /// merging it with one of its operands. Returns NULL on failure.
  ///
  /// When successful, returns the new select instruction. The client is
  /// responsible for deleting MI.
  ///
  /// If both sides of the select can be optimized, PreferFalse is used to
  /// pick a side.
  ///
  /// @param MI          Optimizable select instruction.
  /// @param NewMIs      Set that records all MIs in the basic block up to \p
  ///                    MI. Has to be updated with any newly created MI or
  ///                    deleted ones.
  /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
  /// @returns Optimized instruction or NULL.
  virtual MachineInstr *optimizeSelect(MachineInstr &MI,
                                       SmallPtrSetImpl<MachineInstr *> &NewMIs,
                                       bool PreferFalse = false) const {
    // This function must be implemented if Optimizable is ever set.
    llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
  }
  /// Emit instructions to copy a pair of physical registers.
  ///
  /// This function should support copies within any legal register class as
  /// well as any cross-class copies created during instruction selection.
  ///
  /// The source and destination registers may overlap, which may require a
  /// careful implementation when multiple copy instructions are required for
  /// large registers. See for example the ARM target.
  ///
  /// If RenamableDest is true, the copy instruction's destination operand is
  /// marked renamable.
  /// If RenamableSrc is true, the copy instruction's source operand is
  /// marked renamable.
  virtual void copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, const DebugLoc &DL,
                           Register DestReg, Register SrcReg, bool KillSrc,
                           bool RenamableDest = false,
                           bool RenamableSrc = false) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
  }

  /// Allow targets to tell MachineVerifier whether a specific register
  /// MachineOperand can be used as part of PC-relative addressing.
  /// PC-relative addressing modes in many CISC architectures contain
  /// (non-PC) registers as offsets or scaling values, which inherently
  /// tags the corresponding MachineOperand with OPERAND_PCREL.
  ///
  /// @param MO The MachineOperand in question. MO.isReg() should always
  ///           be true.
  /// @return Whether this operand is allowed to be used PC-relatively.
  virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const {
    return false;
  }

  /// Return an index for MachineJumpTableInfo if \p insn is an indirect jump
  /// using a jump table, otherwise -1.
  virtual int getJumpTableIndex(const MachineInstr &MI) const { return -1; }

protected:
  /// Target-dependent implementation for isCopyInstr.
  /// If the specific machine instruction is an instruction that moves/copies
  /// a value from one register to another register, return the destination
  /// and source registers as machine operands.
  virtual std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const {
    return std::nullopt;
  }

  virtual std::optional<DestSourcePair>
  isCopyLikeInstrImpl(const MachineInstr &MI) const {
    return std::nullopt;
  }

  /// Return true if the given terminator MI is not expected to spill. This
  /// sets the live interval as not spillable and adjusts phi node lowering to
  /// not introduce copies after the terminator. Use with care, these are
  /// currently used for hardware loop intrinsics in very controlled
  /// situations, created prior to register allocation in loops that only
  /// have single phi users for the terminator's value. They may run out of
  /// registers if not used carefully.
  virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const {
    return false;
  }

public:
  /// If the specific machine instruction is an instruction that moves/copies
  /// a value from one register to another register, return the destination
  /// and source registers as machine operands.
  /// For a COPY instruction the method naturally returns the destination and
  /// source registers as machine operands; for all other instructions the
  /// method calls the target-dependent implementation.
  std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
    if (MI.isCopy()) {
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    }
    return isCopyInstrImpl(MI);
  }
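  // Illustrative sketch (not part of the API): the copy-propagation style
  // use of isCopyInstr, which also fires for target pseudos matched by
  // isCopyInstrImpl.
  //
  //   if (auto DestSrc = TII.isCopyInstr(MI)) {
  //     Register Dst = DestSrc->Destination->getReg();
  //     Register Src = DestSrc->Source->getReg();
  //     // ... forward Src to later users of Dst ...
  //   }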
  // Similar to isCopyInstr, but this also matches instructions that have
  // non-copy semantics on MIR but ultimately lower to a copy instruction.
  std::optional<DestSourcePair> isCopyLikeInstr(const MachineInstr &MI) const {
    if (auto IsCopyInstr = isCopyInstr(MI))
      return IsCopyInstr;
    return isCopyLikeInstrImpl(MI);
  }

  bool isFullCopyInstr(const MachineInstr &MI) const {
    auto DestSrc = isCopyInstr(MI);
    if (!DestSrc)
      return false;

    const MachineOperand *DestRegOp = DestSrc->Destination;
    const MachineOperand *SrcRegOp = DestSrc->Source;
    return !DestRegOp->getSubReg() && !SrcRegOp->getSubReg();
  }

  /// If the specific machine instruction is an instruction that adds an
  /// immediate value and a register, and stores the result in the given
  /// register \c Reg, return a pair of the source register and the offset
  /// which has been added.
  virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                                   Register Reg) const {
    return std::nullopt;
  }

  /// Returns true if MI is an instruction that defines Reg to have a constant
  /// value, with the value recorded in ImmVal. ImmVal should be interpreted
  /// modulo the size of Reg.
  virtual bool getConstValDefinedInReg(const MachineInstr &MI,
                                       const Register Reg,
                                       int64_t &ImmVal) const {
    return false;
  }

  /// Store the specified register of the given register class to the
  /// specified stack frame index. The store instruction is to be added to the
  /// given machine basic block before the specified machine instruction. If
  /// isKill is true, the register operand is the last use and must be marked
  /// kill. If \p SrcReg is being directly spilled as part of assigning a
  /// virtual register, \p VReg is the register being assigned. This
  /// additional register argument is needed for certain targets when invoked
  /// from RegAllocFast to map the spilled physical register to its virtual
  /// register. A null register can be passed elsewhere. The \p Flags is used
  /// to set appropriate machine flags on the spill instruction, e.g. the
  /// FrameSetup flag on a callee-saved register spill instruction, part of
  /// the prologue, during frame lowering.
  virtual void storeRegToStackSlot(
      MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
      bool isKill, int FrameIndex, const TargetRegisterClass *RC,
      const TargetRegisterInfo *TRI, Register VReg,
      MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const {
    llvm_unreachable("Target didn't implement "
                     "TargetInstrInfo::storeRegToStackSlot!");
  }
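  // Illustrative sketch (not part of the API): a spill/reload pair around an
  // interfering range, assuming FI names a spill slot sized for RC and using
  // loadRegFromStackSlot, declared below.
  //
  //   TII.storeRegToStackSlot(MBB, SpillPt, Reg, /*isKill=*/true, FI, RC,
  //                           TRI, Register());
  //   TII.loadRegFromStackSlot(MBB, ReloadPt, Reg, FI, RC, TRI, Register());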
  /// Load the specified register of the given register class from the
  /// specified stack frame index. The load instruction is to be added to the
  /// given machine basic block before the specified machine instruction. If \p
  /// DestReg is being directly reloaded as part of assigning a virtual
  /// register, \p VReg is the register being assigned. This additional
  /// register argument is needed for certain targets when invoked from
  /// RegAllocFast to map the loaded physical register to its virtual register.
  /// A null register can be passed elsewhere. \p Flags is used to set
  /// appropriate machine flags on the reload instruction, e.g. the
  /// FrameDestroy flag on a callee-saved register reload instruction, part of
  /// the epilogue, during frame lowering.
  virtual void loadRegFromStackSlot(
      MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
      int FrameIndex, const TargetRegisterClass *RC,
      const TargetRegisterInfo *TRI, Register VReg,
      MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const {
    llvm_unreachable("Target didn't implement "
                     "TargetInstrInfo::loadRegFromStackSlot!");
  }

  /// This function is called for all pseudo instructions
  /// that remain after register allocation. Many pseudo instructions are
  /// created to help register allocation. This is the place to convert them
  /// into real instructions. The target can edit MI in place, or it can insert
  /// new instructions and erase MI. The function should return true if
  /// anything was changed.
  virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }

  /// Check whether the target can fold a load that feeds a subreg operand
  /// (or a subreg operand that feeds a store).
  /// For example, X86 may want to return true if it can fold
  /// movl (%esp), %eax
  /// subb %al, ...
  /// Into:
  /// subb (%esp), ...
  ///
  /// Ideally, we'd like the target implementation of foldMemoryOperand() to
  /// reject subregs - but since this behavior used to be enforced in the
  /// target-independent code, moving this responsibility to the targets
  /// has the potential of causing nasty silent breakage in out-of-tree
  /// targets.
  virtual bool isSubregFoldable() const { return false; }

  /// For a patchpoint, stackmap, or statepoint intrinsic, return the range of
  /// operands which can't be folded into stack references. Operands outside
  /// of the range are most likely foldable but it is not guaranteed.
  /// These instructions are unique in that stack references for some operands
  /// have the same execution cost (e.g. none) as the unfolded register forms.
  /// The returned range is guaranteed to include all operands which can't be
  /// folded at zero cost.
  virtual std::pair<unsigned, unsigned>
  getPatchpointUnfoldableRange(const MachineInstr &MI) const;

  /// Attempt to fold a load or store of the specified stack
  /// slot into the specified machine instruction for the specified operand(s).
  /// If this is possible, a new instruction is returned with the specified
  /// operand folded, otherwise NULL is returned.
  /// The new instruction is inserted before MI, and the client is responsible
  /// for removing the old instruction.
  /// If VRM is passed, the assigned physregs can be inspected by the target to
  /// decide on using an opcode (note that those assignments can still change).
  MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
                                  int FI, LiveIntervals *LIS = nullptr,
                                  VirtRegMap *VRM = nullptr) const;

  /// Same as the previous version except it allows folding of any load and
  /// store from / to any address, not just from a specific stack slot.
  MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
                                  MachineInstr &LoadMI,
                                  LiveIntervals *LIS = nullptr) const;
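  // Illustrative only: the typical calling pattern for foldMemoryOperand.
  // The caller erases the original instruction once folding succeeds, per the
  // contract documented above. `TII`, `MI`, `OpIdx`, and `FI` are assumed to
  // come from the surrounding pass.
  //
  //   SmallVector<unsigned, 1> Ops = {OpIdx}; // operand(s) to fold
  //   if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, Ops, FI)) {
  //     // FoldedMI was inserted before MI; the old instruction is now dead.
  //     MI.eraseFromParent();
  //   }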
  /// This function defines the logic to lower COPY instruction to
  /// target specific instruction(s).
  void lowerCopy(MachineInstr *MI, const TargetRegisterInfo *TRI) const;

  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in \p Root. All potential patterns are
  /// returned in the \p Patterns vector. Patterns should be sorted in priority
  /// order since the pattern evaluator stops checking as soon as it finds a
  /// faster sequence.
  /// \param Root - Instruction that could be combined with one of its operands
  /// \param Patterns - Vector of possible combination patterns
  virtual bool getMachineCombinerPatterns(MachineInstr &Root,
                                          SmallVectorImpl<unsigned> &Patterns,
                                          bool DoRegPressureReduce) const;

  /// Return true if target supports reassociation of instructions in machine
  /// combiner pass to reduce register pressure for a given BB.
  virtual bool
  shouldReduceRegisterPressure(const MachineBasicBlock *MBB,
                               const RegisterClassInfo *RegClassInfo) const {
    return false;
  }

  /// Fix up the placeholder we may add in genAlternativeCodeSequence().
  virtual void
  finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern,
                    SmallVectorImpl<MachineInstr *> &InsInstrs) const {}

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  virtual bool isThroughputPattern(unsigned Pattern) const;

  /// Return the objective of a combiner pattern.
  /// \param Pattern - combiner pattern
  virtual CombinerObjective getCombinerObjective(unsigned Pattern) const;

  /// Return true if the input \p Inst is part of a chain of dependent ops
  /// that are suitable for reassociation, otherwise return false.
  /// If the instruction's operands must be commuted to have a previous
  /// instruction of the same type define the first source operand, \p Commuted
  /// will be set to true.
  bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;

  /// Return true when \p Inst is both associative and commutative. If \p
  /// Invert is true, then the inverse of \p Inst operation must be tested.
  virtual bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                           bool Invert = false) const {
    return false;
  }

  /// Find chains of accumulations that can be rewritten as a tree for
  /// increased ILP.
  bool getAccumulatorReassociationPatterns(
      MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const;

  /// Find the chain of accumulator instructions in \p MBB and return them in
  /// \p Chain.
  void getAccumulatorChain(MachineInstr *CurrentInstr,
                           SmallVectorImpl<Register> &Chain) const;

  /// Return true when \p Opcode is an instruction which performs
  /// accumulation into one of its operand registers.
  virtual bool isAccumulationOpcode(unsigned Opcode) const { return false; }
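  // Illustrative only: the rewrite these accumulator hooks enable, using a
  // placeholder accumulating opcode ACC (and ACC_START / REDUCE for the hooks
  // that follow). A serial chain of accumulations, where each step depends on
  // the previous one:
  //
  //   acc1 = ACC acc0, x0
  //   acc2 = ACC acc1, x1
  //   acc3 = ACC acc2, x2
  //   acc4 = ACC acc3, x3
  //
  // can be split into independent sub-chains that execute in parallel and are
  // then reduced, shortening the critical path from four dependent steps to
  // three:
  //
  //   a = ACC acc0, x0 ; a = ACC a, x1   // sub-chain 1
  //   b = ACC_START x2 ; b = ACC b, x3   // sub-chain 2, started via
  //                                      // getAccumulationStartOpcode
  //   acc4 = REDUCE a, b                 // via getReduceOpcodeForAccumulator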
  /// Returns an opcode which defines the accumulator used by \p Opcode.
  virtual unsigned getAccumulationStartOpcode(unsigned Opcode) const {
    llvm_unreachable("Function not implemented for target!");
    return 0;
  }

  /// Returns the opcode that should be used to reduce accumulation registers.
  virtual unsigned
  getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const {
    llvm_unreachable("Function not implemented for target!");
    return 0;
  }

  /// Reduces branches of the accumulator tree into a single register.
  void reduceAccumulatorTree(SmallVectorImpl<Register> &RegistersToReduce,
                             SmallVectorImpl<MachineInstr *> &InsInstrs,
                             MachineFunction &MF, MachineInstr &Root,
                             MachineRegisterInfo &MRI,
                             DenseMap<Register, unsigned> &InstrIdxForVirtReg,
                             Register ResultReg) const;

  /// Return the inverse operation opcode if it exists for \p Opcode (e.g. add
  /// for sub and vice versa).
  virtual std::optional<unsigned> getInverseOpcode(unsigned Opcode) const {
    return std::nullopt;
  }
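  // Illustrative only: a minimal target override of getInverseOpcode,
  // assuming hypothetical MYTGT::ADD / MYTGT::SUB opcodes.
  //
  //   std::optional<unsigned>
  //   MyTargetInstrInfo::getInverseOpcode(unsigned Opcode) const {
  //     switch (Opcode) {
  //     case MYTGT::ADD:
  //       return MYTGT::SUB;
  //     case MYTGT::SUB:
  //       return MYTGT::ADD;
  //     default:
  //       return std::nullopt;
  //     }
  //   }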
  /// Return true when \p Opcode1 or its inversion is equal to \p Opcode2.
  bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const;

  /// Return true when \p Inst has reassociable operands in the same \p MBB.
  virtual bool hasReassociableOperands(const MachineInstr &Inst,
                                       const MachineBasicBlock *MBB) const;

  /// Return true when \p Inst has a reassociable sibling.
  virtual bool hasReassociableSibling(const MachineInstr &Inst,
                                      bool &Commuted) const;

  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence. The
  /// client has to decide whether the actual replacement is beneficial or not.
  /// \param Root - Instruction that could be combined with one of its operands
  /// \param Pattern - Combination pattern for Root
  /// \param InsInstrs - Vector of new instructions that implement the pattern
  /// \param DelInstrs - Old instructions, including Root, that could be
  /// replaced by InsInstrs
  /// \param InstIdxForVirtReg - map of virtual register to instruction in
  /// InsInstrs that defines it
  virtual void genAlternativeCodeSequence(
      MachineInstr &Root, unsigned Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<Register, unsigned> &InstIdxForVirtReg) const;

  /// When calculating the latency of the root instruction, accumulate the
  /// latency of the sequence to the root latency.
  /// \param Root - Instruction that could be combined with one of its operands
  virtual bool accumulateInstrSeqToRootLatency(MachineInstr &Root) const {
    return true;
  }

  /// The returned array encodes the operand index for each parameter because
  /// the operands may be commuted; the operand indices for associative
  /// operations might also be target-specific. Each element specifies the
  /// index of {Prev, A, B, X, Y}.
  virtual void
  getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern,
                               std::array<unsigned, 5> &OperandIndices) const;

  /// Attempt to reassociate \p Root and \p Prev according to \p Pattern to
  /// reduce critical path length.
  void reassociateOps(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
                      SmallVectorImpl<MachineInstr *> &InsInstrs,
                      SmallVectorImpl<MachineInstr *> &DelInstrs,
                      ArrayRef<unsigned> OperandIndices,
                      DenseMap<Register, unsigned> &InstrIdxForVirtReg) const;

  /// Reassociation of some instructions requires inverse operations (e.g.
  /// (X + A) - Y => (X - Y) + A). This method returns a pair of new opcodes
  /// (new root opcode, new prev opcode) that must be used to reassociate \p
  /// Root and \p Prev according to \p Pattern.
  std::pair<unsigned, unsigned>
  getReassociationOpcodes(unsigned Pattern, const MachineInstr &Root,
                          const MachineInstr &Prev) const;

  /// The limit on resource length extension we accept in MachineCombiner Pass.
  virtual int getExtendResourceLenLimit() const { return 0; }

  /// This is an architecture-specific helper function of reassociateOps.
  /// Set special operand attributes for new instructions after reassociation.
  virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
                                     MachineInstr &NewMI1,
                                     MachineInstr &NewMI2) const {}

  /// Return true when a target supports MachineCombiner.
  virtual bool useMachineCombiner() const { return false; }

  /// Return a strategy that MachineCombiner must use when creating traces.
  virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const;

  /// Return true if the given SDNode can be copied during scheduling
  /// even if it has glue.
  virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }

protected:
  /// Target-dependent implementation for foldMemoryOperand.
  /// Target-independent code in foldMemoryOperand will
  /// take care of adding a MachineMemOperand to the newly created instruction.
  /// The instruction and any auxiliary instructions necessary will be inserted
  /// at InsertPt.
  virtual MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const {
    return nullptr;
  }

  /// Target-dependent implementation for foldMemoryOperand.
  /// Target-independent code in foldMemoryOperand will
  /// take care of adding a MachineMemOperand to the newly created instruction.
  /// The instruction and any auxiliary instructions necessary will be inserted
  /// at InsertPt.
  virtual MachineInstr *foldMemoryOperandImpl(
      MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
      MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
      LiveIntervals *LIS = nullptr) const {
    return nullptr;
  }
  /// Target-dependent implementation of getRegSequenceInputs.
  ///
  /// \returns true if it is possible to build the equivalent
  /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isRegSequenceLike().
  ///
  /// \see TargetInstrInfo::getRegSequenceInputs.
  virtual bool getRegSequenceLikeInputs(
      const MachineInstr &MI, unsigned DefIdx,
      SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
    return false;
  }

  /// Target-dependent implementation of getExtractSubregInputs.
  ///
  /// \returns true if it is possible to build the equivalent
  /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isExtractSubregLike().
  ///
  /// \see TargetInstrInfo::getExtractSubregInputs.
  virtual bool getExtractSubregLikeInputs(const MachineInstr &MI,
                                          unsigned DefIdx,
                                          RegSubRegPairAndIdx &InputReg) const {
    return false;
  }

  /// Target-dependent implementation of getInsertSubregInputs.
  ///
  /// \returns true if it is possible to build the equivalent
  /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isInsertSubregLike().
  ///
  /// \see TargetInstrInfo::getInsertSubregInputs.
  virtual bool
  getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
                            RegSubRegPair &BaseReg,
                            RegSubRegPairAndIdx &InsertedReg) const {
    return false;
  }

public:
  /// unfoldMemoryOperand - Separate a single instruction which folded a load
  /// or a store or a load and a store into two or more instructions. If this
  /// is possible, returns true as well as the new instructions by reference.
  virtual bool
  unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, Register Reg,
                      bool UnfoldLoad, bool UnfoldStore,
                      SmallVectorImpl<MachineInstr *> &NewMIs) const {
    return false;
  }

  virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                   SmallVectorImpl<SDNode *> &NewNodes) const {
    return false;
  }

  /// Returns the opcode of the would-be new
  /// instruction after load / store are unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
  /// index of the operand which will hold the register holding the loaded
  /// value.
  virtual unsigned
  getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
                             unsigned *LoadRegIndex = nullptr) const {
    return 0;
  }
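  // Illustrative only: on x86, folding and unfolding are inverses of each
  // other (operands simplified here for readability). foldMemoryOperand can
  // turn
  //
  //   %eax = MOV32rm %stack.0          ; separate load
  //   %ecx = ADD32rr %ecx, %eax
  //
  // into the memory-operand form
  //
  //   %ecx = ADD32rm %ecx, %stack.0
  //
  // while unfoldMemoryOperand splits the folded form back into the separate
  // load and register-register arithmetic instructions.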
  /// This is used by the pre-regalloc scheduler to determine if two loads are
  /// loading from the same base address. It should only return true if the
  /// base pointers are the same and the only difference between the two
  /// addresses is the offset. It also returns the offsets by reference.
  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                       int64_t &Offset1,
                                       int64_t &Offset2) const {
    return false;
  }

  /// This is used by the pre-regalloc scheduler to determine (in conjunction
  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
  /// On some targets if two loads are loading from
  /// addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load
  /// offsets from the common base address. It returns true if it decides it's
  /// desirable to schedule the two loads together. "NumLoads" is the number of
  /// loads that have already been scheduled after Load1.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const {
    return false;
  }

  /// Get the base operand and byte offset of an instruction that reads/writes
  /// memory. This is a convenience function for callers that are only prepared
  /// to handle a single base operand.
  /// FIXME: Move Offset and OffsetIsScalable to some ElementCount-style
  /// abstraction that supports negative offsets.
  bool getMemOperandWithOffset(const MachineInstr &MI,
                               const MachineOperand *&BaseOp, int64_t &Offset,
                               bool &OffsetIsScalable,
                               const TargetRegisterInfo *TRI) const;

  /// Get zero or more base operands and the byte offset of an instruction that
  /// reads/writes memory. Note that there may be zero base operands if the
  /// instruction accesses a constant address.
  /// It returns false if MI does not read/write memory.
  /// It returns false if base operands and offset could not be determined.
  /// It is not guaranteed to always recognize base operands and offsets in all
  /// cases.
  /// FIXME: Move Offset and OffsetIsScalable to some ElementCount-style
  /// abstraction that supports negative offsets.
  virtual bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
      const TargetRegisterInfo *TRI) const {
    return false;
  }
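  // Illustrative only: how a caller might query the base and offset of a
  // memory access through the single-base convenience wrapper above. `TII`,
  // `TRI`, and `MI` are assumed to come from the surrounding pass.
  //
  //   const MachineOperand *BaseOp;
  //   int64_t Offset;
  //   bool OffsetIsScalable;
  //   if (TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
  //                                    TRI)) {
  //     // The access address is BaseOp plus Offset bytes (scaled by a
  //     // runtime quantity if OffsetIsScalable is true).
  //   }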
  /// Return true if the instruction contains a base register and offset. If
  /// true, the function also sets the operand position in the instruction
  /// for the base register and offset.
  virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
                                        unsigned &BasePos,
                                        unsigned &OffsetPos) const {
    return false;
  }

  /// Target-dependent implementation to get the values constituting the
  /// address of the MachineInstr that is accessing memory. These values are
  /// returned as a struct ExtAddrMode which contains all relevant information
  /// to make up the address.
  virtual std::optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const {
    return std::nullopt;
  }

  /// Check if it's possible and beneficial to fold the addressing computation
  /// `AddrI` into the addressing mode of the load/store instruction `MemI`.
  /// The memory instruction is a user of the virtual register `Reg`, which in
  /// turn is the ultimate destination of zero or more COPY instructions from
  /// the output register of `AddrI`.
  /// Return the addressing mode after folding in `AM`.
  virtual bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
                                   const MachineInstr &AddrI,
                                   ExtAddrMode &AM) const {
    return false;
  }

  /// Emit a load/store instruction with the same value register as `MemI`, but
  /// using the address from `AM`. The addressing mode must have been obtained
  /// from `canFoldIntoAddrMode` for the same memory instruction.
  virtual MachineInstr *emitLdStWithAddr(MachineInstr &MemI,
                                         const ExtAddrMode &AM) const {
    llvm_unreachable("target did not implement emitLdStWithAddr()");
  }
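  // Illustrative only: the transformation that canFoldIntoAddrMode and
  // emitLdStWithAddr enable, in pseudo-MIR with hypothetical ADDri and LOAD
  // opcodes. Given
  //
  //   %addr = ADDri %base, 16      ; AddrI: the address computation
  //   %val  = LOAD %addr           ; MemI: user of %addr
  //
  // a target that can fold the add would fill in AM as
  // { BaseReg = %base, Scale = 0, Displacement = 16 }, and emitLdStWithAddr
  // would then emit the equivalent of
  //
  //   %val = LOAD %base, 16        ; base + displacement addressing mode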
  /// Returns true if MI's Def is NullValueReg, and the MI
  /// does not change the Zero value. i.e. cases such as rax = shr rax, X where
  /// NullValueReg = rax. Note that if the NullValueReg is non-zero, this
  /// function can return true even if it becomes zero. Specifically cases such
  /// as NullValueReg = shl NullValueReg, 63.
  virtual bool preservesZeroValueInReg(const MachineInstr *MI,
                                       const Register NullValueReg,
                                       const TargetRegisterInfo *TRI) const {
    return false;
  }

  /// If the instruction is an increment of a constant value, return the
  /// amount.
  virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
    return false;
  }

  /// Returns true if the two given memory operations should be scheduled
  /// adjacent. Note that you have to add:
  ///   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  /// or
  ///   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  /// to TargetMachine::createMachineScheduler() to have an effect.
  ///
  /// \p BaseOps1 and \p BaseOps2 are memory operands of two memory operations.
  /// \p Offset1 and \p Offset2 are the byte offsets for the memory
  /// operations.
  /// \p OffsetIsScalable1 and \p OffsetIsScalable2 indicate if the offset is
  /// scaled by a runtime quantity.
  /// \p ClusterSize is the number of operations in the resulting load/store
  /// cluster if this hook returns true.
  /// \p NumBytes is the number of bytes that will be loaded from all the
  /// clustered loads if this hook returns true.
  virtual bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                   int64_t Offset1, bool OffsetIsScalable1,
                                   ArrayRef<const MachineOperand *> BaseOps2,
                                   int64_t Offset2, bool OffsetIsScalable2,
                                   unsigned ClusterSize,
                                   unsigned NumBytes) const {
    llvm_unreachable("target did not implement shouldClusterMemOps()");
  }

  /// Reverses the branch condition of the specified condition list,
  /// returning false on success and true if it cannot be reversed.
  virtual bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
    return true;
  }
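  // Illustrative only: the inverted-on-success convention of
  // reverseBranchCondition trips people up, so the usual calling pattern is
  // sketched here. `TII` and `Cond` are assumed to come from the caller.
  //
  //   if (!TII->reverseBranchCondition(Cond)) {
  //     // Success: Cond now encodes the opposite condition and can be used
  //     // to rewrite the conditional branch.
  //   } else {
  //     // The target cannot reverse this condition; leave the branch alone.
  //   }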
  /// Insert a noop into the instruction stream at the specified point.
  virtual void insertNoop(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI) const;

  /// Insert noops into the instruction stream at the specified point.
  virtual void insertNoops(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI,
                           unsigned Quantity) const;

  /// Return the noop instruction to use for a noop.
  virtual MCInst getNop() const;

  /// Return true for post-incremented instructions.
  virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }

  /// Returns true if the instruction is already predicated.
  virtual bool isPredicated(const MachineInstr &MI) const { return false; }

  /// Assumes the instruction is already predicated and returns true if the
  /// instruction can be predicated again.
  virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const {
    assert(isPredicated(MI) && "Instruction is not predicated");
    return false;
  }

  // Returns a MIRPrinter comment for this machine operand.
  virtual std::string
  createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op,
                          unsigned OpIdx, const TargetRegisterInfo *TRI) const;

  /// Returns true if the instruction is a
  /// terminator instruction that has not been predicated.
  bool isUnpredicatedTerminator(const MachineInstr &MI) const;

  /// Returns true if MI is an unconditional tail call.
  virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
    return false;
  }

  /// Returns true if the tail call can be made conditional on BranchCond.
  virtual bool
  canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
                             const MachineInstr &TailCall) const {
    return false;
  }

  /// Replace the conditional branch in MBB with a conditional tail call.
  virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB,
                                         SmallVectorImpl<MachineOperand> &Cond,
                                         const MachineInstr &TailCall) const {
    llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
  }

  /// Convert the instruction into a predicated instruction.
  /// It returns true if the operation was successful.
  virtual bool PredicateInstruction(MachineInstr &MI,
                                    ArrayRef<MachineOperand> Pred) const;

  /// Returns true if the first specified predicate
  /// subsumes the second, e.g. GE subsumes GT.
  virtual bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                 ArrayRef<MachineOperand> Pred2) const {
    return false;
  }

  /// If the specified instruction defines any predicate
  /// or condition code register(s) used for predication, returns true as well
  /// as the definition predicate(s) by reference.
  /// SkipDead should be set to false at any point that dead
  /// predicate instructions should be considered as being defined.
  /// A dead predicate instruction is one that is guaranteed to be removed
  /// after a call to PredicateInstruction.
  virtual bool ClobbersPredicate(MachineInstr &MI,
                                 std::vector<MachineOperand> &Pred,
                                 bool SkipDead) const {
    return false;
  }

  /// Return true if the specified instruction can be predicated.
  /// By default, this returns true for every instruction with a
  /// PredicateOperand.
  virtual bool isPredicable(const MachineInstr &MI) const {
    return MI.getDesc().isPredicable();
  }
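  // Illustrative only: how an if-conversion style pass combines the
  // predication hooks above. `TII`, `MI`, and `Pred` (the predicate operands
  // for the region being converted) are assumed to come from the caller.
  //
  //   if (TII->isPredicable(MI) &&
  //       (!TII->isPredicated(MI) || TII->canPredicatePredicatedInstr(MI))) {
  //     if (TII->PredicateInstruction(MI, Pred)) {
  //       // MI now only executes when Pred holds; the branch guarding it
  //       // can potentially be removed.
  //     }
  //   }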
  /// Return true if it's safe to move a machine
  /// instruction that defines the specified register class.
  virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
    return true;
  }

  /// Return true if it's safe to move a machine instruction.
  /// This allows the backend to prevent certain special instruction
  /// sequences from being broken by instruction motion in optimization
  /// passes.
  /// By default, this returns true for every instruction.
  virtual bool isSafeToMove(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const {
    return true;
  }

  /// Test if the given instruction should be considered a scheduling boundary.
  /// This primarily includes labels and terminators.
  virtual bool isSchedulingBoundary(const MachineInstr &MI,
                                    const MachineBasicBlock *MBB,
                                    const MachineFunction &MF) const;

  /// Measure the specified inline asm to determine an approximation of its
  /// length.
  virtual unsigned getInlineAsmLength(
      const char *Str, const MCAsmInfo &MAI,
      const TargetSubtargetInfo *STI = nullptr) const;

  /// Allocate and return a hazard recognizer to use for this target when
  /// scheduling the machine instructions before register allocation.
  virtual ScheduleHazardRecognizer *
  CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                               const ScheduleDAG *DAG) const;

  /// Allocate and return a hazard recognizer to use for this target when
  /// scheduling the machine instructions before register allocation.
  virtual ScheduleHazardRecognizer *
  CreateTargetMIHazardRecognizer(const InstrItineraryData *,
                                 const ScheduleDAGMI *DAG) const;

  /// Allocate and return a hazard recognizer to use for this target when
  /// scheduling the machine instructions after register allocation.
  virtual ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *,
                                     const ScheduleDAG *DAG) const;

  /// Allocate and return a hazard recognizer to be used by non-scheduling
  /// passes.
  virtual ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
    return nullptr;
  }

  /// Provide a global flag for disabling the PreRA hazard recognizer that
  /// targets may choose to honor.
  bool usePreRAHazardRecognizer() const;

  /// For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if it has two register operands, and the value it
  /// compares against in CmpValue. Return true if the comparison instruction
  /// can be analyzed.
  virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                              Register &SrcReg2, int64_t &Mask,
                              int64_t &Value) const {
    return false;
  }

  /// See if the comparison instruction can be converted
  /// into something more efficient. E.g., on ARM most instructions can set the
  /// flags register, obviating the need for a separate CMP.
  virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                                    Register SrcReg2, int64_t Mask,
                                    int64_t Value,
                                    const MachineRegisterInfo *MRI) const {
    return false;
  }
  virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
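  // Illustrative only: expected analyzeCompare results for two common
  // comparison shapes, in pseudo-MIR with generic opcode and register names.
  //
  //   CMPri %r1, 42   -> SrcReg = %r1, SrcReg2 = 0,   Value = 42
  //   CMPrr %r1, %r2  -> SrcReg = %r1, SrcReg2 = %r2, Value = 0
  //
  // Mask describes which bits of Value are meaningful; the peephole
  // optimizer feeds these results into optimizeCompareInstr to try to
  // eliminate the compare entirely.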
  /// Try to remove the load by folding it to a register operand at the use.
  /// We fold the load instructions if and only if the
  /// def and use are in the same BB. We only look at one load and see
  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual
  /// register defined by the load we are trying to fold. DefMI returns the
  /// machine instruction that defines FoldAsLoadDefReg, and the function
  /// returns the machine instruction generated due to folding.
  virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
                                          const MachineRegisterInfo *MRI,
                                          Register &FoldAsLoadDefReg,
                                          MachineInstr *&DefMI) const;

  /// 'Reg' is known to be defined by a move immediate instruction,
  /// try to fold the immediate into the use instruction.
  /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
  /// then the caller may assume that DefMI has been erased from its parent
  /// block. The caller may assume that it will not be erased by this
  /// function otherwise.
  virtual bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                             Register Reg, MachineRegisterInfo *MRI) const {
    return false;
  }

  /// Return the number of u-operations the given machine
  /// instruction will be decoded to on the target cpu. The itinerary's
  /// IssueWidth is the number of microops that can be dispatched each
  /// cycle. An instruction with zero microops takes no dispatch resources.
  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
                                  const MachineInstr &MI) const;

  /// Return true for pseudo instructions that don't consume any
  /// machine resources in their current form. These are common cases that the
  /// scheduler should consider free, rather than conservatively handling them
  /// as instructions with no itinerary.
  bool isZeroCost(unsigned Opcode) const {
    return Opcode <= TargetOpcode::COPY;
  }

  virtual std::optional<unsigned>
  getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode,
                    unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const;

  /// Compute and return the use operand latency of a given pair of def and
  /// use. In most cases, the static scheduling itinerary is enough to
  /// determine the operand latency. But it may not be possible for
  /// instructions with a variable number of defs / uses.
  ///
  /// This is a raw interface to the itinerary that may be directly overridden
  /// by a target. Use computeOperandLatency to get the best estimate of
  /// latency.
  virtual std::optional<unsigned>
  getOperandLatency(const InstrItineraryData *ItinData,
                    const MachineInstr &DefMI, unsigned DefIdx,
                    const MachineInstr &UseMI, unsigned UseIdx) const;

  /// Compute the instruction latency of a given instruction.
  /// If the instruction has higher cost when predicated, it's returned via
  /// PredCost.
  virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
                                   const MachineInstr &MI,
                                   unsigned *PredCost = nullptr) const;

  virtual unsigned getPredicationCost(const MachineInstr &MI) const;

  virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
                                   SDNode *Node) const;

  /// Return the default expected latency for a def based on its opcode.
  unsigned defaultDefLatency(const MCSchedModel &SchedModel,
                             const MachineInstr &DefMI) const;

  /// Return true if this opcode has high latency to its result.
  virtual bool isHighLatencyDef(int opc) const { return false; }
  /// Compute operand latency between a def of 'Reg'
  /// and a use in the current loop. Return true if the target considered
  /// it 'high'. This is used by optimization passes such as machine LICM to
  /// determine whether it makes sense to hoist an instruction out even in a
  /// high register pressure situation.
  virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
                                     const MachineRegisterInfo *MRI,
                                     const MachineInstr &DefMI, unsigned DefIdx,
                                     const MachineInstr &UseMI,
                                     unsigned UseIdx) const {
    return false;
  }

  /// Compute operand latency of a def of 'Reg'. Return true
  /// if the target considered it 'low'.
  virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
                                const MachineInstr &DefMI,
                                unsigned DefIdx) const;

  /// Perform target-specific instruction verification.
  virtual bool verifyInstruction(const MachineInstr &MI,
                                 StringRef &ErrInfo) const {
    return true;
  }

  /// Return the current execution domain and bit mask of
  /// possible domains for an instruction.
  ///
  /// Some micro-architectures have multiple execution domains, and multiple
  /// opcodes that perform the same operation in different domains. For
  /// example, the x86 architecture provides the por, orps, and orpd
  /// instructions that all do the same thing. There is a latency penalty if a
  /// register is written in one domain and read in another.
  ///
  /// This function returns a pair (domain, mask) containing the execution
  /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
  /// function can be used to change the opcode to one of the domains in the
  /// bit mask. Instructions whose execution domain can't be changed should
  /// return a 0 mask.
  ///
  /// The execution domain numbers don't have any special meaning except domain
  /// 0 is used for instructions that are not associated with any interesting
  /// execution domain.
  ///
  virtual std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr &MI) const {
    return std::make_pair(0, 0);
  }

  /// Change the opcode of MI to execute in Domain.
  ///
  /// The bit (1 << Domain) must be set in the mask returned from
  /// getExecutionDomain(MI).
  virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
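  // Illustrative only: with a hypothetical domain numbering of 1 = integer,
  // 2 = packed-single, 3 = packed-double, the three equivalent x86 OR
  // instructions could report
  //
  //   por  -> (1, 0b1110)   // integer domain, switchable to any of the three
  //   orps -> (2, 0b1110)   // packed-single domain
  //   orpd -> (3, 0b1110)   // packed-double domain
  //
  // and a domain-fixing pass would call setExecutionDomain(MI, D) to align
  // neighboring instructions to one domain D that is set in the shared mask.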
  /// Returns the preferred minimum clearance
  /// before an instruction with an unwanted partial register update.
  ///
  /// Some instructions only write part of a register, and implicitly need to
  /// read the other parts of the register. This may cause unwanted stalls
  /// preventing otherwise unrelated instructions from executing in parallel in
  /// an out-of-order CPU.
  ///
  /// For example, the x86 instruction cvtsi2ss writes its result to bits
  /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
  /// the instruction needs to wait for the old value of the register to become
  /// available:
  ///
  ///   addps %xmm1, %xmm0
  ///   movaps %xmm0, (%rax)
  ///   cvtsi2ss %rbx, %xmm0
  ///
  /// In the code above, the cvtsi2ss instruction needs to wait for the addps
  /// instruction before it can issue, even though the high bits of %xmm0
  /// probably aren't needed.
  ///
  /// This hook returns the preferred clearance before MI, measured in
  /// instructions. Other defs of MI's operand OpNum are avoided in the last N
  /// instructions before MI. It should only return a positive value for
  /// unwanted dependencies. If the old bits of the defined register have
  /// useful values, or if MI is determined to otherwise read the dependency,
  /// the hook should return 0.
  ///
  /// The unwanted dependency may be handled by:
  ///
  /// 1. Allocating the same register for an MI def and use. That makes the
  ///    unwanted dependency identical to a required dependency.
  ///
  /// 2. Allocating a register for the def that has no defs in the previous N
  ///    instructions.
  ///
  /// 3. Calling breakPartialRegDependency() with the same arguments. This
  ///    allows the target to insert a dependency breaking instruction.
  ///
  virtual unsigned
  getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
                               const TargetRegisterInfo *TRI) const {
    // The default implementation returns 0 for no partial register dependency.
    return 0;
  }

  /// Return the minimum clearance before an instruction that reads an
  /// unused register.
  ///
  /// For example, AVX instructions may copy part of a register operand into
  /// the unused high bits of the destination register.
  ///
  ///   vcvtsi2sdq %rax, undef %xmm0, %xmm14
  ///
  /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14, creating
  /// a false dependence on any previous write to %xmm0.
  ///
  /// This hook works similarly to getPartialRegUpdateClearance, except that it
  /// does not take an operand index. Instead, it sets \p OpNum to the index of
  /// the unused register.
  virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
                                        const TargetRegisterInfo *TRI) const {
    // The default implementation returns 0 for no undef register dependency.
    return 0;
  }

  /// Insert a dependency-breaking instruction
  /// before MI to eliminate an unwanted dependency on OpNum.
  ///
  /// If it wasn't possible to avoid a def in the last N instructions before MI
  /// (see getPartialRegUpdateClearance), this hook will be called to break the
  /// unwanted dependency.
  ///
  /// On x86, an xorps instruction can be used as a dependency breaker:
  ///
  ///   addps %xmm1, %xmm0
  ///   movaps %xmm0, (%rax)
  ///   xorps %xmm0, %xmm0
  ///   cvtsi2ss %rbx, %xmm0
  ///
  /// An <imp-kill> operand should be added to MI if an instruction was
  /// inserted. This ties the instructions together in the post-ra scheduler.
  ///
  virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
                                         const TargetRegisterInfo *TRI) const {}

  /// Create machine specific model for scheduling.
  virtual DFAPacketizer *
  CreateTargetScheduleState(const TargetSubtargetInfo &) const {
    return nullptr;
  }
  /// Sometimes, it is possible for the target
  /// to tell, even without aliasing information, that two MIs access different
  /// memory addresses. This function returns true if two MIs access different
  /// memory addresses and false otherwise.
  ///
  /// Assumes any physical registers used to compute addresses have the same
  /// value for both instructions. (This is the most useful assumption for
  /// post-RA scheduling.)
  ///
  /// See also MachineInstr::mayAlias, which is implemented on top of this
  /// function.
  virtual bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const {
    assert(MIa.mayLoadOrStore() &&
           "MIa must load from or modify a memory location");
    assert(MIb.mayLoadOrStore() &&
           "MIb must load from or modify a memory location");
    return false;
  }

  /// Return the value to use for the MachineCSE's LookAheadLimit,
  /// which is a heuristic used for CSE'ing phys reg defs.
  virtual unsigned getMachineCSELookAheadLimit() const {
    // The default lookahead is small to prevent unprofitable quadratic
    // behavior.
    return 5;
  }

  /// Return the maximal number of alias checks on memory operands. For
  /// instructions with more than one memory operand, the alias check on a
  /// single MachineInstr pair has quadratic overhead and results in
  /// unacceptable performance in the worst case. The limit here clamps the
  /// maximal number of checks performed. Usually, that's the product of the
  /// memory operand counts of the pair of MachineInstrs being checked. For
  /// instance, with two MachineInstrs with 4 and 5 memory operands
  /// respectively, a total of 20 checks are required. With this limit set to
  /// 16, their alias check is skipped. We choose to limit the product rather
  /// than the individual instruction, as targets may have special
  /// MachineInstrs with a considerably high number of memory operands, such as
  /// `ldm` in ARM. Setting this limit per MachineInstr would result in either
  /// too high overhead or too rigid a restriction.
  virtual unsigned getMemOperandAACheckLimit() const { return 16; }

  /// Return an array that contains the ids of the target indices (used for the
  /// TargetIndex machine operand) and their names.
  ///
  /// MIR Serialization is able to serialize only the target indices that are
  /// defined by this method.
  virtual ArrayRef<std::pair<int, const char *>>
  getSerializableTargetIndices() const {
    return {};
  }

  /// Decompose the machine operand's target flags into two values - the direct
  /// target flag value and any of bit flags that are applied.
  virtual std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const {
    return std::make_pair(0u, 0u);
  }

  /// Return an array that contains the direct target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  virtual ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const {
    return {};
  }
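  // Illustrative only: a minimal override of
  // getSerializableDirectMachineOperandTargetFlags, assuming hypothetical
  // MYTGT::MO_LO16 / MYTGT::MO_HI16 operand flags. The returned array must
  // have static storage, since an ArrayRef does not own its data.
  //
  //   ArrayRef<std::pair<unsigned, const char *>>
  //   MyTargetInstrInfo::getSerializableDirectMachineOperandTargetFlags()
  //       const {
  //     static const std::pair<unsigned, const char *> Flags[] = {
  //         {MYTGT::MO_LO16, "mytgt-lo16"}, {MYTGT::MO_HI16, "mytgt-hi16"}};
  //     return Flags;
  //   }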
  /// Return an array that contains the bitmask target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  virtual ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const {
    return {};
  }

  /// Return an array that contains the MMO target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the MMO target flags that are
  /// defined by this method.
  virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const {
    return {};
  }

  /// Determines whether \p Inst is a tail call instruction. Override this
  /// method on targets that do not properly set MCID::Return and MCID::Call on
  /// tail call instructions.
  virtual bool isTailCall(const MachineInstr &Inst) const {
    return Inst.isReturn() && Inst.isCall();
  }

  /// True if the instruction is bound to the top of its basic block and no
  /// other instructions shall be inserted before it. This can be implemented
  /// to prevent the register allocator from inserting spills for \p Reg before
  /// such instructions.
  virtual bool isBasicBlockPrologue(const MachineInstr &MI,
                                    Register Reg = Register()) const {
    return false;
  }

  /// Allows targets to use an appropriate copy instruction while splitting the
  /// live range of a register in register allocation.
  virtual unsigned getLiveRangeSplitOpcode(Register Reg,
                                           const MachineFunction &MF) const {
    return TargetOpcode::COPY;
  }

  /// During PHI elimination, lets the target make necessary checks and
  /// insert the copy to the PHI destination register in a target-specific
  /// manner.
  virtual MachineInstr *createPHIDestinationCopy(
      MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
      const DebugLoc &DL, Register Src, Register Dst) const {
    return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
        .addReg(Src);
  }

  /// During PHI elimination, lets the target make necessary checks and
  /// insert the copy from the PHI source register in a target-specific
  /// manner.
  virtual MachineInstr *createPHISourceCopy(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator InsPt,
                                            const DebugLoc &DL, Register Src,
                                            unsigned SrcSubReg,
                                            Register Dst) const {
    return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
        .addReg(Src, 0, SrcSubReg);
  }
  /// Returns an \p outliner::OutlinedFunction struct containing
  /// target-specific information for a set of outlining candidates. Returns
  /// std::nullopt if the candidates are not suitable for outlining. \p
  /// MinRepeats is the minimum number of times the instruction sequence must
  /// be repeated.
  virtual std::optional<std::unique_ptr<outliner::OutlinedFunction>>
  getOutliningCandidateInfo(
      const MachineModuleInfo &MMI,
      std::vector<outliner::Candidate> &RepeatedSequenceLocs,
      unsigned MinRepeats) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
  }

  /// Optional target hook to create the LLVM IR attributes for the outlined
  /// function. If overridden, the overriding function must call the default
  /// implementation.
  virtual void mergeOutliningCandidateAttributes(
      Function &F, std::vector<outliner::Candidate> &Candidates) const;

protected:
  /// Target-dependent implementation for getOutliningType.
  virtual outliner::InstrType
  getOutliningTypeImpl(const MachineModuleInfo &MMI,
                       MachineBasicBlock::iterator &MIT, unsigned Flags) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::getOutliningTypeImpl!");
  }

public:
  /// Returns how or if \p MIT should be outlined. \p Flags is the
  /// target-specific information returned by isMBBSafeToOutlineFrom.
  outliner::InstrType getOutliningType(const MachineModuleInfo &MMI,
                                       MachineBasicBlock::iterator &MIT,
                                       unsigned Flags) const;

  /// Optional target hook that returns true if \p MBB is safe to outline from,
  /// and returns any target-specific information in \p Flags.
  virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                      unsigned &Flags) const;

  /// Optional target hook which partitions \p MBB into outlinable ranges for
  /// instruction mapping purposes. Each range is defined by two iterators:
  /// [start, end).
  ///
  /// Ranges are expected to be ordered top-down. That is, ranges closer to the
  /// top of the block should come before ranges closer to the end of the
  /// block.
  ///
  /// Ranges cannot overlap.
  ///
  /// If an entire block is mappable, then its range is [MBB.begin(),
  /// MBB.end()).
  ///
  /// All instructions not present in an outlinable range are considered
  /// illegal.
  virtual SmallVector<
      std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
  getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const {
    return {std::make_pair(MBB.begin(), MBB.end())};
  }

  /// Insert a custom frame for outlined functions.
  virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                                  const outliner::OutlinedFunction &OF) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
  }
  /// Insert a call to an outlined function into the program.
  /// Returns an iterator to the spot where we inserted the call. This must be
  /// implemented by the target.
  virtual MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     outliner::Candidate &C) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
  }

  /// Insert an architecture-specific instruction to clear a register. If you
  /// need to avoid side effects (e.g. avoid XOR on x86, which sets EFLAGS),
  /// set \p AllowSideEffects to \p false.
  virtual void buildClearRegister(Register Reg, MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator Iter,
                                  DebugLoc &DL,
                                  bool AllowSideEffects = true) const {
#if 0
    // FIXME: This should exist once all platforms that use stack protectors
    // implement it.
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::buildClearRegister!");
#endif
  }

  /// Return true if the function can safely be outlined from.
  /// A function \p MF is considered safe for outlining if an outlined function
  /// produced from instructions in MF will produce a program which produces
  /// the same output for any set of given inputs.
  virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                           bool OutlineFromLinkOnceODRs) const {
    llvm_unreachable("Target didn't implement "
                     "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
  }

  /// Return true if the function should be outlined from by default.
  virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const {
    return false;
  }

  /// Return true if the function is a viable candidate for machine function
  /// splitting. The criteria for if a function can be split may vary by
  /// target.
  virtual bool isFunctionSafeToSplit(const MachineFunction &MF) const;

  /// Return true if the MachineBasicBlock can safely be split to the cold
  /// section. On AArch64, certain instructions may cause a block to be unsafe
  /// to split to the cold section.
  virtual bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const {
    return true;
  }

  /// Produce the expression describing the \p MI loading a value into
  /// the physical register \p Reg. This hook should only be used with
  /// \p MIs belonging to VReg-less functions.
  virtual std::optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI, Register Reg) const;

  /// Given the generic extension instruction \p ExtMI, returns true if this
  /// extension is a likely candidate for being folded into another
  /// instruction.
  virtual bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
                                        MachineRegisterInfo &MRI) const {
    return false;
  }
  /// Return true if the function can safely be outlined from.
  /// A function \p MF is considered safe for outlining if an outlined
  /// function produced from instructions in \p MF will produce a program
  /// which produces the same output for any set of given inputs.
  virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                           bool OutlineFromLinkOnceODRs) const {
    llvm_unreachable("Target didn't implement "
                     "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
  }

  /// Return true if the function should be outlined from by default.
  virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const {
    return false;
  }

  /// Return true if the function is a viable candidate for machine function
  /// splitting. The criteria for whether a function can be split may vary by
  /// target.
  virtual bool isFunctionSafeToSplit(const MachineFunction &MF) const;

  /// Return true if the MachineBasicBlock can safely be split to the cold
  /// section. On AArch64, certain instructions may cause a block to be unsafe
  /// to split to the cold section.
  virtual bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const {
    return true;
  }

  /// Produce the expression describing the \p MI loading a value into
  /// the physical register \p Reg. This hook should only be used with
  /// \p MIs belonging to VReg-less functions.
  virtual std::optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI, Register Reg) const;

  /// Given the generic extension instruction \p ExtMI, returns true if this
  /// extension is a likely candidate for being folded into another
  /// instruction.
  virtual bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
                                        MachineRegisterInfo &MRI) const {
    return false;
  }

  /// Return MIR formatter to format/parse MIR operands. Target can override
  /// this virtual function and return a target-specific MIR formatter.
  virtual const MIRFormatter *getMIRFormatter() const {
    if (!Formatter)
      Formatter = std::make_unique<MIRFormatter>();
    return Formatter.get();
  }

  /// Returns the target-specific default value for tail duplication.
  /// This value will be used if the tail-dup-placement-threshold argument is
  /// not provided.
  virtual unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
    return OptLevel >= CodeGenOptLevel::Aggressive ? 4 : 2;
  }

  /// Returns the target-specific default value for tail merging.
  /// This value will be used if the tail-merge-size argument is not provided.
  virtual unsigned getTailMergeSize(const MachineFunction &MF) const {
    return 3;
  }

  /// Returns the callee operand from the given \p MI.
  virtual const MachineOperand &
  getCalleeOperand(const MachineInstr &MI) const {
    return MI.getOperand(0);
  }

  /// Return the uniformity behavior of the given instruction.
  virtual InstructionUniformity
  getInstructionUniformity(const MachineInstr &MI) const {
    return InstructionUniformity::Default;
  }

  /// Returns true if the given \p MI defines a TargetIndex operand that can
  /// be tracked by its offset, can have values, and can have debug info
  /// associated with it. If so, sets \p Index and \p Offset of the target
  /// index operand.
  virtual bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index,
                                        int64_t &Offset) const {
    return false;
  }

  /// Get the call frame size just before \p MI.
  unsigned getCallFrameSizeAt(MachineInstr &MI) const;

  /// Fills in the necessary MachineOperands to refer to a frame index.
  /// The best way to understand this is to print `asm(""::"m"(x));` after
  /// finalize-isel. Example:
  /// INLINEASM ... 262190 /* mem:m */, %stack.0.x.addr, 1, $noreg, 0, $noreg
  /// we would add placeholders for:   ^                  ^ ^       ^ ^
  virtual void getFrameIndexOperands(SmallVectorImpl<MachineOperand> &Ops,
                                     int FI) const {
    llvm_unreachable("unknown number of operands necessary");
  }
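  // A minimal sketch of getFrameIndexOperands, modeled on the x86-like
  // five-operand memory reference shown in the INLINEASM example above
  // (base, scale, index, displacement, segment). That operand shape is an
  // assumption; each target must emit whatever its own memory references
  // look like.
  //
  //   void MyTargetInstrInfo::getFrameIndexOperands(
  //       SmallVectorImpl<MachineOperand> &Ops, int FI) const {
  //     Ops.push_back(MachineOperand::CreateFI(FI));        // base
  //     Ops.push_back(MachineOperand::CreateImm(1));        // scale
  //     Ops.push_back(MachineOperand::CreateReg(0, false)); // no index reg
  //     Ops.push_back(MachineOperand::CreateImm(0));        // displacement
  //     Ops.push_back(MachineOperand::CreateReg(0, false)); // no segment reg
  //   }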
private:
  mutable std::unique_ptr<MIRFormatter> Formatter;
  unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
  unsigned CatchRetOpcode;
  unsigned ReturnOpcode;
};

/// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
template <> struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
  using RegInfo = DenseMapInfo<Register>;
  using SubRegInfo = DenseMapInfo<unsigned>;

  static inline TargetInstrInfo::RegSubRegPair getEmptyKey() {
    return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
                                          SubRegInfo::getEmptyKey());
  }

  static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() {
    return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
                                          SubRegInfo::getTombstoneKey());
  }

  /// Reuse getHashValue implementation from
  /// std::pair<unsigned, unsigned>.
  static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
    return DenseMapInfo<std::pair<Register, unsigned>>::getHashValue(
        std::make_pair(Val.Reg, Val.SubReg));
  }

  static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS,
                      const TargetInstrInfo::RegSubRegPair &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETINSTRINFO_H