1 //===-- PPCInstrInfo.h - PowerPC Instruction Information --------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file contains the PowerPC implementation of the TargetInstrInfo class. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #ifndef LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H 14 #define LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H 15 16 #include "PPCRegisterInfo.h" 17 #include "llvm/CodeGen/TargetInstrInfo.h" 18 19 #define GET_INSTRINFO_HEADER 20 #include "PPCGenInstrInfo.inc" 21 22 namespace llvm { 23 24 /// PPCII - This namespace holds all of the PowerPC target-specific 25 /// per-instruction flags. These must match the corresponding definitions in 26 /// PPC.td and PPCInstrFormats.td. 27 namespace PPCII { 28 enum { 29 // PPC970 Instruction Flags. These flags describe the characteristics of the 30 // PowerPC 970 (aka G5) dispatch groups and how they are formed out of 31 // raw machine instructions. 32 33 /// PPC970_First - This instruction starts a new dispatch group, so it will 34 /// always be the first one in the group. 35 PPC970_First = 0x1, 36 37 /// PPC970_Single - This instruction starts a new dispatch group and 38 /// terminates it, so it will be the sole instruction in the group. 39 PPC970_Single = 0x2, 40 41 /// PPC970_Cracked - This instruction is cracked into two pieces, requiring 42 /// two dispatch pipes to be available to issue. 43 PPC970_Cracked = 0x4, 44 45 /// PPC970_Mask/Shift - This is a bitmask that selects the pipeline type that 46 /// an instruction is issued to. 47 PPC970_Shift = 3, 48 PPC970_Mask = 0x07 << PPC970_Shift 49 }; 50 enum PPC970_Unit { 51 /// These are the various PPC970 execution unit pipelines. Each instruction 52 /// is one of these. 53 PPC970_Pseudo = 0 << PPC970_Shift, // Pseudo instruction 54 PPC970_FXU = 1 << PPC970_Shift, // Fixed Point (aka Integer/ALU) Unit 55 PPC970_LSU = 2 << PPC970_Shift, // Load Store Unit 56 PPC970_FPU = 3 << PPC970_Shift, // Floating Point Unit 57 PPC970_CRU = 4 << PPC970_Shift, // Control Register Unit 58 PPC970_VALU = 5 << PPC970_Shift, // Vector ALU 59 PPC970_VPERM = 6 << PPC970_Shift, // Vector Permute Unit 60 PPC970_BRU = 7 << PPC970_Shift // Branch Unit 61 }; 62 63 enum { 64 /// Shift count to bypass PPC970 flags 65 NewDef_Shift = 6, 66 67 /// This instruction is an X-Form memory operation. 68 XFormMemOp = 0x1 << NewDef_Shift, 69 /// This instruction is prefixed. 70 Prefixed = 0x1 << (NewDef_Shift+1) 71 }; 72 } // end namespace PPCII 73 74 // Instructions that have an immediate form might be convertible to that 75 // form if the correct input is a result of a load immediate. In order to 76 // know whether the transformation is special, we might need to know some 77 // of the details of the two forms. 78 struct ImmInstrInfo { 79 // Is the immediate field in the immediate form signed or unsigned? 80 uint64_t SignedImm : 1; 81 // Does the immediate need to be a multiple of some value? 82 uint64_t ImmMustBeMultipleOf : 5; 83 // Is R0/X0 treated specially by the original r+r instruction? 84 // If so, in which operand? 85 uint64_t ZeroIsSpecialOrig : 3; 86 // Is R0/X0 treated specially by the new r+i instruction? 87 // If so, in which operand? 88 uint64_t ZeroIsSpecialNew : 3; 89 // Is the operation commutative? 90 uint64_t IsCommutative : 1; 91 // The operand number to check for add-immediate def. 92 uint64_t OpNoForForwarding : 3; 93 // The operand number for the immediate. 94 uint64_t ImmOpNo : 3; 95 // The opcode of the new instruction. 96 uint64_t ImmOpcode : 16; 97 // The size of the immediate. 98 uint64_t ImmWidth : 5; 99 // The immediate should be truncated to N bits. 100 uint64_t TruncateImmTo : 5; 101 // Is the instruction summing the operand 102 uint64_t IsSummingOperands : 1; 103 }; 104 105 // Information required to convert an instruction to just a materialized 106 // immediate. 107 struct LoadImmediateInfo { 108 unsigned Imm : 16; 109 unsigned Is64Bit : 1; 110 unsigned SetCR : 1; 111 }; 112 113 // Index into the OpcodesForSpill array. 114 enum SpillOpcodeKey { 115 SOK_Int4Spill, 116 SOK_Int8Spill, 117 SOK_Float8Spill, 118 SOK_Float4Spill, 119 SOK_CRSpill, 120 SOK_CRBitSpill, 121 SOK_VRVectorSpill, 122 SOK_VSXVectorSpill, 123 SOK_VectorFloat8Spill, 124 SOK_VectorFloat4Spill, 125 SOK_SpillToVSR, 126 SOK_PairedVecSpill, 127 SOK_AccumulatorSpill, 128 SOK_UAccumulatorSpill, 129 SOK_SPESpill, 130 SOK_PairedG8Spill, 131 SOK_LastOpcodeSpill // This must be last on the enum. 132 }; 133 134 // Define list of load and store spill opcodes. 135 #define NoInstr PPC::INSTRUCTION_LIST_END 136 #define Pwr8LoadOpcodes \ 137 { \ 138 PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \ 139 PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXVD2X, PPC::LXSDX, PPC::LXSSPX, \ 140 PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, PPC::EVLDD, \ 141 PPC::RESTORE_QUADWORD \ 142 } 143 144 #define Pwr9LoadOpcodes \ 145 { \ 146 PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \ 147 PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \ 148 PPC::DFLOADf32, PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, \ 149 NoInstr, PPC::RESTORE_QUADWORD \ 150 } 151 152 #define Pwr10LoadOpcodes \ 153 { \ 154 PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \ 155 PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \ 156 PPC::DFLOADf32, PPC::SPILLTOVSR_LD, PPC::LXVP, PPC::RESTORE_ACC, \ 157 PPC::RESTORE_UACC, NoInstr, PPC::RESTORE_QUADWORD \ 158 } 159 160 #define Pwr8StoreOpcodes \ 161 { \ 162 PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \ 163 PPC::STVX, PPC::STXVD2X, PPC::STXSDX, PPC::STXSSPX, \ 164 PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, PPC::EVSTDD, \ 165 PPC::SPILL_QUADWORD \ 166 } 167 168 #define Pwr9StoreOpcodes \ 169 { \ 170 PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \ 171 PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \ 172 PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, NoInstr, \ 173 PPC::SPILL_QUADWORD \ 174 } 175 176 #define Pwr10StoreOpcodes \ 177 { \ 178 PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \ 179 PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \ 180 PPC::SPILLTOVSR_ST, PPC::STXVP, PPC::SPILL_ACC, PPC::SPILL_UACC, \ 181 NoInstr, PPC::SPILL_QUADWORD \ 182 } 183 184 // Initialize arrays for load and store spill opcodes on supported subtargets. 185 #define StoreOpcodesForSpill \ 186 { Pwr8StoreOpcodes, Pwr9StoreOpcodes, Pwr10StoreOpcodes } 187 #define LoadOpcodesForSpill \ 188 { Pwr8LoadOpcodes, Pwr9LoadOpcodes, Pwr10LoadOpcodes } 189 190 class PPCSubtarget; 191 class PPCInstrInfo : public PPCGenInstrInfo { 192 PPCSubtarget &Subtarget; 193 const PPCRegisterInfo RI; 194 const unsigned StoreSpillOpcodesArray[3][SOK_LastOpcodeSpill] = 195 StoreOpcodesForSpill; 196 const unsigned LoadSpillOpcodesArray[3][SOK_LastOpcodeSpill] = 197 LoadOpcodesForSpill; 198 199 void StoreRegToStackSlot(MachineFunction &MF, unsigned SrcReg, bool isKill, 200 int FrameIdx, const TargetRegisterClass *RC, 201 SmallVectorImpl<MachineInstr *> &NewMIs) const; 202 void LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL, 203 unsigned DestReg, int FrameIdx, 204 const TargetRegisterClass *RC, 205 SmallVectorImpl<MachineInstr *> &NewMIs) const; 206 207 // Replace the instruction with single LI if possible. \p DefMI must be LI or 208 // LI8. 209 bool simplifyToLI(MachineInstr &MI, MachineInstr &DefMI, 210 unsigned OpNoForForwarding, MachineInstr **KilledDef) const; 211 // If the inst is imm-form and its register operand is produced by a ADDI, put 212 // the imm into the inst directly and remove the ADDI if possible. 213 bool transformToNewImmFormFedByAdd(MachineInstr &MI, MachineInstr &DefMI, 214 unsigned OpNoForForwarding) const; 215 // If the inst is x-form and has imm-form and one of its operand is produced 216 // by a LI, put the imm into the inst directly and remove the LI if possible. 217 bool transformToImmFormFedByLI(MachineInstr &MI, const ImmInstrInfo &III, 218 unsigned ConstantOpNo, 219 MachineInstr &DefMI) const; 220 // If the inst is x-form and has imm-form and one of its operand is produced 221 // by an add-immediate, try to transform it when possible. 222 bool transformToImmFormFedByAdd(MachineInstr &MI, const ImmInstrInfo &III, 223 unsigned ConstantOpNo, MachineInstr &DefMI, 224 bool KillDefMI) const; 225 // Try to find that, if the instruction 'MI' contains any operand that 226 // could be forwarded from some inst that feeds it. If yes, return the 227 // Def of that operand. And OpNoForForwarding is the operand index in 228 // the 'MI' for that 'Def'. If we see another use of this Def between 229 // the Def and the MI, SeenIntermediateUse becomes 'true'. 230 MachineInstr *getForwardingDefMI(MachineInstr &MI, 231 unsigned &OpNoForForwarding, 232 bool &SeenIntermediateUse) const; 233 234 // Can the user MI have it's source at index \p OpNoForForwarding 235 // forwarded from an add-immediate that feeds it? 236 bool isUseMIElgibleForForwarding(MachineInstr &MI, const ImmInstrInfo &III, 237 unsigned OpNoForForwarding) const; 238 bool isDefMIElgibleForForwarding(MachineInstr &DefMI, 239 const ImmInstrInfo &III, 240 MachineOperand *&ImmMO, 241 MachineOperand *&RegMO) const; 242 bool isImmElgibleForForwarding(const MachineOperand &ImmMO, 243 const MachineInstr &DefMI, 244 const ImmInstrInfo &III, 245 int64_t &Imm, 246 int64_t BaseImm = 0) const; 247 bool isRegElgibleForForwarding(const MachineOperand &RegMO, 248 const MachineInstr &DefMI, 249 const MachineInstr &MI, bool KillDefMI, 250 bool &IsFwdFeederRegKilled) const; 251 unsigned getSpillTarget() const; 252 const unsigned *getStoreOpcodesForSpillArray() const; 253 const unsigned *getLoadOpcodesForSpillArray() const; 254 unsigned getSpillIndex(const TargetRegisterClass *RC) const; 255 int16_t getFMAOpIdxInfo(unsigned Opcode) const; 256 void reassociateFMA(MachineInstr &Root, MachineCombinerPattern Pattern, 257 SmallVectorImpl<MachineInstr *> &InsInstrs, 258 SmallVectorImpl<MachineInstr *> &DelInstrs, 259 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const; 260 bool isLoadFromConstantPool(MachineInstr *I) const; 261 Register 262 generateLoadForNewConst(unsigned Idx, MachineInstr *MI, Type *Ty, 263 SmallVectorImpl<MachineInstr *> &InsInstrs) const; 264 const Constant *getConstantFromConstantPool(MachineInstr *I) const; 265 virtual void anchor(); 266 267 protected: 268 /// Commutes the operands in the given instruction. 269 /// The commutable operands are specified by their indices OpIdx1 and OpIdx2. 270 /// 271 /// Do not call this method for a non-commutable instruction or for 272 /// non-commutable pair of operand indices OpIdx1 and OpIdx2. 273 /// Even though the instruction is commutable, the method may still 274 /// fail to commute the operands, null pointer is returned in such cases. 275 /// 276 /// For example, we can commute rlwimi instructions, but only if the 277 /// rotate amt is zero. We also have to munge the immediates a bit. 278 MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI, 279 unsigned OpIdx1, 280 unsigned OpIdx2) const override; 281 282 public: 283 explicit PPCInstrInfo(PPCSubtarget &STI); 284 285 /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As 286 /// such, whenever a client has an instance of instruction info, it should 287 /// always be able to get register info as well (through this method). 288 /// 289 const PPCRegisterInfo &getRegisterInfo() const { return RI; } 290 291 bool isXFormMemOp(unsigned Opcode) const { 292 return get(Opcode).TSFlags & PPCII::XFormMemOp; 293 } 294 bool isPrefixed(unsigned Opcode) const { 295 return get(Opcode).TSFlags & PPCII::Prefixed; 296 } 297 298 static bool isSameClassPhysRegCopy(unsigned Opcode) { 299 unsigned CopyOpcodes[] = {PPC::OR, PPC::OR8, PPC::FMR, 300 PPC::VOR, PPC::XXLOR, PPC::XXLORf, 301 PPC::XSCPSGNDP, PPC::MCRF, PPC::CROR, 302 PPC::EVOR, -1U}; 303 for (int i = 0; CopyOpcodes[i] != -1U; i++) 304 if (Opcode == CopyOpcodes[i]) 305 return true; 306 return false; 307 } 308 309 ScheduleHazardRecognizer * 310 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, 311 const ScheduleDAG *DAG) const override; 312 ScheduleHazardRecognizer * 313 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, 314 const ScheduleDAG *DAG) const override; 315 316 unsigned getInstrLatency(const InstrItineraryData *ItinData, 317 const MachineInstr &MI, 318 unsigned *PredCost = nullptr) const override; 319 320 int getOperandLatency(const InstrItineraryData *ItinData, 321 const MachineInstr &DefMI, unsigned DefIdx, 322 const MachineInstr &UseMI, 323 unsigned UseIdx) const override; 324 int getOperandLatency(const InstrItineraryData *ItinData, 325 SDNode *DefNode, unsigned DefIdx, 326 SDNode *UseNode, unsigned UseIdx) const override { 327 return PPCGenInstrInfo::getOperandLatency(ItinData, DefNode, DefIdx, 328 UseNode, UseIdx); 329 } 330 331 bool hasLowDefLatency(const TargetSchedModel &SchedModel, 332 const MachineInstr &DefMI, 333 unsigned DefIdx) const override { 334 // Machine LICM should hoist all instructions in low-register-pressure 335 // situations; none are sufficiently free to justify leaving in a loop 336 // body. 337 return false; 338 } 339 340 bool useMachineCombiner() const override { 341 return true; 342 } 343 344 /// When getMachineCombinerPatterns() finds patterns, this function generates 345 /// the instructions that could replace the original code sequence 346 void genAlternativeCodeSequence( 347 MachineInstr &Root, MachineCombinerPattern Pattern, 348 SmallVectorImpl<MachineInstr *> &InsInstrs, 349 SmallVectorImpl<MachineInstr *> &DelInstrs, 350 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override; 351 352 /// Return true when there is potentially a faster code sequence for a fma 353 /// chain ending in \p Root. All potential patterns are output in the \p 354 /// P array. 355 bool getFMAPatterns(MachineInstr &Root, 356 SmallVectorImpl<MachineCombinerPattern> &P, 357 bool DoRegPressureReduce) const; 358 359 /// Return true when there is potentially a faster code sequence 360 /// for an instruction chain ending in <Root>. All potential patterns are 361 /// output in the <Pattern> array. 362 bool getMachineCombinerPatterns(MachineInstr &Root, 363 SmallVectorImpl<MachineCombinerPattern> &P, 364 bool DoRegPressureReduce) const override; 365 366 /// On PowerPC, we leverage machine combiner pass to reduce register pressure 367 /// when the register pressure is high for one BB. 368 /// Return true if register pressure for \p MBB is high and ABI is supported 369 /// to reduce register pressure. Otherwise return false. 370 bool 371 shouldReduceRegisterPressure(MachineBasicBlock *MBB, 372 RegisterClassInfo *RegClassInfo) const override; 373 374 /// Fixup the placeholders we put in genAlternativeCodeSequence() for 375 /// MachineCombiner. 376 void 377 finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P, 378 SmallVectorImpl<MachineInstr *> &InsInstrs) const override; 379 380 bool isAssociativeAndCommutative(const MachineInstr &Inst) const override; 381 382 /// On PowerPC, we try to reassociate FMA chain which will increase 383 /// instruction size. Set extension resource length limit to 1 for edge case. 384 /// Resource Length is calculated by scaled resource usage in getCycles(). 385 /// Because of the division in getCycles(), it returns different cycles due to 386 /// legacy scaled resource usage. So new resource length may be same with 387 /// legacy or 1 bigger than legacy. 388 /// We need to execlude the 1 bigger case even the resource length is not 389 /// perserved for more FMA chain reassociations on PowerPC. 390 int getExtendResourceLenLimit() const override { return 1; } 391 392 void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, 393 MachineInstr &NewMI1, 394 MachineInstr &NewMI2) const override; 395 396 // PowerPC specific version of setSpecialOperandAttr that copies Flags to MI 397 // and clears nuw, nsw, and exact flags. 398 void setSpecialOperandAttr(MachineInstr &MI, uint16_t Flags) const; 399 400 bool isCoalescableExtInstr(const MachineInstr &MI, 401 Register &SrcReg, Register &DstReg, 402 unsigned &SubIdx) const override; 403 unsigned isLoadFromStackSlot(const MachineInstr &MI, 404 int &FrameIndex) const override; 405 bool isReallyTriviallyReMaterializable(const MachineInstr &MI, 406 AAResults *AA) const override; 407 unsigned isStoreToStackSlot(const MachineInstr &MI, 408 int &FrameIndex) const override; 409 410 bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, 411 unsigned &SrcOpIdx2) const override; 412 413 void insertNoop(MachineBasicBlock &MBB, 414 MachineBasicBlock::iterator MI) const override; 415 416 417 // Branch analysis. 418 bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 419 MachineBasicBlock *&FBB, 420 SmallVectorImpl<MachineOperand> &Cond, 421 bool AllowModify) const override; 422 unsigned removeBranch(MachineBasicBlock &MBB, 423 int *BytesRemoved = nullptr) const override; 424 unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, 425 MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond, 426 const DebugLoc &DL, 427 int *BytesAdded = nullptr) const override; 428 429 // Select analysis. 430 bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond, 431 Register, Register, Register, int &, int &, 432 int &) const override; 433 void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, 434 const DebugLoc &DL, Register DstReg, 435 ArrayRef<MachineOperand> Cond, Register TrueReg, 436 Register FalseReg) const override; 437 438 void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, 439 const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, 440 bool KillSrc) const override; 441 442 void storeRegToStackSlot(MachineBasicBlock &MBB, 443 MachineBasicBlock::iterator MBBI, 444 Register SrcReg, bool isKill, int FrameIndex, 445 const TargetRegisterClass *RC, 446 const TargetRegisterInfo *TRI) const override; 447 448 // Emits a register spill without updating the register class for vector 449 // registers. This ensures that when we spill a vector register the 450 // element order in the register is the same as it was in memory. 451 void storeRegToStackSlotNoUpd(MachineBasicBlock &MBB, 452 MachineBasicBlock::iterator MBBI, 453 unsigned SrcReg, bool isKill, int FrameIndex, 454 const TargetRegisterClass *RC, 455 const TargetRegisterInfo *TRI) const; 456 457 void loadRegFromStackSlot(MachineBasicBlock &MBB, 458 MachineBasicBlock::iterator MBBI, 459 Register DestReg, int FrameIndex, 460 const TargetRegisterClass *RC, 461 const TargetRegisterInfo *TRI) const override; 462 463 // Emits a register reload without updating the register class for vector 464 // registers. This ensures that when we reload a vector register the 465 // element order in the register is the same as it was in memory. 466 void loadRegFromStackSlotNoUpd(MachineBasicBlock &MBB, 467 MachineBasicBlock::iterator MBBI, 468 unsigned DestReg, int FrameIndex, 469 const TargetRegisterClass *RC, 470 const TargetRegisterInfo *TRI) const; 471 472 unsigned getStoreOpcodeForSpill(const TargetRegisterClass *RC) const; 473 474 unsigned getLoadOpcodeForSpill(const TargetRegisterClass *RC) const; 475 476 bool 477 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override; 478 479 bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, 480 MachineRegisterInfo *MRI) const override; 481 482 bool onlyFoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 483 Register Reg) const; 484 485 // If conversion by predication (only supported by some branch instructions). 486 // All of the profitability checks always return true; it is always 487 // profitable to use the predicated branches. 488 bool isProfitableToIfCvt(MachineBasicBlock &MBB, 489 unsigned NumCycles, unsigned ExtraPredCycles, 490 BranchProbability Probability) const override { 491 return true; 492 } 493 494 bool isProfitableToIfCvt(MachineBasicBlock &TMBB, 495 unsigned NumT, unsigned ExtraT, 496 MachineBasicBlock &FMBB, 497 unsigned NumF, unsigned ExtraF, 498 BranchProbability Probability) const override; 499 500 bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, 501 BranchProbability Probability) const override { 502 return true; 503 } 504 505 bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, 506 MachineBasicBlock &FMBB) const override { 507 return false; 508 } 509 510 // Predication support. 511 bool isPredicated(const MachineInstr &MI) const override; 512 513 bool isSchedulingBoundary(const MachineInstr &MI, 514 const MachineBasicBlock *MBB, 515 const MachineFunction &MF) const override; 516 517 bool PredicateInstruction(MachineInstr &MI, 518 ArrayRef<MachineOperand> Pred) const override; 519 520 bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1, 521 ArrayRef<MachineOperand> Pred2) const override; 522 523 bool ClobbersPredicate(MachineInstr &MI, std::vector<MachineOperand> &Pred, 524 bool SkipDead) const override; 525 526 // Comparison optimization. 527 528 bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, 529 Register &SrcReg2, int64_t &Mask, 530 int64_t &Value) const override; 531 532 bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, 533 Register SrcReg2, int64_t Mask, int64_t Value, 534 const MachineRegisterInfo *MRI) const override; 535 536 537 /// Return true if get the base operand, byte offset of an instruction and 538 /// the memory width. Width is the size of memory that is being 539 /// loaded/stored (e.g. 1, 2, 4, 8). 540 bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, 541 const MachineOperand *&BaseOp, 542 int64_t &Offset, unsigned &Width, 543 const TargetRegisterInfo *TRI) const; 544 545 /// Get the base operand and byte offset of an instruction that reads/writes 546 /// memory. 547 bool getMemOperandsWithOffsetWidth( 548 const MachineInstr &LdSt, 549 SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset, 550 bool &OffsetIsScalable, unsigned &Width, 551 const TargetRegisterInfo *TRI) const override; 552 553 /// Returns true if the two given memory operations should be scheduled 554 /// adjacent. 555 bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1, 556 ArrayRef<const MachineOperand *> BaseOps2, 557 unsigned NumLoads, unsigned NumBytes) const override; 558 559 /// Return true if two MIs access different memory addresses and false 560 /// otherwise 561 bool 562 areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, 563 const MachineInstr &MIb) const override; 564 565 /// GetInstSize - Return the number of bytes of code the specified 566 /// instruction may be. This returns the maximum number of bytes. 567 /// 568 unsigned getInstSizeInBytes(const MachineInstr &MI) const override; 569 570 MCInst getNop() const override; 571 572 std::pair<unsigned, unsigned> 573 decomposeMachineOperandsTargetFlags(unsigned TF) const override; 574 575 ArrayRef<std::pair<unsigned, const char *>> 576 getSerializableDirectMachineOperandTargetFlags() const override; 577 578 ArrayRef<std::pair<unsigned, const char *>> 579 getSerializableBitmaskMachineOperandTargetFlags() const override; 580 581 // Expand VSX Memory Pseudo instruction to either a VSX or a FP instruction. 582 bool expandVSXMemPseudo(MachineInstr &MI) const; 583 584 // Lower pseudo instructions after register allocation. 585 bool expandPostRAPseudo(MachineInstr &MI) const override; 586 587 static bool isVFRegister(unsigned Reg) { 588 return Reg >= PPC::VF0 && Reg <= PPC::VF31; 589 } 590 static bool isVRRegister(unsigned Reg) { 591 return Reg >= PPC::V0 && Reg <= PPC::V31; 592 } 593 const TargetRegisterClass *updatedRC(const TargetRegisterClass *RC) const; 594 static int getRecordFormOpcode(unsigned Opcode); 595 596 bool isTOCSaveMI(const MachineInstr &MI) const; 597 598 bool isSignOrZeroExtended(const MachineInstr &MI, bool SignExt, 599 const unsigned PhiDepth) const; 600 601 /// Return true if the output of the instruction is always a sign-extended, 602 /// i.e. 0 to 31-th bits are same as 32-th bit. 603 bool isSignExtended(const MachineInstr &MI, const unsigned depth = 0) const { 604 return isSignOrZeroExtended(MI, true, depth); 605 } 606 607 /// Return true if the output of the instruction is always zero-extended, 608 /// i.e. 0 to 31-th bits are all zeros 609 bool isZeroExtended(const MachineInstr &MI, const unsigned depth = 0) const { 610 return isSignOrZeroExtended(MI, false, depth); 611 } 612 613 bool convertToImmediateForm(MachineInstr &MI, 614 MachineInstr **KilledDef = nullptr) const; 615 bool foldFrameOffset(MachineInstr &MI) const; 616 bool combineRLWINM(MachineInstr &MI, MachineInstr **ToErase = nullptr) const; 617 bool isADDIInstrEligibleForFolding(MachineInstr &ADDIMI, int64_t &Imm) const; 618 bool isADDInstrEligibleForFolding(MachineInstr &ADDMI) const; 619 bool isImmInstrEligibleForFolding(MachineInstr &MI, unsigned &BaseReg, 620 unsigned &XFormOpcode, 621 int64_t &OffsetOfImmInstr, 622 ImmInstrInfo &III) const; 623 bool isValidToBeChangedReg(MachineInstr *ADDMI, unsigned Index, 624 MachineInstr *&ADDIMI, int64_t &OffsetAddi, 625 int64_t OffsetImm) const; 626 627 /// Fixup killed/dead flag for register \p RegNo between instructions [\p 628 /// StartMI, \p EndMI]. Some pre-RA or post-RA transformations may violate 629 /// register killed/dead flags semantics, this function can be called to fix 630 /// up. Before calling this function, 631 /// 1. Ensure that \p RegNo liveness is killed after instruction \p EndMI. 632 /// 2. Ensure that there is no new definition between (\p StartMI, \p EndMI) 633 /// and possible definition for \p RegNo is \p StartMI or \p EndMI. For 634 /// pre-RA cases, definition may be \p StartMI through COPY, \p StartMI 635 /// will be adjust to true definition. 636 /// 3. We can do accurate fixup for the case when all instructions between 637 /// [\p StartMI, \p EndMI] are in same basic block. 638 /// 4. For the case when \p StartMI and \p EndMI are not in same basic block, 639 /// we conservatively clear kill flag for all uses of \p RegNo for pre-RA 640 /// and for post-RA, we give an assertion as without reaching definition 641 /// analysis post-RA, \p StartMI and \p EndMI are hard to keep right. 642 void fixupIsDeadOrKill(MachineInstr *StartMI, MachineInstr *EndMI, 643 unsigned RegNo) const; 644 void replaceInstrWithLI(MachineInstr &MI, const LoadImmediateInfo &LII) const; 645 void replaceInstrOperandWithImm(MachineInstr &MI, unsigned OpNo, 646 int64_t Imm) const; 647 648 bool instrHasImmForm(unsigned Opc, bool IsVFReg, ImmInstrInfo &III, 649 bool PostRA) const; 650 651 // In PostRA phase, try to find instruction defines \p Reg before \p MI. 652 // \p SeenIntermediate is set to true if uses between DefMI and \p MI exist. 653 MachineInstr *getDefMIPostRA(unsigned Reg, MachineInstr &MI, 654 bool &SeenIntermediateUse) const; 655 656 /// getRegNumForOperand - some operands use different numbering schemes 657 /// for the same registers. For example, a VSX instruction may have any of 658 /// vs0-vs63 allocated whereas an Altivec instruction could only have 659 /// vs32-vs63 allocated (numbered as v0-v31). This function returns the actual 660 /// register number needed for the opcode/operand number combination. 661 /// The operand number argument will be useful when we need to extend this 662 /// to instructions that use both Altivec and VSX numbering (for different 663 /// operands). 664 static unsigned getRegNumForOperand(const MCInstrDesc &Desc, unsigned Reg, 665 unsigned OpNo) { 666 int16_t regClass = Desc.OpInfo[OpNo].RegClass; 667 switch (regClass) { 668 // We store F0-F31, VF0-VF31 in MCOperand and it should be F0-F31, 669 // VSX32-VSX63 during encoding/disassembling 670 case PPC::VSSRCRegClassID: 671 case PPC::VSFRCRegClassID: 672 if (isVFRegister(Reg)) 673 return PPC::VSX32 + (Reg - PPC::VF0); 674 break; 675 // We store VSL0-VSL31, V0-V31 in MCOperand and it should be VSL0-VSL31, 676 // VSX32-VSX63 during encoding/disassembling 677 case PPC::VSRCRegClassID: 678 if (isVRRegister(Reg)) 679 return PPC::VSX32 + (Reg - PPC::V0); 680 break; 681 // Other RegClass doesn't need mapping 682 default: 683 break; 684 } 685 return Reg; 686 } 687 688 /// Check \p Opcode is BDNZ (Decrement CTR and branch if it is still nonzero). 689 bool isBDNZ(unsigned Opcode) const; 690 691 /// Find the hardware loop instruction used to set-up the specified loop. 692 /// On PPC, we have two instructions used to set-up the hardware loop 693 /// (MTCTRloop, MTCTR8loop) with corresponding endloop (BDNZ, BDNZ8) 694 /// instructions to indicate the end of a loop. 695 MachineInstr * 696 findLoopInstr(MachineBasicBlock &PreHeader, 697 SmallPtrSet<MachineBasicBlock *, 8> &Visited) const; 698 699 /// Analyze loop L, which must be a single-basic-block loop, and if the 700 /// conditions can be understood enough produce a PipelinerLoopInfo object. 701 std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo> 702 analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override; 703 }; 704 705 } 706 707 #endif 708