//===-- PPCInstrInfo.h - PowerPC Instruction Information --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the PowerPC implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H
#define LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H

#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "PPC.h"
#include "PPCRegisterInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "PPCGenInstrInfo.inc"

namespace llvm {
// Instructions that have an immediate form might be convertible to that
// form if the correct input is a result of a load immediate. In order to
// know whether the transformation is special, we might need to know some
// of the details of the two forms.
struct ImmInstrInfo {
  // Is the immediate field in the immediate form signed or unsigned?
  uint64_t SignedImm : 1;
  // Does the immediate need to be a multiple of some value?
  uint64_t ImmMustBeMultipleOf : 5;
  // Is R0/X0 treated specially by the original r+r instruction?
  // If so, in which operand?
  uint64_t ZeroIsSpecialOrig : 3;
  // Is R0/X0 treated specially by the new r+i instruction?
  // If so, in which operand?
  uint64_t ZeroIsSpecialNew : 3;
  // Is the operation commutative?
  uint64_t IsCommutative : 1;
  // The operand number to check for an add-immediate def.
  uint64_t OpNoForForwarding : 3;
  // The operand number for the immediate.
  uint64_t ImmOpNo : 3;
  // The opcode of the new instruction.
  uint64_t ImmOpcode : 16;
  // The width of the immediate, in bits.
  uint64_t ImmWidth : 5;
  // The immediate should be truncated to N bits.
  uint64_t TruncateImmTo : 5;
  // Is the instruction summing the operands?
  uint64_t IsSummingOperands : 1;
};
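
// For example (illustrative), an r+r instruction fed by a load-immediate,
//   li r4, 10
//   add r3, r3, r4
// may be rewritten to its r+i form,
//   addi r3, r3, 10
// when the immediate satisfies the constraints described by these fields.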

// Information required to convert an instruction to just a materialized
// immediate.
struct LoadImmediateInfo {
  unsigned Imm : 16;
  unsigned Is64Bit : 1;
  unsigned SetCR : 1;
};
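
// For example (illustrative), materializing the 32-bit constant 0 as
// `li r3, 0` corresponds to Imm = 0, Is64Bit = 0 and SetCR = 0.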

// Index into the OpcodesForSpill array.
enum SpillOpcodeKey {
  SOK_Int4Spill,
  SOK_Int8Spill,
  SOK_Float8Spill,
  SOK_Float4Spill,
  SOK_CRSpill,
  SOK_CRBitSpill,
  SOK_VRVectorSpill,
  SOK_VSXVectorSpill,
  SOK_VectorFloat8Spill,
  SOK_VectorFloat4Spill,
  SOK_SpillToVSR,
  SOK_PairedVecSpill,
  SOK_AccumulatorSpill,
  SOK_UAccumulatorSpill,
  SOK_WAccumulatorSpill,
  SOK_SPESpill,
  SOK_PairedG8Spill,
  SOK_LastOpcodeSpill // This must be last in the enum.
};

// Define list of load and store spill opcodes.
#define NoInstr PPC::INSTRUCTION_LIST_END
#define Pwr8LoadOpcodes                                                        \
  {                                                                            \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR,                    \
        PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXVD2X, PPC::LXSDX, PPC::LXSSPX,    \
        PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, NoInstr, PPC::EVLDD,    \
        PPC::RESTORE_QUADWORD                                                  \
  }

#define Pwr9LoadOpcodes                                                        \
  {                                                                            \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR,                    \
        PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64,                \
        PPC::DFLOADf32, PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr,         \
        NoInstr, NoInstr, PPC::RESTORE_QUADWORD                                \
  }

#define Pwr10LoadOpcodes                                                       \
  {                                                                            \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR,                    \
        PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64,                \
        PPC::DFLOADf32, PPC::SPILLTOVSR_LD, PPC::LXVP, PPC::RESTORE_ACC,       \
        PPC::RESTORE_UACC, NoInstr, NoInstr, PPC::RESTORE_QUADWORD             \
  }

#define FutureLoadOpcodes                                                      \
  {                                                                            \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR,                    \
        PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64,                \
        PPC::DFLOADf32, PPC::SPILLTOVSR_LD, PPC::LXVP, PPC::RESTORE_ACC,       \
        PPC::RESTORE_UACC, PPC::RESTORE_WACC, NoInstr, PPC::RESTORE_QUADWORD   \
  }

#define Pwr8StoreOpcodes                                                       \
  {                                                                            \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
        PPC::STVX, PPC::STXVD2X, PPC::STXSDX, PPC::STXSSPX,                    \
        PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, NoInstr, PPC::EVSTDD,   \
        PPC::SPILL_QUADWORD                                                    \
  }

#define Pwr9StoreOpcodes                                                       \
  {                                                                            \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
        PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32,                \
        PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, NoInstr, NoInstr,       \
        PPC::SPILL_QUADWORD                                                    \
  }

#define Pwr10StoreOpcodes                                                     \
  {                                                                            \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
        PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32,                \
        PPC::SPILLTOVSR_ST, PPC::STXVP, PPC::SPILL_ACC, PPC::SPILL_UACC,       \
        NoInstr, NoInstr, PPC::SPILL_QUADWORD                                  \
  }

#define FutureStoreOpcodes                                                     \
  {                                                                            \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
        PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32,                \
        PPC::SPILLTOVSR_ST, PPC::STXVP, PPC::SPILL_ACC, PPC::SPILL_UACC,       \
        PPC::SPILL_WACC, NoInstr, PPC::SPILL_QUADWORD                          \
  }

// Initialize arrays for load and store spill opcodes on supported subtargets.
#define StoreOpcodesForSpill                                                   \
  { Pwr8StoreOpcodes, Pwr9StoreOpcodes, Pwr10StoreOpcodes, FutureStoreOpcodes }
#define LoadOpcodesForSpill                                                    \
  { Pwr8LoadOpcodes, Pwr9LoadOpcodes, Pwr10LoadOpcodes, FutureLoadOpcodes }
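
// The spill arrays built from these macros are indexed first by spill target
// (Power8, Power9, Power10, Future) and then by SpillOpcodeKey. For example
// (illustrative), the SOK_Int8Spill entry of Pwr9LoadOpcodes is PPC::LD.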

class PPCSubtarget;
class PPCInstrInfo : public PPCGenInstrInfo {
  PPCSubtarget &Subtarget;
  const PPCRegisterInfo RI;
  const unsigned StoreSpillOpcodesArray[4][SOK_LastOpcodeSpill] =
      StoreOpcodesForSpill;
  const unsigned LoadSpillOpcodesArray[4][SOK_LastOpcodeSpill] =
      LoadOpcodesForSpill;

  void StoreRegToStackSlot(MachineFunction &MF, unsigned SrcReg, bool isKill,
                           int FrameIdx, const TargetRegisterClass *RC,
                           SmallVectorImpl<MachineInstr *> &NewMIs) const;
  void LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
                            unsigned DestReg, int FrameIdx,
                            const TargetRegisterClass *RC,
                            SmallVectorImpl<MachineInstr *> &NewMIs) const;

  // Replace the instruction with a single LI if possible. \p DefMI must be LI
  // or LI8.
  bool simplifyToLI(MachineInstr &MI, MachineInstr &DefMI,
                    unsigned OpNoForForwarding, MachineInstr **KilledDef) const;
  // If the instruction has an immediate form and its register operand is
  // produced by an ADDI, fold the immediate into the instruction directly and
  // remove the ADDI if possible.
  bool transformToNewImmFormFedByAdd(MachineInstr &MI, MachineInstr &DefMI,
                                     unsigned OpNoForForwarding) const;
  // If the instruction is x-form, has an immediate form, and one of its
  // operands is produced by an LI, fold the immediate into the instruction
  // directly and remove the LI if possible.
  bool transformToImmFormFedByLI(MachineInstr &MI, const ImmInstrInfo &III,
                                 unsigned ConstantOpNo,
                                 MachineInstr &DefMI) const;
  // If the instruction is x-form, has an immediate form, and one of its
  // operands is produced by an add-immediate, try to transform it when
  // possible.
  bool transformToImmFormFedByAdd(MachineInstr &MI, const ImmInstrInfo &III,
                                  unsigned ConstantOpNo, MachineInstr &DefMI,
                                  bool KillDefMI) const;
  // Determine whether the instruction \p MI has any operand that could be
  // forwarded from an instruction that feeds it. If so, return the Def of
  // that operand and set OpNoForForwarding to the operand's index in \p MI.
  // If another use of this Def is seen between the Def and \p MI,
  // SeenIntermediateUse becomes 'true'.
  MachineInstr *getForwardingDefMI(MachineInstr &MI,
                                   unsigned &OpNoForForwarding,
                                   bool &SeenIntermediateUse) const;
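
  // For example (illustrative), given a sequence such as
  //   %5:g8rc = ADDI8 %4, 16
  //   STD %3, 0, %5
  // the add-immediate feeding the memory operand can be folded to yield
  //   STD %3, 16, %4
  // provided no intervening def or use blocks the forwarding.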

  // Can the user MI have its source at index \p OpNoForForwarding
  // forwarded from an add-immediate that feeds it?
  bool isUseMIElgibleForForwarding(MachineInstr &MI, const ImmInstrInfo &III,
                                   unsigned OpNoForForwarding) const;
  bool isDefMIElgibleForForwarding(MachineInstr &DefMI,
                                   const ImmInstrInfo &III,
                                   MachineOperand *&ImmMO,
                                   MachineOperand *&RegMO) const;
  bool isImmElgibleForForwarding(const MachineOperand &ImmMO,
                                 const MachineInstr &DefMI,
                                 const ImmInstrInfo &III,
                                 int64_t &Imm,
                                 int64_t BaseImm = 0) const;
  bool isRegElgibleForForwarding(const MachineOperand &RegMO,
                                 const MachineInstr &DefMI,
                                 const MachineInstr &MI, bool KillDefMI,
                                 bool &IsFwdFeederRegKilled,
                                 bool &SeenIntermediateUse) const;
  unsigned getSpillTarget() const;
  ArrayRef<unsigned> getStoreOpcodesForSpillArray() const;
  ArrayRef<unsigned> getLoadOpcodesForSpillArray() const;
  unsigned getSpillIndex(const TargetRegisterClass *RC) const;
  int16_t getFMAOpIdxInfo(unsigned Opcode) const;
  void reassociateFMA(MachineInstr &Root, MachineCombinerPattern Pattern,
                      SmallVectorImpl<MachineInstr *> &InsInstrs,
                      SmallVectorImpl<MachineInstr *> &DelInstrs,
                      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
  Register
  generateLoadForNewConst(unsigned Idx, MachineInstr *MI, Type *Ty,
                          SmallVectorImpl<MachineInstr *> &InsInstrs) const;
  virtual void anchor();

protected:
  /// Commutes the operands in the given instruction.
  /// The commutable operands are specified by their indices OpIdx1 and OpIdx2.
  ///
  /// Do not call this method for a non-commutable instruction or for a
  /// non-commutable pair of operand indices OpIdx1 and OpIdx2. Even though the
  /// instruction is commutable, the method may still fail to commute the
  /// operands; a null pointer is returned in such cases.
  ///
  /// For example, we can commute rlwimi instructions, but only if the rotate
  /// amount is zero. We also have to munge the immediates a bit.
  MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                       unsigned OpIdx1,
                                       unsigned OpIdx2) const override;

public:
  explicit PPCInstrInfo(PPCSubtarget &STI);

  bool isLoadFromConstantPool(MachineInstr *I) const;
  const Constant *getConstantFromConstantPool(MachineInstr *I) const;

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegisterInfo. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  ///
  const PPCRegisterInfo &getRegisterInfo() const { return RI; }

  bool isXFormMemOp(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::XFormMemOp;
  }
  bool isPrefixed(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::Prefixed;
  }
  bool isSExt32To64(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::SExt32To64;
  }
  bool isZExt32To64(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::ZExt32To64;
  }
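
  // For example (illustrative), EXTSW always produces a 64-bit value that is
  // the sign extension of its low 32 bits, so an opcode like it would carry
  // the SExt32To64 flag in its TSFlags.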

  static bool isSameClassPhysRegCopy(unsigned Opcode) {
    unsigned CopyOpcodes[] = {PPC::OR,        PPC::OR8,   PPC::FMR,
                              PPC::VOR,       PPC::XXLOR, PPC::XXLORf,
                              PPC::XSCPSGNDP, PPC::MCRF,  PPC::CROR,
                              PPC::EVOR,      -1U};
    for (int i = 0; CopyOpcodes[i] != -1U; i++)
      if (Opcode == CopyOpcodes[i])
        return true;
    return false;
  }
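
  // Note (illustrative): each opcode above is the canonical same-class
  // register-to-register move for one register class, e.g. `or rA, rS, rS`
  // (the `mr` mnemonic) for GPRs and `fmr` for floating-point registers.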

  static bool hasPCRelFlag(unsigned TF) {
    return TF == PPCII::MO_PCREL_FLAG || TF == PPCII::MO_GOT_TLSGD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TLSLD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TPREL_PCREL_FLAG ||
           TF == PPCII::MO_TPREL_PCREL_FLAG || TF == PPCII::MO_TLS_PCREL_FLAG ||
           TF == PPCII::MO_GOT_PCREL_FLAG;
  }

  static bool hasGOTFlag(unsigned TF) {
    return TF == PPCII::MO_GOT_FLAG || TF == PPCII::MO_GOT_TLSGD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TLSLD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TPREL_PCREL_FLAG ||
           TF == PPCII::MO_GOT_PCREL_FLAG;
  }

  static bool hasTLSFlag(unsigned TF) {
    return TF == PPCII::MO_TLSGD_FLAG || TF == PPCII::MO_TPREL_FLAG ||
           TF == PPCII::MO_TLSLD_FLAG || TF == PPCII::MO_TLSGDM_FLAG ||
           TF == PPCII::MO_GOT_TLSGD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TLSLD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TPREL_PCREL_FLAG || TF == PPCII::MO_TPREL_LO ||
           TF == PPCII::MO_TPREL_HA || TF == PPCII::MO_DTPREL_LO ||
           TF == PPCII::MO_TLSLD_LO || TF == PPCII::MO_TLS ||
           TF == PPCII::MO_TPREL_PCREL_FLAG || TF == PPCII::MO_TLS_PCREL_FLAG;
  }

  ScheduleHazardRecognizer *
  CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                               const ScheduleDAG *DAG) const override;
  ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                     const ScheduleDAG *DAG) const override;

  unsigned getInstrLatency(const InstrItineraryData *ItinData,
                           const MachineInstr &MI,
                           unsigned *PredCost = nullptr) const override;

  std::optional<unsigned> getOperandLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &DefMI,
                                            unsigned DefIdx,
                                            const MachineInstr &UseMI,
                                            unsigned UseIdx) const override;
  std::optional<unsigned> getOperandLatency(const InstrItineraryData *ItinData,
                                            SDNode *DefNode, unsigned DefIdx,
                                            SDNode *UseNode,
                                            unsigned UseIdx) const override {
    return PPCGenInstrInfo::getOperandLatency(ItinData, DefNode, DefIdx,
                                              UseNode, UseIdx);
  }

  bool hasLowDefLatency(const TargetSchedModel &SchedModel,
                        const MachineInstr &DefMI,
                        unsigned DefIdx) const override {
    // Machine LICM should hoist all instructions in low-register-pressure
    // situations; none are sufficiently free to justify leaving in a loop
    // body.
    return false;
  }

  bool useMachineCombiner() const override {
    return true;
  }

  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;

  /// Return true when there is potentially a faster code sequence for an FMA
  /// chain ending in \p Root. All potential patterns are output in the \p P
  /// array.
  bool getFMAPatterns(MachineInstr &Root,
                      SmallVectorImpl<MachineCombinerPattern> &P,
                      bool DoRegPressureReduce) const;

  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in <Root>. All potential patterns are
  /// output in the <Pattern> array.
  bool getMachineCombinerPatterns(MachineInstr &Root,
                                  SmallVectorImpl<MachineCombinerPattern> &P,
                                  bool DoRegPressureReduce) const override;

  /// On PowerPC, we leverage the machine combiner pass to reduce register
  /// pressure when it is high within a basic block. Return true if register
  /// pressure for \p MBB is high and the ABI is supported for reducing it;
  /// otherwise return false.
  bool shouldReduceRegisterPressure(
      const MachineBasicBlock *MBB,
      const RegisterClassInfo *RegClassInfo) const override;

  /// Fixup the placeholders we put in genAlternativeCodeSequence() for
  /// MachineCombiner.
  void
  finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P,
                    SmallVectorImpl<MachineInstr *> &InsInstrs) const override;

  bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                   bool Invert) const override;

  /// On PowerPC, we try to reassociate FMA chains, which can increase code
  /// size. Set the extension resource length limit to 1 for this edge case.
  /// Resource length is calculated from scaled resource usage in getCycles().
  /// Because of the division in getCycles(), the legacy scaled resource usage
  /// can yield different cycle counts, so the new resource length may be the
  /// same as the legacy one or bigger than it by 1. We need to tolerate the
  /// bigger-by-1 case, even though the resource length is not preserved, to
  /// allow more FMA chain reassociations on PowerPC.
  int getExtendResourceLenLimit() const override { return 1; }

  // PowerPC specific version of setSpecialOperandAttr that copies Flags to MI
  // and clears nuw, nsw, and exact flags.
  using TargetInstrInfo::setSpecialOperandAttr;
  void setSpecialOperandAttr(MachineInstr &MI, uint32_t Flags) const;

  bool isCoalescableExtInstr(const MachineInstr &MI,
                             Register &SrcReg, Register &DstReg,
                             unsigned &SubIdx) const override;
  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1,
                             unsigned &SrcOpIdx2) const override;

  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;

  // Branch analysis.
  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;

  // Select analysis.
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;

  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI,
                           Register VReg) const override;

  // Emits a register spill without updating the register class for vector
  // registers. This ensures that when we spill a vector register the
  // element order in the register is the same as it was in memory.
  void storeRegToStackSlotNoUpd(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                unsigned SrcReg, bool isKill, int FrameIndex,
                                const TargetRegisterClass *RC,
                                const TargetRegisterInfo *TRI) const;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI,
                            Register VReg) const override;

  // Emits a register reload without updating the register class for vector
  // registers. This ensures that when we reload a vector register the
  // element order in the register is the same as it was in memory.
  void loadRegFromStackSlotNoUpd(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 unsigned DestReg, int FrameIndex,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI) const;

  unsigned getStoreOpcodeForSpill(const TargetRegisterClass *RC) const;

  unsigned getLoadOpcodeForSpill(const TargetRegisterClass *RC) const;

  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;

  bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
                     MachineRegisterInfo *MRI) const override;

  bool onlyFoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                         Register Reg) const;

  // If-conversion by predication (only supported by some branch
  // instructions). All of the profitability checks always return true; it is
  // always profitable to use the predicated branches.
  bool isProfitableToIfCvt(MachineBasicBlock &MBB,
                           unsigned NumCycles, unsigned ExtraPredCycles,
                           BranchProbability Probability) const override {
    return true;
  }

  bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
                           unsigned NumT, unsigned ExtraT,
                           MachineBasicBlock &FMBB,
                           unsigned NumF, unsigned ExtraF,
                           BranchProbability Probability) const override;

  bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                                 BranchProbability Probability) const override {
    return true;
  }

  bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                 MachineBasicBlock &FMBB) const override {
    return false;
  }

  // Predication support.
  bool isPredicated(const MachineInstr &MI) const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  bool PredicateInstruction(MachineInstr &MI,
                            ArrayRef<MachineOperand> Pred) const override;

  bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                         ArrayRef<MachineOperand> Pred2) const override;

  bool ClobbersPredicate(MachineInstr &MI, std::vector<MachineOperand> &Pred,
                         bool SkipDead) const override;

  // Comparison optimization.

  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &Mask,
                      int64_t &Value) const override;

  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t Mask, int64_t Value,
                            const MachineRegisterInfo *MRI) const override;

  /// Return true if we can determine the base operand and byte offset of the
  /// given instruction, as well as the memory width. Width is the size of
  /// memory that is being loaded/stored (e.g. 1, 2, 4, 8).
  bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, unsigned &Width,
                                    const TargetRegisterInfo *TRI) const;
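
  // For example (illustrative), for `LWZ $r3, 8, $r31` (i.e. lwz r3, 8(r31)),
  // this yields BaseOp = $r31, Offset = 8 and Width = 4.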

  bool optimizeCmpPostRA(MachineInstr &MI) const;

  /// Get the base operand and byte offset of an instruction that reads/writes
  /// memory.
  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &LdSt,
      SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
      bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const override;

  /// Returns true if the two given memory operations should be scheduled
  /// adjacent.
  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           int64_t Offset1, bool OffsetIsScalable1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           int64_t Offset2, bool OffsetIsScalable2,
                           unsigned ClusterSize,
                           unsigned NumBytes) const override;

  /// Return true if two MIs access different memory addresses; false
  /// otherwise.
  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  /// GetInstSize - Return the number of bytes of code the specified
  /// instruction may occupy. This returns the maximum number of bytes.
  ///
  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  MCInst getNop() const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;

  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;

  // Expand a VSX memory pseudo instruction to either a VSX or an FP
  // instruction.
  bool expandVSXMemPseudo(MachineInstr &MI) const;

  // Lower pseudo instructions after register allocation.
  bool expandPostRAPseudo(MachineInstr &MI) const override;

  const TargetRegisterClass *updatedRC(const TargetRegisterClass *RC) const;
  static int getRecordFormOpcode(unsigned Opcode);

  bool isTOCSaveMI(const MachineInstr &MI) const;

  std::pair<bool, bool>
  isSignOrZeroExtended(const unsigned Reg, const unsigned BinOpDepth,
                       const MachineRegisterInfo *MRI) const;

  // Return true if the register is sign-extended from 32 to 64 bits.
  bool isSignExtended(const unsigned Reg,
                      const MachineRegisterInfo *MRI) const {
    return isSignOrZeroExtended(Reg, 0, MRI).first;
  }

  // Return true if the register is zero-extended from 32 to 64 bits.
  bool isZeroExtended(const unsigned Reg,
                      const MachineRegisterInfo *MRI) const {
    return isSignOrZeroExtended(Reg, 0, MRI).second;
  }

  bool convertToImmediateForm(MachineInstr &MI,
                              SmallSet<Register, 4> &RegsToUpdate,
                              MachineInstr **KilledDef = nullptr) const;
  bool foldFrameOffset(MachineInstr &MI) const;
  bool combineRLWINM(MachineInstr &MI, MachineInstr **ToErase = nullptr) const;
  bool isADDIInstrEligibleForFolding(MachineInstr &ADDIMI, int64_t &Imm) const;
  bool isADDInstrEligibleForFolding(MachineInstr &ADDMI) const;
  bool isImmInstrEligibleForFolding(MachineInstr &MI, unsigned &BaseReg,
                                    unsigned &XFormOpcode,
                                    int64_t &OffsetOfImmInstr,
                                    ImmInstrInfo &III) const;
  bool isValidToBeChangedReg(MachineInstr *ADDMI, unsigned Index,
                             MachineInstr *&ADDIMI, int64_t &OffsetAddi,
                             int64_t OffsetImm) const;

  void replaceInstrWithLI(MachineInstr &MI, const LoadImmediateInfo &LII) const;
  void replaceInstrOperandWithImm(MachineInstr &MI, unsigned OpNo,
                                  int64_t Imm) const;

  bool instrHasImmForm(unsigned Opc, bool IsVFReg, ImmInstrInfo &III,
                       bool PostRA) const;

  // In the PostRA phase, try to find the instruction that defines \p Reg
  // before \p MI. \p SeenIntermediateUse is set to true if any uses exist
  // between that def and \p MI.
  MachineInstr *getDefMIPostRA(unsigned Reg, MachineInstr &MI,
                               bool &SeenIntermediateUse) const;

  // Materialize an immediate into \p Reg after register allocation.
  void materializeImmPostRA(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register Reg,
                            int64_t Imm) const;

  /// Check whether \p Opcode is BDNZ (decrement CTR and branch if it is still
  /// nonzero).
  bool isBDNZ(unsigned Opcode) const;

  /// Find the hardware loop instruction used to set up the specified loop.
  /// On PPC, we have two instructions used to set up the hardware loop
  /// (MTCTRloop, MTCTR8loop) with corresponding endloop (BDNZ, BDNZ8)
  /// instructions to indicate the end of a loop.
  MachineInstr *
  findLoopInstr(MachineBasicBlock &PreHeader,
                SmallPtrSet<MachineBasicBlock *, 8> &Visited) const;
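
  // A hardware loop set up in the preheader pairs with its latch branch
  // roughly as follows (illustrative):
  //   preheader:  MTCTR8loop <trip count>   ; move the trip count into CTR
  //   ...
  //   latch:      BDNZ8 <loop header>       ; decrement CTR, branch if nonzero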

  /// Analyze loop L, which must be a single-basic-block loop, and if the
  /// conditions can be understood well enough, produce a PipelinerLoopInfo
  /// object.
  std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
  analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H