//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "AArch64StackOffset.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/TypeSize.h"

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class AArch64Subtarget;
class AArch64TargetMachine;

static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;
static const MachineMemOperand::Flags MOStridedAccess =
    MachineMemOperand::MOTargetFlag2;

#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"
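// Illustrative use of these target MMO flags (a sketch; the surrounding pass
// code and variable names are assumed, not taken from this file): a load/store
// optimization pass can test an instruction's memory operand before trying to
// form a paired access.
//
//   bool PairingSuppressed =
//       llvm::any_of(MI.memoperands(), [](const MachineMemOperand *MMO) {
//         return MMO->getFlags() & MOSuppressPair;
//       });
//   if (PairingSuppressed)
//     return false; // pairing was hinted to be unprofitable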

class AArch64InstrInfo final : public AArch64GenInstrInfo {
  const AArch64RegisterInfo RI;
  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info.  As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// Does this instruction set its full destination register to zero?
  static bool isGPRZero(const MachineInstr &MI);

  /// Does this instruction rename a GPR without modifying bits?
  static bool isGPRCopy(const MachineInstr &MI);

  /// Does this instruction rename an FPR without modifying bits?
  static bool isFPRCopy(const MachineInstr &MI);

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  static bool isLdStPairSuppressed(const MachineInstr &MI);

  /// Return true if the given load or store is a strided memory access.
  static bool isStridedAccess(const MachineInstr &MI);

  /// Return true if this is an unscaled load/store.
  static bool isUnscaledLdSt(unsigned Opc);
  static bool isUnscaledLdSt(MachineInstr &MI) {
    return isUnscaledLdSt(MI.getOpcode());
  }

  /// Returns the unscaled load/store for the scaled load/store opcode,
  /// if there is a corresponding unscaled variant available.
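  /// For example (illustrative), AArch64::STRXui maps to AArch64::STURXi.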
  static Optional<unsigned> getUnscaledLdSt(unsigned Opc);

  /// Scaling factor for (scaled or unscaled) load or store.
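  /// For example (illustrative), a 64-bit scaled STR transfers 8 bytes, so
  /// its immediate counts units of 8: ByteOffset = Imm * getMemScale(MI).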
  static int getMemScale(unsigned Opc);
  static int getMemScale(const MachineInstr &MI) {
    return getMemScale(MI.getOpcode());
  }

  /// Returns the index for the immediate for a given instruction.
  static unsigned getLoadStoreImmIdx(unsigned Opc);

  /// Return true if the given load or store may be paired with another.
  static bool isPairableLdStInst(const MachineInstr &MI);

  /// Return the opcode that sets flags when possible. The caller is
  /// responsible for ensuring the opc has a flag-setting equivalent.
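  /// For example (illustrative), AArch64::ADDWri maps to AArch64::ADDSWri,
  /// with \p Is64Bit set to false.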
  static unsigned convertToFlagSettingOpc(unsigned Opc, bool &Is64Bit);

  /// Return true if this is a load/store that can be potentially paired/merged.
  bool isCandidateToMergeOrPair(const MachineInstr &MI) const;

  /// Hint that pairing the given load or store is unprofitable.
  static void suppressLdStPair(MachineInstr &MI);

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const override;

  /// If \p OffsetIsScalable is set to 'true', the offset is scaled by `vscale`.
  /// This is true for some SVE instructions like ldr/str that have a
  /// 'reg + imm' addressing mode where the immediate is an index to the
  /// scalable vector located at 'reg + imm * vscale x #bytes'.
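  /// A minimal query sketch (the local names are assumed):
  /// \code
  ///   const MachineOperand *Base;
  ///   int64_t Offset;
  ///   bool Scalable;
  ///   unsigned Width;
  ///   if (TII->getMemOperandWithOffsetWidth(MI, Base, Offset, Scalable, Width,
  ///                                         TRI))
  ///     ; // Byte address: Base + (Scalable ? Offset * vscale : Offset).
  /// \endcode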
  bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, bool &OffsetIsScalable,
                                    unsigned &Width,
                                    const TargetRegisterInfo *TRI) const;

  /// Return the immediate offset of the base register in a load/store \p LdSt.
  MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;

  /// Returns true if opcode \p Opc is a memory operation. If it is, set
  /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
  ///
  /// For unscaled instructions, \p Scale is set to 1.
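  /// A minimal sketch of checking a candidate immediate against the legal
  /// range (the local names are assumed):
  /// \code
  ///   TypeSize Scale = TypeSize::Fixed(0);
  ///   unsigned Width;
  ///   int64_t MinOff, MaxOff;
  ///   if (AArch64InstrInfo::getMemOpInfo(Opc, Scale, Width, MinOff, MaxOff) &&
  ///       MinOff <= NewImm && NewImm <= MaxOff)
  ///     ; // NewImm is encodable for Opc
  /// \endcode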
  static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, unsigned &Width,
                           int64_t &MinOffset, int64_t &MaxOffset);

  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           unsigned NumLoads, unsigned NumBytes) const override;

  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, MCRegister DestReg,
                        MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                       DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                       bool KillSrc, unsigned Opcode, unsigned ZeroReg,
                       llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  // This tells target independent code that it is okay to pass instructions
  // with subreg operands to foldMemoryOperandImpl.
  bool isSubregFoldable() const override { return true; }

  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const override;

  /// \returns true if a branch from an instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;

  MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
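  // Illustrative caller-side pattern for the analyzeBranch hook above (a
  // sketch; the local names are assumed):
  //   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  //   SmallVector<MachineOperand, 4> Cond;
  //   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond))
  //     ; // success: TBB/FBB/Cond now describe MBB's terminators
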
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;
  void getNoop(MCInst &NopInst) const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
  /// Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int &CmpMask,
                      int &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int CmpMask, int CmpValue,
                            const MachineRegisterInfo *MRI) const override;
  bool optimizeCondBranch(MachineInstr &MI) const override;

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in ``Root``. All potential patterns are
  /// listed in the ``Patterns`` array.
  bool getMachineCombinerPatterns(
      MachineInstr &Root,
      SmallVectorImpl<MachineCombinerPattern> &Patterns) const override;
  /// Return true when Inst is associative and commutative so that it can be
  /// reassociated.
  bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
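  // Illustrative MachineCombiner flow over the hooks above (a sketch of the
  // generic driver's behavior, not code from this file):
  //   1. getMachineCombinerPatterns(Root, Patterns) collects candidate
  //      patterns rooted at Root.
  //   2. genAlternativeCodeSequence(...) builds the replacement sequence in
  //      InsInstrs and lists the retired instructions in DelInstrs.
  //   3. The pass commits the new sequence only if it improves the critical
  //      path or, for isThroughputPattern() patterns, throughput.
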
  /// AArch64 supports MachineCombiner.
  bool useMachineCombiner() const override;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  outliner::OutlinedFunction getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
  outliner::InstrType
  getOutliningType(MachineBasicBlock::iterator &MIT,
                   unsigned Flags) const override;
  bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                              unsigned &Flags) const override;
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     const outliner::Candidate &C) const override;
  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
  /// Returns the vector element size (B, H, S or D) of an SVE opcode.
  uint64_t getElementSizeForOpcode(unsigned Opc) const;
  /// Returns true if the instruction has a shift by immediate that can be
  /// executed in one cycle less.
  static bool isFalkorShiftExtFast(const MachineInstr &MI);
  /// Return true if the instruction is a SEH instruction used for unwinding
  /// on Windows.
  static bool isSEHInstruction(const MachineInstr &MI);

  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                      Register Reg) const override;

  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
                                                 Register Reg) const override;

#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"

protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// a value from one register to another, return the destination and source
  /// registers as machine operands.
  Optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

private:
  unsigned getInstBundleLength(const MachineInstr &MI) const;

  /// Sets the offsets on outlined instructions in \p MBB which use SP
  /// so that they will be valid post-outlining.
  ///
  /// \param MBB A \p MachineBasicBlock in an outlined function.
  void fixupPostOutline(MachineBasicBlock &MBB) const;

  void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             MachineBasicBlock *TBB,
                             ArrayRef<MachineOperand> Cond) const;
  bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
                           const MachineRegisterInfo *MRI) const;

  /// Returns an unused general-purpose register which can be used for
  /// constructing an outlined call if one exists. Returns 0 otherwise.
  unsigned findRegisterToSaveLRTo(const outliner::Candidate &C) const;
};

/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
/// which either reads or clobbers NZCV.
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                     const MachineInstr &UseMI,
                                     const TargetRegisterInfo *TRI);

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset.  This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                     StackOffset Offset, const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false, bool NeedsWinCFI = false,
                     bool *HasWinCFI = nullptr);
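
// A minimal sketch of a PEI call site (the operands are assumed; in this
// revision the target-specific StackOffset is constructed from a byte amount
// and an MVT):
//
//   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
//                   StackOffset(-16, MVT::i64), TII,
//                   MachineInstr::FrameSetup);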

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, StackOffset &Offset,
                              const AArch64InstrInfo *TII);

/// Used to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};

/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
/// turned into an unscaled operator, whose opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int64_t *EmittableOffset = nullptr);
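
// Illustrative use of the returned status mask (a sketch; the local names are
// assumed):
//
//   int Status = isAArch64FrameOffsetLegal(MI, Offset);
//   if (Status & AArch64FrameOffsetIsLegal)
//     ; // Offset can be folded into MI unchanged
//   else if (Status & AArch64FrameOffsetCanUpdate)
//     ; // fold the emittable part; Offset now holds the remainder
//   else
//     ; // AArch64FrameOffsetCannotUpdate: MI cannot take an offset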

static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::Bcc:
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return true;
  default:
    return false;
  }
}

static inline bool isIndirectBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::BR:
  case AArch64::BRAA:
  case AArch64::BRAB:
  case AArch64::BRAAZ:
  case AArch64::BRABZ:
    return true;
  }
  return false;
}

/// Return the opcode to be used for indirect calls.
unsigned getBLRCallOpcode(const MachineFunction &MF);

// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X)      (X)       // 3-bits
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 4-bits
#define TSFLAG_FALSE_LANE_TYPE(X)       ((X) << 7) // 2-bits
// }

namespace AArch64 {

enum ElementSizeType {
  ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
  ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
  ElementSizeB    = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
  ElementSizeH    = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
  ElementSizeS    = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
  ElementSizeD    = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};

enum DestructiveInstType {
  DestructiveInstTypeMask       = TSFLAG_DESTRUCTIVE_INST_TYPE(0xf),
  NotDestructive                = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
  DestructiveOther              = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
  DestructiveUnary              = TSFLAG_DESTRUCTIVE_INST_TYPE(0x2),
  DestructiveBinaryImm          = TSFLAG_DESTRUCTIVE_INST_TYPE(0x3),
  DestructiveBinaryShImmUnpred  = TSFLAG_DESTRUCTIVE_INST_TYPE(0x4),
  DestructiveBinary             = TSFLAG_DESTRUCTIVE_INST_TYPE(0x5),
  DestructiveBinaryComm         = TSFLAG_DESTRUCTIVE_INST_TYPE(0x6),
  DestructiveBinaryCommWithRev  = TSFLAG_DESTRUCTIVE_INST_TYPE(0x7),
  DestructiveTernaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x8),
};

enum FalseLaneType {
  FalseLanesMask  = TSFLAG_FALSE_LANE_TYPE(0x3),
  FalseLanesZero  = TSFLAG_FALSE_LANE_TYPE(0x1),
  FalseLanesUndef = TSFLAG_FALSE_LANE_TYPE(0x2),
};

#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
#undef TSFLAG_FALSE_LANE_TYPE
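
// Illustrative decode of an instruction's TSFlags fields (a sketch; the local
// names are assumed):
//
//   uint64_t TSFlags = MI.getDesc().TSFlags;
//   auto ElemSz = static_cast<ElementSizeType>(TSFlags & ElementSizeMask);
//   auto DType =
//       static_cast<DestructiveInstType>(TSFlags & DestructiveInstTypeMask);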

int getSVEPseudoMap(uint16_t Opcode);
int getSVERevInstr(uint16_t Opcode);
int getSVENonRevInstr(uint16_t Opcode);
} // end namespace AArch64

} // end namespace llvm

#endif