//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/TypeSize.h"
#include <optional>

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class AArch64Subtarget;

static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;
static const MachineMemOperand::Flags MOStridedAccess =
    MachineMemOperand::MOTargetFlag2;

#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"

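// Example (illustrative sketch; a hypothetical helper, not upstream API):
// these target flags live on a load/store's memory operands and are tested
// by masking, as the isLdStPairSuppressed/isStridedAccess queries below do.
static inline bool exampleHasSuppressPairHint(const MachineInstr &MI) {
  // Any memory operand carrying MOSuppressPair marks the access as a poor
  // pairing candidate.
  for (const MachineMemOperand *MMO : MI.memoperands())
    if (MMO->getFlags() & MOSuppressPair)
      return true;
  return false;
}
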
class AArch64InstrInfo final : public AArch64GenInstrInfo {
  const AArch64RegisterInfo RI;
  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info.  As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// Does this instruction set its full destination register to zero?
  static bool isGPRZero(const MachineInstr &MI);

  /// Does this instruction rename a GPR without modifying bits?
  static bool isGPRCopy(const MachineInstr &MI);

  /// Does this instruction rename an FPR without modifying bits?
  static bool isFPRCopy(const MachineInstr &MI);

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  static bool isLdStPairSuppressed(const MachineInstr &MI);

  /// Return true if the given load or store is a strided memory access.
  static bool isStridedAccess(const MachineInstr &MI);

  /// Return true if the given opcode has an unscaled load/store offset.
  static bool hasUnscaledLdStOffset(unsigned Opc);
  static bool hasUnscaledLdStOffset(MachineInstr &MI) {
    return hasUnscaledLdStOffset(MI.getOpcode());
  }

  /// Returns the unscaled load/store for the scaled load/store opcode,
  /// if there is a corresponding unscaled variant available.
  static std::optional<unsigned> getUnscaledLdSt(unsigned Opc);
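
  // Example (illustrative): converting a scaled access to its unscaled form,
  // assuming such a variant exists:
  //   if (std::optional<unsigned> UnscaledOpc =
  //           AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode()))
  //     MI.setDesc(TII->get(*UnscaledOpc));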

  /// Scaling factor for (scaled or unscaled) load or store.
  static int getMemScale(unsigned Opc);
  static int getMemScale(const MachineInstr &MI) {
    return getMemScale(MI.getOpcode());
  }
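
  // Example (illustrative): for a scaled load/store, the byte offset is the
  // raw immediate operand times this scale:
  //   int64_t Bytes = getLdStOffsetOp(MI).getImm() * getMemScale(MI);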

  /// Returns whether the instruction is a pre-indexed load.
  static bool isPreLd(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed store.
  static bool isPreSt(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed load/store.
  static bool isPreLdSt(const MachineInstr &MI);

  /// Returns whether the instruction is a paired load/store.
  static bool isPairedLdSt(const MachineInstr &MI);

  /// Returns the base register operand of a load/store.
  static const MachineOperand &getLdStBaseOp(const MachineInstr &MI);

  /// Returns the immediate offset operand of a load/store.
  static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI);

  /// Returns whether the instruction is FP or NEON.
  static bool isFpOrNEON(const MachineInstr &MI);

  /// Returns whether the instruction is in H form (16-bit operands).
  static bool isHForm(const MachineInstr &MI);

  /// Returns whether the instruction is in Q form (128-bit operands).
  static bool isQForm(const MachineInstr &MI);

  /// Returns whether the instruction can be compatible with a non-zero BTYPE.
  static bool hasBTISemantics(const MachineInstr &MI);

  /// Returns the operand index of the immediate for a given instruction.
  static unsigned getLoadStoreImmIdx(unsigned Opc);

  /// Return true if the given load or store may be paired with another.
  static bool isPairableLdStInst(const MachineInstr &MI);

  /// Returns true if MI is one of the TCRETURN* instructions.
  static bool isTailCallReturnInst(const MachineInstr &MI);

  /// Return the opcode that sets flags when possible.  The caller is
  /// responsible for ensuring the opc has a flag setting equivalent.
  static unsigned convertToFlagSettingOpc(unsigned Opc);

  /// Return true if this is a load/store that can be potentially paired/merged.
  bool isCandidateToMergeOrPair(const MachineInstr &MI) const;

  /// Hint that pairing the given load or store is unprofitable.
  static void suppressLdStPair(MachineInstr &MI);

  std::optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

  bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
                           const MachineInstr &AddrI,
                           ExtAddrMode &AM) const override;

  MachineInstr *emitLdStWithAddr(MachineInstr &MemI,
                                 const ExtAddrMode &AM) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const override;

  /// If \p OffsetIsScalable is set to 'true', the offset is scaled by `vscale`.
  /// This is true for some SVE instructions like ldr/str that have a
  /// 'reg + imm' addressing mode where the immediate is an index to the
  /// scalable vector located at 'reg + imm * vscale x #bytes'.
  bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, bool &OffsetIsScalable,
                                    TypeSize &Width,
                                    const TargetRegisterInfo *TRI) const;

  /// Return the immediate offset of the base register in a load/store \p LdSt.
  MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;

  /// Returns true if opcode \p Opc is a memory operation. If it is, set
  /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
  ///
  /// For unscaled instructions, \p Scale is set to 1.
  static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, TypeSize &Width,
                           int64_t &MinOffset, int64_t &MaxOffset);
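
  // Example (illustrative): checking that a new immediate would still be
  // encodable for MI's opcode:
  //   TypeSize Scale = TypeSize::getFixed(0), Width = TypeSize::getFixed(0);
  //   int64_t MinOff, MaxOff;
  //   if (getMemOpInfo(MI.getOpcode(), Scale, Width, MinOff, MaxOff) &&
  //       NewOff >= MinOff && NewOff <= MaxOff)
  //     /* NewOff fits the encoding */;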

  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           int64_t Offset1, bool OffsetIsScalable1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           int64_t Offset2, bool OffsetIsScalable2,
                           unsigned ClusterSize,
                           unsigned NumBytes) const override;

  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, MCRegister DestReg,
                        MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                       DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                       bool KillSrc, unsigned Opcode, unsigned ZeroReg,
                       llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI,
                           Register VReg) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI,
                            Register VReg) const override;

  // This tells target independent code that it is okay to pass instructions
  // with subreg operands to foldMemoryOperandImpl.
  bool isSubregFoldable() const override { return true; }

  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const override;

  /// \returns true if a branch from an instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;

  MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;

  void insertIndirectBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock &NewDestBB,
                            MachineBasicBlock &RestoreBB, const DebugLoc &DL,
                            int64_t BrOffset, RegScavenger *RS) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              MachineBranchPredicate &MBP,
                              bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;

  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;

  MCInst getNop() const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
  /// Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &CmpMask,
                      int64_t &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
                            const MachineRegisterInfo *MRI) const override;
  bool optimizeCondBranch(MachineInstr &MI) const override;

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in ``Root``. All potential patterns are
  /// listed in the ``Patterns`` array.
  bool
  getMachineCombinerPatterns(MachineInstr &Root,
                             SmallVectorImpl<MachineCombinerPattern> &Patterns,
                             bool DoRegPressureReduce) const override;
  /// Return true when Inst is associative and commutative so that it can be
  /// reassociated. If Invert is true, then the inverse of Inst operation must
  /// be checked.
  bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                   bool Invert) const override;
  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
  /// AArch64 supports MachineCombiner.
  bool useMachineCombiner() const override;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  std::optional<outliner::OutlinedFunction> getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
  void mergeOutliningCandidateAttributes(
      Function &F, std::vector<outliner::Candidate> &Candidates) const override;
  outliner::InstrType
  getOutliningTypeImpl(MachineBasicBlock::iterator &MIT,
                       unsigned Flags) const override;
  SmallVector<
      std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
  getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const override;
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     outliner::Candidate &C) const override;
  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;

  void buildClearRegister(Register Reg, MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator Iter, DebugLoc &DL,
                          bool AllowSideEffects = true) const override;

  /// Returns the vector element size (B, H, S or D) of an SVE opcode.
  uint64_t getElementSizeForOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE instruction that sets the
  /// condition codes as if its results had been fed to a PTEST instruction
  /// along with the same general predicate.
  bool isPTestLikeOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE WHILE## instruction.
  bool isWhileOpcode(unsigned Opc) const;
  /// Returns true if the instruction has a shift by immediate that can be
  /// executed in one cycle less.
  static bool isFalkorShiftExtFast(const MachineInstr &MI);
  /// Return true if the instruction is a SEH instruction used for unwinding
  /// on Windows.
  static bool isSEHInstruction(const MachineInstr &MI);

  std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                           Register Reg) const override;

  bool isFunctionSafeToSplit(const MachineFunction &MF) const override;

  bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const override;

  std::optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

  unsigned int getTailDuplicateSize(CodeGenOptLevel OptLevel) const override;

  bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
                                MachineRegisterInfo &MRI) const override;

  static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
                                                  int64_t &NumBytes,
                                                  int64_t &NumPredicateVectors,
                                                  int64_t &NumDataVectors);
  static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset,
                                                  int64_t &ByteSized,
                                                  int64_t &VGSized);

  bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;

  // Return true if an address of the form BaseReg + Scale * ScaledReg + Offset
  // can be used for a load/store of NumBytes. BaseReg is always present and
  // implicit.
  bool isLegalAddressingMode(unsigned NumBytes, int64_t Offset,
                             unsigned Scale) const;
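
  // Example (illustrative): "ldr x0, [x1, x2, lsl #3]" is an 8-byte access
  // with Offset = 0 and Scale = 8, so the corresponding query is
  //   isLegalAddressingMode(/*NumBytes=*/8, /*Offset=*/0, /*Scale=*/8);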

  // Decrement the SP, issuing probes along the way. `TargetReg` is the new top
  // of the stack. `FrameSetup` is true if the allocation is part of
  // constructing the activation frame of a function.
  MachineBasicBlock::iterator probedStackAlloc(MachineBasicBlock::iterator MBBI,
                                               Register TargetReg,
                                               bool FrameSetup) const;

#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"

protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// a value from one register to another register, return the destination and
  /// source registers as machine operands.
  std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;
  std::optional<DestSourcePair>
  isCopyLikeInstrImpl(const MachineInstr &MI) const override;

private:
  unsigned getInstBundleLength(const MachineInstr &MI) const;

  /// Sets the offsets on outlined instructions in \p MBB which use SP
  /// so that they will be valid post-outlining.
  ///
  /// \param MBB A \p MachineBasicBlock in an outlined function.
  void fixupPostOutline(MachineBasicBlock &MBB) const;

  void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             MachineBasicBlock *TBB,
                             ArrayRef<MachineOperand> Cond) const;
  bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
                           const MachineRegisterInfo &MRI) const;
  bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
                            int CmpValue, const MachineRegisterInfo &MRI) const;

  /// Returns an unused general-purpose register which can be used for
  /// constructing an outlined call if one exists. Returns 0 otherwise.
  Register findRegisterToSaveLRTo(outliner::Candidate &C) const;

  /// Remove a ptest of a predicate-generating operation that already sets, or
  /// can be made to set, the condition codes in an identical manner.
  bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
                          unsigned PredReg,
                          const MachineRegisterInfo *MRI) const;
};

struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};
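
// Illustrative sketch (a hypothetical helper, not upstream API): flag records
// from several NZCV users are typically accumulated with operator|=.
static inline UsedNZCV unionUsedNZCV(ArrayRef<UsedNZCV> Uses) {
  UsedNZCV Sum;
  for (const UsedNZCV &U : Uses)
    Sum |= U; // A flag counts as used if any user reads it.
  return Sum;
}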

/// \returns Condition flags used after \p CmpInstr in its MachineBB if NZCV
/// flags are not alive in successors of the same \p CmpInstr and \p MI parent.
/// \returns std::nullopt otherwise.
///
/// Collect instructions using the flags in \p CCUseInstrs if provided.
std::optional<UsedNZCV>
examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                 const TargetRegisterInfo &TRI,
                 SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);

/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
/// which either reads or clobbers NZCV.
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                     const MachineInstr &UseMI,
                                     const TargetRegisterInfo *TRI);

MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg,
                              unsigned Reg, const StackOffset &Offset,
                              bool LastAdjustmentWasScalable = true);
MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
                                 const StackOffset &OffsetFromDefCFA);

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset.  This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                     StackOffset Offset, const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false, bool NeedsWinCFI = false,
                     bool *HasWinCFI = nullptr, bool EmitCFAOffset = false,
                     StackOffset InitialOffset = {},
                     unsigned FrameReg = AArch64::SP);
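
// Example (illustrative): allocating 16 bytes of stack in a prologue, tagged
// as frame setup, might look like
//   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
//                   StackOffset::getFixed(-16), TII,
//                   MachineInstr::FrameSetup);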

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, StackOffset &Offset,
                              const AArch64InstrInfo *TII);

/// Used to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};

/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will indicate whether @p MI should be
/// turned into an unscaled operator, whose opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int64_t *EmittableOffset = nullptr);
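
// Example (illustrative): decoding the returned status bits.
//   int Status = isAArch64FrameOffsetLegal(MI, Offset);
//   if (Status == AArch64FrameOffsetCannotUpdate)
//     ; // MI cannot be rewritten to use an offset at all.
//   else if (Status & AArch64FrameOffsetIsLegal)
//     ; // Offset can be rewritten into MI as-is.
//   else if (Status & AArch64FrameOffsetCanUpdate)
//     ; // Only part fits; Offset now holds the leftover amount.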

static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::Bcc:
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return true;
  default:
    return false;
  }
}

static inline bool isIndirectBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::BR:
  case AArch64::BRAA:
  case AArch64::BRAB:
  case AArch64::BRAAZ:
  case AArch64::BRABZ:
    return true;
  }
  return false;
}
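
// Illustrative sketch (a hypothetical helper, not upstream API): terminator
// scans such as analyzeBranch classify opcodes with the predicates above.
static inline bool exampleIsBranch(const MachineInstr &MI) {
  int Opc = MI.getOpcode();
  return isUncondBranchOpcode(Opc) || isCondBranchOpcode(Opc) ||
         isIndirectBranchOpcode(Opc);
}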

static inline bool isPTrueOpcode(unsigned Opc) {
  switch (Opc) {
  case AArch64::PTRUE_B:
  case AArch64::PTRUE_H:
  case AArch64::PTRUE_S:
  case AArch64::PTRUE_D:
    return true;
  default:
    return false;
  }
}

/// Return opcode to be used for indirect calls.
unsigned getBLRCallOpcode(const MachineFunction &MF);

/// Return XPAC opcode to be used for a ptrauth strip using the given key.
static inline unsigned getXPACOpcodeForKey(AArch64PACKey::ID K) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA: case IB: return AArch64::XPACI;
  case DA: case DB: return AArch64::XPACD;
  }
  llvm_unreachable("Unhandled AArch64PACKey::ID enum");
}

/// Return AUT opcode to be used for a ptrauth auth using the given key, or its
/// AUT*Z variant that doesn't take a discriminator operand, using zero instead.
static inline unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA: return Zero ? AArch64::AUTIZA : AArch64::AUTIA;
  case IB: return Zero ? AArch64::AUTIZB : AArch64::AUTIB;
  case DA: return Zero ? AArch64::AUTDZA : AArch64::AUTDA;
  case DB: return Zero ? AArch64::AUTDZB : AArch64::AUTDB;
  }
  llvm_unreachable("Unhandled AArch64PACKey::ID enum");
}

/// Return PAC opcode to be used for a ptrauth sign using the given key, or its
/// PAC*Z variant that doesn't take a discriminator operand, using zero instead.
static inline unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA: return Zero ? AArch64::PACIZA : AArch64::PACIA;
  case IB: return Zero ? AArch64::PACIZB : AArch64::PACIB;
  case DA: return Zero ? AArch64::PACDZA : AArch64::PACDA;
  case DB: return Zero ? AArch64::PACDZB : AArch64::PACDB;
  }
  llvm_unreachable("Unhandled AArch64PACKey::ID enum");
}
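
// Example (illustrative): signing with the IA key and a zero discriminator
// selects the PAC*Z form:
//   unsigned Opc = getPACOpcodeForKey(AArch64PACKey::IA, /*Zero=*/true);
//   // Opc == AArch64::PACIZA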

// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X)      (X)        // 3-bits
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3)  // 4-bits
#define TSFLAG_FALSE_LANE_TYPE(X)       ((X) << 7)  // 2-bits
#define TSFLAG_INSTR_FLAGS(X)           ((X) << 9)  // 2-bits
#define TSFLAG_SME_MATRIX_TYPE(X)       ((X) << 11) // 3-bits
// }

namespace AArch64 {

enum ElementSizeType {
  ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
  ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
  ElementSizeB    = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
  ElementSizeH    = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
  ElementSizeS    = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
  ElementSizeD    = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};

enum DestructiveInstType {
  DestructiveInstTypeMask       = TSFLAG_DESTRUCTIVE_INST_TYPE(0xf),
  NotDestructive                = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
  DestructiveOther              = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
  DestructiveUnary              = TSFLAG_DESTRUCTIVE_INST_TYPE(0x2),
  DestructiveBinaryImm          = TSFLAG_DESTRUCTIVE_INST_TYPE(0x3),
  DestructiveBinaryShImmUnpred  = TSFLAG_DESTRUCTIVE_INST_TYPE(0x4),
  DestructiveBinary             = TSFLAG_DESTRUCTIVE_INST_TYPE(0x5),
  DestructiveBinaryComm         = TSFLAG_DESTRUCTIVE_INST_TYPE(0x6),
  DestructiveBinaryCommWithRev  = TSFLAG_DESTRUCTIVE_INST_TYPE(0x7),
  DestructiveTernaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x8),
  DestructiveUnaryPassthru      = TSFLAG_DESTRUCTIVE_INST_TYPE(0x9),
};

enum FalseLaneType {
  FalseLanesMask  = TSFLAG_FALSE_LANE_TYPE(0x3),
  FalseLanesZero  = TSFLAG_FALSE_LANE_TYPE(0x1),
  FalseLanesUndef = TSFLAG_FALSE_LANE_TYPE(0x2),
};

// NOTE: This is a bit field.
static const uint64_t InstrFlagIsWhile     = TSFLAG_INSTR_FLAGS(0x1);
static const uint64_t InstrFlagIsPTestLike = TSFLAG_INSTR_FLAGS(0x2);

enum SMEMatrixType {
  SMEMatrixTypeMask = TSFLAG_SME_MATRIX_TYPE(0x7),
  SMEMatrixNone     = TSFLAG_SME_MATRIX_TYPE(0x0),
  SMEMatrixTileB    = TSFLAG_SME_MATRIX_TYPE(0x1),
  SMEMatrixTileH    = TSFLAG_SME_MATRIX_TYPE(0x2),
  SMEMatrixTileS    = TSFLAG_SME_MATRIX_TYPE(0x3),
  SMEMatrixTileD    = TSFLAG_SME_MATRIX_TYPE(0x4),
  SMEMatrixTileQ    = TSFLAG_SME_MATRIX_TYPE(0x5),
  SMEMatrixArray    = TSFLAG_SME_MATRIX_TYPE(0x6),
};

#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
#undef TSFLAG_FALSE_LANE_TYPE
#undef TSFLAG_INSTR_FLAGS
#undef TSFLAG_SME_MATRIX_TYPE
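
// Example (illustrative sketch; a hypothetical helper, not upstream API):
// TSFlags fields are decoded by masking with the *Mask values above, e.g.
// the SVE element size of an instruction:
static inline uint64_t exampleGetElementSize(const MachineInstr &MI) {
  return MI.getDesc().TSFlags & ElementSizeMask;
}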

int getSVEPseudoMap(uint16_t Opcode);
int getSVERevInstr(uint16_t Opcode);
int getSVENonRevInstr(uint16_t Opcode);

int getSMEPseudoMap(uint16_t Opcode);

} // end namespace AArch64

} // end namespace llvm

#endif