//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/TypeSize.h"

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class AArch64Subtarget;

static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;
static const MachineMemOperand::Flags MOStridedAccess =
    MachineMemOperand::MOTargetFlag2;

#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"

class AArch64InstrInfo final : public AArch64GenInstrInfo {
  const AArch64RegisterInfo RI;
  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegisterInfo. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// Does this instruction set its full destination register to zero?
  static bool isGPRZero(const MachineInstr &MI);

  /// Does this instruction rename a GPR without modifying bits?
  static bool isGPRCopy(const MachineInstr &MI);

  /// Does this instruction rename an FPR without modifying bits?
  static bool isFPRCopy(const MachineInstr &MI);

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  static bool isLdStPairSuppressed(const MachineInstr &MI);

  /// Return true if the given load or store is a strided memory access.
  static bool isStridedAccess(const MachineInstr &MI);

  /// Return true if the given load or store uses an unscaled offset.
  static bool hasUnscaledLdStOffset(unsigned Opc);
  static bool hasUnscaledLdStOffset(MachineInstr &MI) {
    return hasUnscaledLdStOffset(MI.getOpcode());
  }

  /// Returns the unscaled load/store for the scaled load/store opcode,
  /// if there is a corresponding unscaled variant available.
  static Optional<unsigned> getUnscaledLdSt(unsigned Opc);
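
  // A minimal usage sketch (not part of this interface), assuming an
  // AArch64InstrInfo pointer `TII` and a MachineInstr `MI` in scope: a pass
  // could rewrite a scaled load or store into its unscaled twin roughly as
  // follows; the caller is still responsible for re-legalizing the offset
  // operand afterwards:
  //
  //   if (Optional<unsigned> UnscaledOpc =
  //           AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode()))
  //     MI.setDesc(TII->get(*UnscaledOpc));
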
  /// Scaling factor for (scaled or unscaled) load or store.
  static int getMemScale(unsigned Opc);
  static int getMemScale(const MachineInstr &MI) {
    return getMemScale(MI.getOpcode());
  }

  /// Returns whether the instruction is a pre-indexed load.
  static bool isPreLd(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed store.
  static bool isPreSt(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed load/store.
  static bool isPreLdSt(const MachineInstr &MI);

  /// Returns the index of the immediate operand for a given instruction.
  static unsigned getLoadStoreImmIdx(unsigned Opc);

  /// Return true if the given load or store may be paired with another.
  static bool isPairableLdStInst(const MachineInstr &MI);

  /// Return the opcode that sets flags when possible. The caller is
  /// responsible for ensuring the opc has a flag setting equivalent.
  static unsigned convertToFlagSettingOpc(unsigned Opc, bool &Is64Bit);

  /// Return true if this is a load/store that can be potentially
  /// paired/merged.
  bool isCandidateToMergeOrPair(const MachineInstr &MI) const;

  /// Hint that pairing the given load or store is unprofitable.
  static void suppressLdStPair(MachineInstr &MI);

  Optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const override;

  /// If \p OffsetIsScalable is set to 'true', the offset is scaled by
  /// `vscale`. This is true for some SVE instructions like ldr/str that have a
  /// 'reg + imm' addressing mode where the immediate is an index to the
  /// scalable vector located at 'reg + imm * vscale x #bytes'.
  bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, bool &OffsetIsScalable,
                                    unsigned &Width,
                                    const TargetRegisterInfo *TRI) const;

  /// Return the immediate offset of the base register in a load/store \p LdSt.
  MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;
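
  // Illustrative sketch, assuming `MI` and `TRI` are in scope and `TII` is an
  // AArch64InstrInfo pointer: callers typically decompose an address with
  // getMemOperandWithOffsetWidth first and only then interpret the offset,
  // which for SVE is expressed in vscale units:
  //
  //   const MachineOperand *BaseOp;
  //   int64_t Offset;
  //   bool OffsetIsScalable;
  //   unsigned Width;
  //   if (TII->getMemOperandWithOffsetWidth(MI, BaseOp, Offset,
  //                                         OffsetIsScalable, Width, TRI)) {
  //     // Byte offset is Offset * vscale when OffsetIsScalable is true.
  //   }
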
  /// Returns true if opcode \p Opc is a memory operation. If it is, set
  /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
  ///
  /// For unscaled instructions, \p Scale is set to 1.
  static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, unsigned &Width,
                           int64_t &MinOffset, int64_t &MaxOffset);

  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           unsigned NumLoads, unsigned NumBytes) const override;

  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, MCRegister DestReg,
                        MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                       DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                       bool KillSrc, unsigned Opcode, unsigned ZeroReg,
                       llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  // This tells target independent code that it is okay to pass instructions
  // with subreg operands to foldMemoryOperandImpl.
  bool isSubregFoldable() const override { return true; }

  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const override;
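
  // Illustrative sketch of how getMemOpInfo (declared above) is commonly
  // consumed; `Opc` and `NewOffset` are assumed names. Query the encodable
  // range first, then test a candidate immediate against it:
  //
  //   TypeSize Scale(0U, false);
  //   unsigned Width;
  //   int64_t MinOffset, MaxOffset;
  //   if (AArch64InstrInfo::getMemOpInfo(Opc, Scale, Width, MinOffset,
  //                                      MaxOffset)) {
  //     bool Fits = NewOffset >= MinOffset && NewOffset <= MaxOffset;
  //   }
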
  /// \returns true if a branch with opcode \p BranchOpc is capable of jumping
  /// to a position \p BrOffset bytes away.
  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;

  MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              MachineBranchPredicate &MBP,
                              bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;
  MCInst getNop() const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// analyzeCompare - For a comparison instruction, return the source
  /// registers in SrcReg and SrcReg2, and the value it compares against in
  /// CmpValue. Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &CmpMask,
                      int64_t &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
                            const MachineRegisterInfo *MRI) const override;
  bool optimizeCondBranch(MachineInstr &MI) const override;

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in ``Root``. All potential patterns are
  /// listed in the ``Patterns`` array.
  bool
  getMachineCombinerPatterns(MachineInstr &Root,
                             SmallVectorImpl<MachineCombinerPattern> &Patterns,
                             bool DoRegPressureReduce) const override;
  /// Return true when Inst is associative and commutative so that it can be
  /// reassociated.
  bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
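
  // Hedged sketch of how the target-independent MachineCombiner driver uses
  // the two hooks above; the real driver also costs the alternative sequence
  // before committing it:
  //
  //   SmallVector<MachineCombinerPattern, 16> Patterns;
  //   if (TII->getMachineCombinerPatterns(Root, Patterns,
  //                                       /*DoRegPressureReduce=*/false))
  //     for (MachineCombinerPattern P : Patterns) {
  //       SmallVector<MachineInstr *, 16> InsInstrs, DelInstrs;
  //       DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
  //       TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
  //                                       InstrIdxForVirtReg);
  //       // ...evaluate InsInstrs vs. DelInstrs and commit if profitable.
  //     }
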
  /// AArch64 supports MachineCombiner.
  bool useMachineCombiner() const override;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  outliner::OutlinedFunction getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
  outliner::InstrType
  getOutliningType(MachineBasicBlock::iterator &MIT,
                   unsigned Flags) const override;
  bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                              unsigned &Flags) const override;
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     const outliner::Candidate &C) const override;
  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
  /// Returns the vector element size (B, H, S or D) of an SVE opcode.
  uint64_t getElementSizeForOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE instruction that sets the
  /// condition codes as if its results had been fed to a PTEST instruction
  /// along with the same general predicate.
  bool isPTestLikeOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE WHILE## instruction.
  bool isWhileOpcode(unsigned Opc) const;
  /// Returns true if the instruction has a shift by immediate that can be
  /// executed in one cycle less.
  static bool isFalkorShiftExtFast(const MachineInstr &MI);
  /// Return true if the instruction is a SEH instruction used for unwinding
  /// on Windows.
  static bool isSEHInstruction(const MachineInstr &MI);

  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                      Register Reg) const override;

  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
                                                 Register Reg) const override;

  unsigned int getTailDuplicateSize(CodeGenOpt::Level OptLevel) const override;

  bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
                                MachineRegisterInfo &MRI) const override;

  static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
                                                  int64_t &NumBytes,
                                                  int64_t &NumPredicateVectors,
                                                  int64_t &NumDataVectors);
  static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset,
                                                  int64_t &ByteSized,
                                                  int64_t &VGSized);
#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"
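
  // Illustrative sketch, assuming a StackOffset `Offset` mixing fixed and
  // scalable parts: frame lowering splits it before materializing it, since
  // the fixed part is addressed with ADD/SUB while the scalable parts use
  // ADDVL/ADDPL:
  //
  //   int64_t NumBytes, NumPredicateVectors, NumDataVectors;
  //   AArch64InstrInfo::decomposeStackOffsetForFrameOffsets(
  //       Offset, NumBytes, NumPredicateVectors, NumDataVectors);
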
protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// a value from one register to another register, return the destination
  /// and source registers as machine operands.
  Optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

private:
  unsigned getInstBundleLength(const MachineInstr &MI) const;

  /// Sets the offsets on outlined instructions in \p MBB which use SP
  /// so that they will be valid post-outlining.
  ///
  /// \param MBB A \p MachineBasicBlock in an outlined function.
  void fixupPostOutline(MachineBasicBlock &MBB) const;

  void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             MachineBasicBlock *TBB,
                             ArrayRef<MachineOperand> Cond) const;
  bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
                           const MachineRegisterInfo &MRI) const;
  bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
                            int CmpValue, const MachineRegisterInfo &MRI) const;

  /// Returns an unused general-purpose register which can be used for
  /// constructing an outlined call if one exists. Returns 0 otherwise.
  unsigned findRegisterToSaveLRTo(const outliner::Candidate &C) const;

  /// Remove a ptest of a predicate-generating operation that already sets, or
  /// can be made to set, the condition codes in an identical manner.
  bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
                          unsigned PredReg,
                          const MachineRegisterInfo *MRI) const;
};

/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
/// which either reads or clobbers NZCV.
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                     const MachineInstr &UseMI,
                                     const TargetRegisterInfo *TRI);

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset. This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                     StackOffset Offset, const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false, bool NeedsWinCFI = false,
                     bool *HasWinCFI = nullptr);

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, StackOffset &Offset,
                              const AArch64InstrInfo *TII);

/// Used to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};
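
// A minimal sketch of calling emitFrameOffset from PEI-style code, assuming
// `MBB`, `MBBI`, `DL`, `ScratchReg`, and `TII` are in scope; this would
// materialize SP plus a fixed 16 bytes into ScratchReg:
//
//   emitFrameOffset(MBB, MBBI, DL, ScratchReg, AArch64::SP,
//                   StackOffset::getFixed(16), TII,
//                   MachineInstr::FrameSetup);
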
/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
/// turned into an unscaled operator, whose opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int64_t *EmittableOffset = nullptr);

static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::Bcc:
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return true;
  default:
    return false;
  }
}

static inline bool isIndirectBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::BR:
  case AArch64::BRAA:
  case AArch64::BRAB:
  case AArch64::BRAAZ:
  case AArch64::BRABZ:
    return true;
  }
  return false;
}

static inline bool isPTrueOpcode(unsigned Opc) {
  switch (Opc) {
  case AArch64::PTRUE_B:
  case AArch64::PTRUE_H:
  case AArch64::PTRUE_S:
  case AArch64::PTRUE_D:
    return true;
  default:
    return false;
  }
}

/// Return opcode to be used for indirect calls.
unsigned getBLRCallOpcode(const MachineFunction &MF);

// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X)     (X)        // 3-bits
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 4-bits
#define TSFLAG_FALSE_LANE_TYPE(X)       ((X) << 7) // 2-bits
#define TSFLAG_INSTR_FLAGS(X)           ((X) << 9) // 2-bits
// }

namespace AArch64 {

enum ElementSizeType {
  ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
  ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
  ElementSizeB = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
  ElementSizeH = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
  ElementSizeS = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
  ElementSizeD = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};

enum DestructiveInstType {
  DestructiveInstTypeMask = TSFLAG_DESTRUCTIVE_INST_TYPE(0xf),
  NotDestructive = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
  DestructiveOther = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
  DestructiveUnary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x2),
  DestructiveBinaryImm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x3),
  DestructiveBinaryShImmUnpred = TSFLAG_DESTRUCTIVE_INST_TYPE(0x4),
  DestructiveBinary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x5),
  DestructiveBinaryComm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x6),
  DestructiveBinaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x7),
  DestructiveTernaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x8),
  DestructiveUnaryPassthru = TSFLAG_DESTRUCTIVE_INST_TYPE(0x9),
};

enum FalseLaneType {
  FalseLanesMask = TSFLAG_FALSE_LANE_TYPE(0x3),
  FalseLanesZero = TSFLAG_FALSE_LANE_TYPE(0x1),
  FalseLanesUndef = TSFLAG_FALSE_LANE_TYPE(0x2),
};
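
// Illustrative sketch: consumers such as the SVE pseudo expansion read these
// fields straight off an instruction's descriptor, roughly:
//
//   uint64_t DType = MI.getDesc().TSFlags & AArch64::DestructiveInstTypeMask;
//   if (DType == AArch64::DestructiveBinaryCommWithRev) {
//     // ...a reversed form exists, so operands may be commuted to satisfy
//     // the destructive-operand constraint.
//   }
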
// NOTE: This is a bit field.
static const uint64_t InstrFlagIsWhile = TSFLAG_INSTR_FLAGS(0x1);
static const uint64_t InstrFlagIsPTestLike = TSFLAG_INSTR_FLAGS(0x2);

#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
#undef TSFLAG_FALSE_LANE_TYPE
#undef TSFLAG_INSTR_FLAGS

int getSVEPseudoMap(uint16_t Opcode);
int getSVERevInstr(uint16_t Opcode);
int getSVENonRevInstr(uint16_t Opcode);

} // end namespace AArch64

} // end namespace llvm

#endif