xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h (revision 473957941922d17be72089e385e2e2a995fd0e1c)
10b57cec5SDimitry Andric //==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
90b57cec5SDimitry Andric // This file defines the interfaces that AArch64 uses to lower LLVM code into a
100b57cec5SDimitry Andric // selection DAG.
110b57cec5SDimitry Andric //
120b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
130b57cec5SDimitry Andric 
140b57cec5SDimitry Andric #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
150b57cec5SDimitry Andric #define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
160b57cec5SDimitry Andric 
170b57cec5SDimitry Andric #include "AArch64.h"
180b57cec5SDimitry Andric #include "llvm/CodeGen/CallingConvLower.h"
190b57cec5SDimitry Andric #include "llvm/CodeGen/SelectionDAG.h"
200b57cec5SDimitry Andric #include "llvm/CodeGen/TargetLowering.h"
210b57cec5SDimitry Andric #include "llvm/IR/CallingConv.h"
220b57cec5SDimitry Andric #include "llvm/IR/Instruction.h"
230b57cec5SDimitry Andric 
240b57cec5SDimitry Andric namespace llvm {
250b57cec5SDimitry Andric 
260b57cec5SDimitry Andric namespace AArch64ISD {
270b57cec5SDimitry Andric 
280b57cec5SDimitry Andric enum NodeType : unsigned {
290b57cec5SDimitry Andric   FIRST_NUMBER = ISD::BUILTIN_OP_END,
300b57cec5SDimitry Andric   WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
310b57cec5SDimitry Andric   CALL,         // Function call.
320b57cec5SDimitry Andric 
330b57cec5SDimitry Andric   // Produces the full sequence of instructions for getting the thread pointer
340b57cec5SDimitry Andric   // offset of a variable into X0, using the TLSDesc model.
350b57cec5SDimitry Andric   TLSDESC_CALLSEQ,
360b57cec5SDimitry Andric   ADRP,     // Page address of a TargetGlobalAddress operand.
370b57cec5SDimitry Andric   ADR,      // PC-relative address within +/- 1MB (ADR instruction).
380b57cec5SDimitry Andric   ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
390b57cec5SDimitry Andric   LOADgot,  // Load from automatically generated descriptor (e.g. Global
400b57cec5SDimitry Andric             // Offset Table, TLS record).
410b57cec5SDimitry Andric   RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
420b57cec5SDimitry Andric   BRCOND,   // Conditional branch instruction; "b.cond".
430b57cec5SDimitry Andric   CSEL,
440b57cec5SDimitry Andric   FCSEL, // Conditional move instruction.
450b57cec5SDimitry Andric   CSINV, // Conditional select invert.
460b57cec5SDimitry Andric   CSNEG, // Conditional select negate.
470b57cec5SDimitry Andric   CSINC, // Conditional select increment.
480b57cec5SDimitry Andric 
490b57cec5SDimitry Andric   // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
500b57cec5SDimitry Andric   // ELF.
510b57cec5SDimitry Andric   THREAD_POINTER,
520b57cec5SDimitry Andric   ADC,
530b57cec5SDimitry Andric   SBC, // adc, sbc instructions
540b57cec5SDimitry Andric 
550b57cec5SDimitry Andric   // Arithmetic instructions which write flags.
560b57cec5SDimitry Andric   ADDS,
570b57cec5SDimitry Andric   SUBS,
580b57cec5SDimitry Andric   ADCS,
590b57cec5SDimitry Andric   SBCS,
600b57cec5SDimitry Andric   ANDS,
610b57cec5SDimitry Andric 
620b57cec5SDimitry Andric   // Conditional compares. Operands: left, right, falsecc, cc, flags
630b57cec5SDimitry Andric   CCMP,
640b57cec5SDimitry Andric   CCMN,
650b57cec5SDimitry Andric   FCCMP,
660b57cec5SDimitry Andric 
670b57cec5SDimitry Andric   // Floating point comparison
680b57cec5SDimitry Andric   FCMP,
690b57cec5SDimitry Andric 
700b57cec5SDimitry Andric   // Scalar extract
710b57cec5SDimitry Andric   EXTR,
720b57cec5SDimitry Andric 
730b57cec5SDimitry Andric   // Scalar-to-vector duplication
740b57cec5SDimitry Andric   DUP,
750b57cec5SDimitry Andric   DUPLANE8,
760b57cec5SDimitry Andric   DUPLANE16,
770b57cec5SDimitry Andric   DUPLANE32,
780b57cec5SDimitry Andric   DUPLANE64,
790b57cec5SDimitry Andric 
800b57cec5SDimitry Andric   // Vector immediate moves
810b57cec5SDimitry Andric   MOVI,
820b57cec5SDimitry Andric   MOVIshift,
830b57cec5SDimitry Andric   MOVIedit,
840b57cec5SDimitry Andric   MOVImsl,
850b57cec5SDimitry Andric   FMOV,
860b57cec5SDimitry Andric   MVNIshift,
870b57cec5SDimitry Andric   MVNImsl,
880b57cec5SDimitry Andric 
890b57cec5SDimitry Andric   // Vector immediate ops
900b57cec5SDimitry Andric   BICi,
910b57cec5SDimitry Andric   ORRi,
920b57cec5SDimitry Andric 
930b57cec5SDimitry Andric   // Vector bit select: similar to ISD::VSELECT, but the condition bits within
940b57cec5SDimitry Andric   // an element need not all be identical, so selection happens per bit.
950b57cec5SDimitry Andric   BSL,
960b57cec5SDimitry Andric 
970b57cec5SDimitry Andric   // Vector arithmetic negation
980b57cec5SDimitry Andric   NEG,
990b57cec5SDimitry Andric 
1000b57cec5SDimitry Andric   // Vector shuffles
1010b57cec5SDimitry Andric   ZIP1,
1020b57cec5SDimitry Andric   ZIP2,
1030b57cec5SDimitry Andric   UZP1,
1040b57cec5SDimitry Andric   UZP2,
1050b57cec5SDimitry Andric   TRN1,
1060b57cec5SDimitry Andric   TRN2,
1070b57cec5SDimitry Andric   REV16,
1080b57cec5SDimitry Andric   REV32,
1090b57cec5SDimitry Andric   REV64,
1100b57cec5SDimitry Andric   EXT,
1110b57cec5SDimitry Andric 
1120b57cec5SDimitry Andric   // Vector shift by scalar
1130b57cec5SDimitry Andric   VSHL,
1140b57cec5SDimitry Andric   VLSHR,
1150b57cec5SDimitry Andric   VASHR,
1160b57cec5SDimitry Andric 
1170b57cec5SDimitry Andric   // Vector saturating and rounding shifts by immediate
1180b57cec5SDimitry Andric   SQSHL_I,
1190b57cec5SDimitry Andric   UQSHL_I,
1200b57cec5SDimitry Andric   SQSHLU_I,
1210b57cec5SDimitry Andric   SRSHR_I,
1220b57cec5SDimitry Andric   URSHR_I,
1230b57cec5SDimitry Andric 
1240b57cec5SDimitry Andric   // Vector comparisons
1250b57cec5SDimitry Andric   CMEQ,
1260b57cec5SDimitry Andric   CMGE,
1270b57cec5SDimitry Andric   CMGT,
1280b57cec5SDimitry Andric   CMHI,
1290b57cec5SDimitry Andric   CMHS,
1300b57cec5SDimitry Andric   FCMEQ,
1310b57cec5SDimitry Andric   FCMGE,
1320b57cec5SDimitry Andric   FCMGT,
1330b57cec5SDimitry Andric 
1340b57cec5SDimitry Andric   // Vector comparisons against zero
1350b57cec5SDimitry Andric   CMEQz,
1360b57cec5SDimitry Andric   CMGEz,
1370b57cec5SDimitry Andric   CMGTz,
1380b57cec5SDimitry Andric   CMLEz,
1390b57cec5SDimitry Andric   CMLTz,
1400b57cec5SDimitry Andric   FCMEQz,
1410b57cec5SDimitry Andric   FCMGEz,
1420b57cec5SDimitry Andric   FCMGTz,
1430b57cec5SDimitry Andric   FCMLEz,
1440b57cec5SDimitry Andric   FCMLTz,
1450b57cec5SDimitry Andric 
1460b57cec5SDimitry Andric   // Vector across-lanes addition
1470b57cec5SDimitry Andric   // Only the lower result lane is defined.
1480b57cec5SDimitry Andric   SADDV,
1490b57cec5SDimitry Andric   UADDV,
1500b57cec5SDimitry Andric 
1510b57cec5SDimitry Andric   // Vector across-lanes min/max
1520b57cec5SDimitry Andric   // Only the lower result lane is defined.
1530b57cec5SDimitry Andric   SMINV,
1540b57cec5SDimitry Andric   UMINV,
1550b57cec5SDimitry Andric   SMAXV,
1560b57cec5SDimitry Andric   UMAXV,
1570b57cec5SDimitry Andric 
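  // SVE reductions over the active lanes of a predicated vector:
  // signed/unsigned min/max and bitwise OR/XOR/AND.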
158480093f4SDimitry Andric   SMAXV_PRED,
159480093f4SDimitry Andric   UMAXV_PRED,
160480093f4SDimitry Andric   SMINV_PRED,
161480093f4SDimitry Andric   UMINV_PRED,
162480093f4SDimitry Andric   ORV_PRED,
163480093f4SDimitry Andric   EORV_PRED,
164480093f4SDimitry Andric   ANDV_PRED,
165480093f4SDimitry Andric 
1660b57cec5SDimitry Andric   // Vector bitwise negation
1670b57cec5SDimitry Andric   NOT,
1680b57cec5SDimitry Andric 
1690b57cec5SDimitry Andric   // Vector bitwise selection
1700b57cec5SDimitry Andric   BIT,
1710b57cec5SDimitry Andric 
1720b57cec5SDimitry Andric   // Compare-and-branch (CBZ/CBNZ) and test-bit-and-branch (TBZ/TBNZ)
1730b57cec5SDimitry Andric   CBZ,
1740b57cec5SDimitry Andric   CBNZ,
1750b57cec5SDimitry Andric   TBZ,
1760b57cec5SDimitry Andric   TBNZ,
1770b57cec5SDimitry Andric 
1780b57cec5SDimitry Andric   // Tail calls
1790b57cec5SDimitry Andric   TC_RETURN,
1800b57cec5SDimitry Andric 
1810b57cec5SDimitry Andric   // Custom prefetch handling
1820b57cec5SDimitry Andric   PREFETCH,
1830b57cec5SDimitry Andric 
1840b57cec5SDimitry Andric   // {s|u}int to FP within a FP register.
1850b57cec5SDimitry Andric   SITOF,
1860b57cec5SDimitry Andric   UITOF,
1870b57cec5SDimitry Andric 
1880b57cec5SDimitry Andric   /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
1890b57cec5SDimitry Andric   /// world w.r.t. vectors, which causes additional REV instructions to be
1900b57cec5SDimitry Andric   /// generated to compensate for the byte-swapping. But sometimes we do
1910b57cec5SDimitry Andric   /// need to re-interpret the data in SIMD vector registers in big-endian
1920b57cec5SDimitry Andric   /// mode without emitting such REV instructions.
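  /// For example, an NVCAST from v2i32 to v4i16 reinterprets the register
  /// contents as-is, whereas a BITCAST between those types is lowered with REV
  /// instructions on big-endian targets to preserve memory-layout semantics.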
1930b57cec5SDimitry Andric   NVCAST,
1940b57cec5SDimitry Andric 
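  // Widening multiplies (smull/umull): the operands are sign- or zero-extended
  // and the result elements are twice the width of the source elements.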
1950b57cec5SDimitry Andric   SMULL,
1960b57cec5SDimitry Andric   UMULL,
1970b57cec5SDimitry Andric 
1980b57cec5SDimitry Andric   // Reciprocal estimates and steps.
1990b57cec5SDimitry Andric   FRECPE, FRECPS,
2000b57cec5SDimitry Andric   FRSQRTE, FRSQRTS,
2010b57cec5SDimitry Andric 
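  // SVE unpacks: sign- or zero-extend the low or high half of a vector to the
  // next wider element type.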
2028bcb0991SDimitry Andric   SUNPKHI,
2038bcb0991SDimitry Andric   SUNPKLO,
2048bcb0991SDimitry Andric   UUNPKHI,
2058bcb0991SDimitry Andric   UUNPKLO,
2068bcb0991SDimitry Andric 
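  // SVE extract/permute operations: CLASTA_N/CLASTB_N and LASTA/LASTB extract
  // an element relative to the last active lane, REV reverses the vector
  // elements and TBL performs a table-lookup permute.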
207480093f4SDimitry Andric   CLASTA_N,
208480093f4SDimitry Andric   CLASTB_N,
209480093f4SDimitry Andric   LASTA,
210480093f4SDimitry Andric   LASTB,
211480093f4SDimitry Andric   REV,
212480093f4SDimitry Andric   TBL,
213480093f4SDimitry Andric 
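  // SVE: INSR shifts the vector up one lane and inserts a scalar into lane 0,
  // PTEST sets the condition flags from a predicate, and PTRUE materializes a
  // predicate with a given pattern of active lanes.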
214480093f4SDimitry Andric   INSR,
215480093f4SDimitry Andric   PTEST,
216480093f4SDimitry Andric   PTRUE,
217480093f4SDimitry Andric 
218480093f4SDimitry Andric   // Unsigned gather loads.
219480093f4SDimitry Andric   GLD1,
220480093f4SDimitry Andric   GLD1_SCALED,
221480093f4SDimitry Andric   GLD1_UXTW,
222480093f4SDimitry Andric   GLD1_SXTW,
223480093f4SDimitry Andric   GLD1_UXTW_SCALED,
224480093f4SDimitry Andric   GLD1_SXTW_SCALED,
225480093f4SDimitry Andric   GLD1_IMM,
226480093f4SDimitry Andric 
227480093f4SDimitry Andric   // Signed gather loads
228480093f4SDimitry Andric   GLD1S,
229480093f4SDimitry Andric   GLD1S_SCALED,
230480093f4SDimitry Andric   GLD1S_UXTW,
231480093f4SDimitry Andric   GLD1S_SXTW,
232480093f4SDimitry Andric   GLD1S_UXTW_SCALED,
233480093f4SDimitry Andric   GLD1S_SXTW_SCALED,
234480093f4SDimitry Andric   GLD1S_IMM,
235480093f4SDimitry Andric   // Scatter stores
236480093f4SDimitry Andric   SST1,
237480093f4SDimitry Andric   SST1_SCALED,
238480093f4SDimitry Andric   SST1_UXTW,
239480093f4SDimitry Andric   SST1_SXTW,
240480093f4SDimitry Andric   SST1_UXTW_SCALED,
241480093f4SDimitry Andric   SST1_SXTW_SCALED,
242480093f4SDimitry Andric   SST1_IMM,
243480093f4SDimitry Andric 
244*47395794SDimitry Andric   // Strict (exception-raising) floating point comparison
245*47395794SDimitry Andric   STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
246*47395794SDimitry Andric   STRICT_FCMPE,
247*47395794SDimitry Andric 
2480b57cec5SDimitry Andric   // NEON Load/Store with post-increment base updates
2490b57cec5SDimitry Andric   LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
2500b57cec5SDimitry Andric   LD3post,
2510b57cec5SDimitry Andric   LD4post,
2520b57cec5SDimitry Andric   ST2post,
2530b57cec5SDimitry Andric   ST3post,
2540b57cec5SDimitry Andric   ST4post,
2550b57cec5SDimitry Andric   LD1x2post,
2560b57cec5SDimitry Andric   LD1x3post,
2570b57cec5SDimitry Andric   LD1x4post,
2580b57cec5SDimitry Andric   ST1x2post,
2590b57cec5SDimitry Andric   ST1x3post,
2600b57cec5SDimitry Andric   ST1x4post,
2610b57cec5SDimitry Andric   LD1DUPpost,
2620b57cec5SDimitry Andric   LD2DUPpost,
2630b57cec5SDimitry Andric   LD3DUPpost,
2640b57cec5SDimitry Andric   LD4DUPpost,
2650b57cec5SDimitry Andric   LD1LANEpost,
2660b57cec5SDimitry Andric   LD2LANEpost,
2670b57cec5SDimitry Andric   LD3LANEpost,
2680b57cec5SDimitry Andric   LD4LANEpost,
2690b57cec5SDimitry Andric   ST2LANEpost,
2700b57cec5SDimitry Andric   ST3LANEpost,
2710b57cec5SDimitry Andric   ST4LANEpost,
2720b57cec5SDimitry Andric 
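  // Memory Tagging Extension (MTE) tag stores: STG writes an allocation tag,
  // STZG also zeroes the granule's data, and the 2G forms cover two granules.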
2730b57cec5SDimitry Andric   STG,
2740b57cec5SDimitry Andric   STZG,
2750b57cec5SDimitry Andric   ST2G,
276480093f4SDimitry Andric   STZ2G,
2770b57cec5SDimitry Andric 
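  // Load/store pair of registers.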
278480093f4SDimitry Andric   LDP,
279480093f4SDimitry Andric   STP
2800b57cec5SDimitry Andric };
2810b57cec5SDimitry Andric 
2820b57cec5SDimitry Andric } // end namespace AArch64ISD
2830b57cec5SDimitry Andric 
2840b57cec5SDimitry Andric namespace {
2850b57cec5SDimitry Andric 
2860b57cec5SDimitry Andric // Any instruction that defines a 32-bit result zeros out the high half of the
2870b57cec5SDimitry Andric // register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
2880b57cec5SDimitry Andric // be copying from a truncate. But any other 32-bit operation will zero-extend
2890b57cec5SDimitry Andric // up to 64 bits.
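// For example, a 32-bit ADD that writes w0 also clears bits [63:32] of x0, so
// a later zero-extension of its result to 64 bits needs no extra instruction.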
2900b57cec5SDimitry Andric // FIXME: X86 also checks for CMOV here. Do we need something similar?
2910b57cec5SDimitry Andric static inline bool isDef32(const SDNode &N) {
2920b57cec5SDimitry Andric   unsigned Opc = N.getOpcode();
2930b57cec5SDimitry Andric   return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
2940b57cec5SDimitry Andric          Opc != ISD::CopyFromReg;
2950b57cec5SDimitry Andric }
2960b57cec5SDimitry Andric 
2970b57cec5SDimitry Andric } // end anonymous namespace
2980b57cec5SDimitry Andric 
2990b57cec5SDimitry Andric class AArch64Subtarget;
3000b57cec5SDimitry Andric class AArch64TargetMachine;
3010b57cec5SDimitry Andric 
3020b57cec5SDimitry Andric class AArch64TargetLowering : public TargetLowering {
3030b57cec5SDimitry Andric public:
3040b57cec5SDimitry Andric   explicit AArch64TargetLowering(const TargetMachine &TM,
3050b57cec5SDimitry Andric                                  const AArch64Subtarget &STI);
3060b57cec5SDimitry Andric 
3070b57cec5SDimitry Andric   /// Selects the correct CCAssignFn for a given CallingConvention value.
3080b57cec5SDimitry Andric   CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
3090b57cec5SDimitry Andric 
3100b57cec5SDimitry Andric   /// Selects the correct CCAssignFn for a given CallingConvention value.
3110b57cec5SDimitry Andric   CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;
3120b57cec5SDimitry Andric 
3130b57cec5SDimitry Andric   /// Determine which of the bits specified in Mask are known to be either zero
3140b57cec5SDimitry Andric   /// or one and return them in the KnownZero/KnownOne bitsets.
3150b57cec5SDimitry Andric   void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
3160b57cec5SDimitry Andric                                      const APInt &DemandedElts,
3170b57cec5SDimitry Andric                                      const SelectionDAG &DAG,
3180b57cec5SDimitry Andric                                      unsigned Depth = 0) const override;
3190b57cec5SDimitry Andric 
3208bcb0991SDimitry Andric   MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
3218bcb0991SDimitry Andric     // Returning i64 unconditionally here (i.e. even for ILP32) means that the
3228bcb0991SDimitry Andric     // *DAG* representation of pointers will always be 64 bits. They will be
3238bcb0991SDimitry Andric     // truncated and extended when transferred to memory, but the 64-bit DAG
3248bcb0991SDimitry Andric     // allows us to use AArch64's addressing modes much more easily.
3258bcb0991SDimitry Andric     return MVT::getIntegerVT(64);
3268bcb0991SDimitry Andric   }
3278bcb0991SDimitry Andric 
3280b57cec5SDimitry Andric   bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
3290b57cec5SDimitry Andric                                     TargetLoweringOpt &TLO) const override;
3300b57cec5SDimitry Andric 
3310b57cec5SDimitry Andric   MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
3320b57cec5SDimitry Andric 
3330b57cec5SDimitry Andric   /// Returns true if the target allows unaligned memory accesses of the
3340b57cec5SDimitry Andric   /// specified type.
3350b57cec5SDimitry Andric   bool allowsMisalignedMemoryAccesses(
3360b57cec5SDimitry Andric       EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
3370b57cec5SDimitry Andric       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
3380b57cec5SDimitry Andric       bool *Fast = nullptr) const override;
3398bcb0991SDimitry Andric   /// LLT variant.
3408bcb0991SDimitry Andric   bool allowsMisalignedMemoryAccesses(
3418bcb0991SDimitry Andric     LLT Ty, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
3428bcb0991SDimitry Andric     bool *Fast = nullptr) const override;
3430b57cec5SDimitry Andric 
3440b57cec5SDimitry Andric   /// Provide custom lowering hooks for some operations.
3450b57cec5SDimitry Andric   SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
3460b57cec5SDimitry Andric 
3470b57cec5SDimitry Andric   const char *getTargetNodeName(unsigned Opcode) const override;
3480b57cec5SDimitry Andric 
3490b57cec5SDimitry Andric   SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
3500b57cec5SDimitry Andric 
3510b57cec5SDimitry Andric   /// Returns true if a cast between SrcAS and DestAS is a noop.
3520b57cec5SDimitry Andric   bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
3530b57cec5SDimitry Andric     // Addrspacecasts are always noops.
3540b57cec5SDimitry Andric     return true;
3550b57cec5SDimitry Andric   }
3560b57cec5SDimitry Andric 
3570b57cec5SDimitry Andric   /// This method returns a target specific FastISel object, or null if the
3580b57cec5SDimitry Andric   /// target does not support "fast" ISel.
3590b57cec5SDimitry Andric   FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
3600b57cec5SDimitry Andric                            const TargetLibraryInfo *libInfo) const override;
3610b57cec5SDimitry Andric 
3620b57cec5SDimitry Andric   bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
3630b57cec5SDimitry Andric 
3640b57cec5SDimitry Andric   bool isFPImmLegal(const APFloat &Imm, EVT VT,
3650b57cec5SDimitry Andric                     bool ForCodeSize) const override;
3660b57cec5SDimitry Andric 
3670b57cec5SDimitry Andric   /// Return true if the given shuffle mask can be codegen'd directly, or if it
3680b57cec5SDimitry Andric   /// should be stack expanded.
3690b57cec5SDimitry Andric   bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
3700b57cec5SDimitry Andric 
3710b57cec5SDimitry Andric   /// Return the ISD::SETCC ValueType.
3720b57cec5SDimitry Andric   EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
3730b57cec5SDimitry Andric                          EVT VT) const override;
3740b57cec5SDimitry Andric 
3750b57cec5SDimitry Andric   SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
3760b57cec5SDimitry Andric 
3770b57cec5SDimitry Andric   MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
3780b57cec5SDimitry Andric                                   MachineBasicBlock *BB) const;
3790b57cec5SDimitry Andric 
3800b57cec5SDimitry Andric   MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
3810b57cec5SDimitry Andric                                            MachineBasicBlock *BB) const;
3820b57cec5SDimitry Andric 
3830b57cec5SDimitry Andric   MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
3840b57cec5SDimitry Andric                                          MachineBasicBlock *BB) const;
3850b57cec5SDimitry Andric 
3860b57cec5SDimitry Andric   MachineBasicBlock *
3870b57cec5SDimitry Andric   EmitInstrWithCustomInserter(MachineInstr &MI,
3880b57cec5SDimitry Andric                               MachineBasicBlock *MBB) const override;
3890b57cec5SDimitry Andric 
3900b57cec5SDimitry Andric   bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
3910b57cec5SDimitry Andric                           MachineFunction &MF,
3920b57cec5SDimitry Andric                           unsigned Intrinsic) const override;
3930b57cec5SDimitry Andric 
3940b57cec5SDimitry Andric   bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
3950b57cec5SDimitry Andric                              EVT NewVT) const override;
3960b57cec5SDimitry Andric 
3970b57cec5SDimitry Andric   bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
3980b57cec5SDimitry Andric   bool isTruncateFree(EVT VT1, EVT VT2) const override;
3990b57cec5SDimitry Andric 
4000b57cec5SDimitry Andric   bool isProfitableToHoist(Instruction *I) const override;
4010b57cec5SDimitry Andric 
4020b57cec5SDimitry Andric   bool isZExtFree(Type *Ty1, Type *Ty2) const override;
4030b57cec5SDimitry Andric   bool isZExtFree(EVT VT1, EVT VT2) const override;
4040b57cec5SDimitry Andric   bool isZExtFree(SDValue Val, EVT VT2) const override;
4050b57cec5SDimitry Andric 
4060b57cec5SDimitry Andric   bool shouldSinkOperands(Instruction *I,
4070b57cec5SDimitry Andric                           SmallVectorImpl<Use *> &Ops) const override;
4080b57cec5SDimitry Andric 
4090b57cec5SDimitry Andric   bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;
4100b57cec5SDimitry Andric 
4110b57cec5SDimitry Andric   unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
4120b57cec5SDimitry Andric 
4130b57cec5SDimitry Andric   bool lowerInterleavedLoad(LoadInst *LI,
4140b57cec5SDimitry Andric                             ArrayRef<ShuffleVectorInst *> Shuffles,
4150b57cec5SDimitry Andric                             ArrayRef<unsigned> Indices,
4160b57cec5SDimitry Andric                             unsigned Factor) const override;
4170b57cec5SDimitry Andric   bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
4180b57cec5SDimitry Andric                              unsigned Factor) const override;
4190b57cec5SDimitry Andric 
4200b57cec5SDimitry Andric   bool isLegalAddImmediate(int64_t) const override;
4210b57cec5SDimitry Andric   bool isLegalICmpImmediate(int64_t) const override;
4220b57cec5SDimitry Andric 
4230b57cec5SDimitry Andric   bool shouldConsiderGEPOffsetSplit() const override;
4240b57cec5SDimitry Andric 
4250b57cec5SDimitry Andric   EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
4260b57cec5SDimitry Andric                           bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
4270b57cec5SDimitry Andric                           const AttributeList &FuncAttributes) const override;
4280b57cec5SDimitry Andric 
4298bcb0991SDimitry Andric   LLT getOptimalMemOpLLT(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
4308bcb0991SDimitry Andric                           bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
4318bcb0991SDimitry Andric                           const AttributeList &FuncAttributes) const override;
4328bcb0991SDimitry Andric 
4330b57cec5SDimitry Andric   /// Return true if the addressing mode represented by AM is legal for this
4340b57cec5SDimitry Andric   /// target, for a load/store of the specified type.
4350b57cec5SDimitry Andric   bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
4360b57cec5SDimitry Andric                              unsigned AS,
4370b57cec5SDimitry Andric                              Instruction *I = nullptr) const override;
4380b57cec5SDimitry Andric 
4390b57cec5SDimitry Andric   /// Return the cost of the scaling factor used in the addressing
4400b57cec5SDimitry Andric   /// mode represented by AM for this target, for a load/store
4410b57cec5SDimitry Andric   /// of the specified type.
4420b57cec5SDimitry Andric   /// If the AM is supported, the return value must be >= 0.
4430b57cec5SDimitry Andric   /// If the AM is not supported, it returns a negative value.
4440b57cec5SDimitry Andric   int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
4450b57cec5SDimitry Andric                            unsigned AS) const override;
4460b57cec5SDimitry Andric 
4470b57cec5SDimitry Andric   /// Return true if an FMA operation is faster than a pair of fmul and fadd
4480b57cec5SDimitry Andric   /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
4490b57cec5SDimitry Andric   /// returns true; otherwise, fmuladd is expanded to fmul + fadd.
450480093f4SDimitry Andric   bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
451480093f4SDimitry Andric                                   EVT VT) const override;
452480093f4SDimitry Andric   bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;
4530b57cec5SDimitry Andric 
4540b57cec5SDimitry Andric   const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
4550b57cec5SDimitry Andric 
4560b57cec5SDimitry Andric   /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
4570b57cec5SDimitry Andric   bool isDesirableToCommuteWithShift(const SDNode *N,
4580b57cec5SDimitry Andric                                      CombineLevel Level) const override;
4590b57cec5SDimitry Andric 
4600b57cec5SDimitry Andric   /// Returns true if it is beneficial to convert a load of a constant
4610b57cec5SDimitry Andric   /// to just the constant itself.
4620b57cec5SDimitry Andric   bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
4630b57cec5SDimitry Andric                                          Type *Ty) const override;
4640b57cec5SDimitry Andric 
4650b57cec5SDimitry Andric   /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
4660b57cec5SDimitry Andric   /// with this index.
4670b57cec5SDimitry Andric   bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
4680b57cec5SDimitry Andric                                unsigned Index) const override;
4690b57cec5SDimitry Andric 
4700b57cec5SDimitry Andric   Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
4710b57cec5SDimitry Andric                         AtomicOrdering Ord) const override;
4720b57cec5SDimitry Andric   Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
4730b57cec5SDimitry Andric                               Value *Addr, AtomicOrdering Ord) const override;
4740b57cec5SDimitry Andric 
4750b57cec5SDimitry Andric   void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;
4760b57cec5SDimitry Andric 
4770b57cec5SDimitry Andric   TargetLoweringBase::AtomicExpansionKind
4780b57cec5SDimitry Andric   shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
4790b57cec5SDimitry Andric   bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
4800b57cec5SDimitry Andric   TargetLoweringBase::AtomicExpansionKind
4810b57cec5SDimitry Andric   shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
4820b57cec5SDimitry Andric 
4830b57cec5SDimitry Andric   TargetLoweringBase::AtomicExpansionKind
4840b57cec5SDimitry Andric   shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
4850b57cec5SDimitry Andric 
4860b57cec5SDimitry Andric   bool useLoadStackGuardNode() const override;
4870b57cec5SDimitry Andric   TargetLoweringBase::LegalizeTypeAction
4880b57cec5SDimitry Andric   getPreferredVectorAction(MVT VT) const override;
4890b57cec5SDimitry Andric 
4900b57cec5SDimitry Andric   /// If the target has a standard location for the stack protector cookie,
4910b57cec5SDimitry Andric   /// returns the address of that location. Otherwise, returns nullptr.
4920b57cec5SDimitry Andric   Value *getIRStackGuard(IRBuilder<> &IRB) const override;
4930b57cec5SDimitry Andric 
4940b57cec5SDimitry Andric   void insertSSPDeclarations(Module &M) const override;
4950b57cec5SDimitry Andric   Value *getSDagStackGuard(const Module &M) const override;
4960b57cec5SDimitry Andric   Function *getSSPStackGuardCheck(const Module &M) const override;
4970b57cec5SDimitry Andric 
4980b57cec5SDimitry Andric   /// If the target has a standard location for the unsafe stack pointer,
4990b57cec5SDimitry Andric   /// returns the address of that location. Otherwise, returns nullptr.
5000b57cec5SDimitry Andric   Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;
5010b57cec5SDimitry Andric 
5020b57cec5SDimitry Andric   /// If a physical register, this returns the register that receives the
5030b57cec5SDimitry Andric   /// exception address on entry to an EH pad.
5040b57cec5SDimitry Andric   unsigned
5050b57cec5SDimitry Andric   getExceptionPointerRegister(const Constant *PersonalityFn) const override {
5060b57cec5SDimitry Andric     // FIXME: This is a guess. Has this been defined yet?
5070b57cec5SDimitry Andric     return AArch64::X0;
5080b57cec5SDimitry Andric   }
5090b57cec5SDimitry Andric 
5100b57cec5SDimitry Andric   /// If a physical register, this returns the register that receives the
5110b57cec5SDimitry Andric   /// exception typeid on entry to a landing pad.
5120b57cec5SDimitry Andric   unsigned
5130b57cec5SDimitry Andric   getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
5140b57cec5SDimitry Andric     // FIXME: This is a guess. Has this been defined yet?
5150b57cec5SDimitry Andric     return AArch64::X1;
5160b57cec5SDimitry Andric   }
5170b57cec5SDimitry Andric 
5180b57cec5SDimitry Andric   bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
5190b57cec5SDimitry Andric 
5200b57cec5SDimitry Andric   bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
5210b57cec5SDimitry Andric                         const SelectionDAG &DAG) const override {
5220b57cec5SDimitry Andric     // Do not merge to float value size (128 bits) if no implicit
5230b57cec5SDimitry Andric     // float attribute is set.
5240b57cec5SDimitry Andric 
5250b57cec5SDimitry Andric     bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
5260b57cec5SDimitry Andric         Attribute::NoImplicitFloat);
5270b57cec5SDimitry Andric 
5280b57cec5SDimitry Andric     if (NoFloat)
5290b57cec5SDimitry Andric       return (MemVT.getSizeInBits() <= 64);
5300b57cec5SDimitry Andric     return true;
5310b57cec5SDimitry Andric   }
5320b57cec5SDimitry Andric 
5330b57cec5SDimitry Andric   bool isCheapToSpeculateCttz() const override {
5340b57cec5SDimitry Andric     return true;
5350b57cec5SDimitry Andric   }
5360b57cec5SDimitry Andric 
5370b57cec5SDimitry Andric   bool isCheapToSpeculateCtlz() const override {
5380b57cec5SDimitry Andric     return true;
5390b57cec5SDimitry Andric   }
5400b57cec5SDimitry Andric 
5410b57cec5SDimitry Andric   bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
5420b57cec5SDimitry Andric 
5430b57cec5SDimitry Andric   bool hasAndNotCompare(SDValue V) const override {
5440b57cec5SDimitry Andric     // We can use bics for any scalar.
5450b57cec5SDimitry Andric     return V.getValueType().isScalarInteger();
5460b57cec5SDimitry Andric   }
5470b57cec5SDimitry Andric 
5480b57cec5SDimitry Andric   bool hasAndNot(SDValue Y) const override {
5490b57cec5SDimitry Andric     EVT VT = Y.getValueType();
5500b57cec5SDimitry Andric 
5510b57cec5SDimitry Andric     if (!VT.isVector())
5520b57cec5SDimitry Andric       return hasAndNotCompare(Y);
5530b57cec5SDimitry Andric 
5540b57cec5SDimitry Andric     return VT.getSizeInBits() >= 64; // vector 'bic'
5550b57cec5SDimitry Andric   }
5560b57cec5SDimitry Andric 
5578bcb0991SDimitry Andric   bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5588bcb0991SDimitry Andric       SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
5598bcb0991SDimitry Andric       unsigned OldShiftOpcode, unsigned NewShiftOpcode,
5608bcb0991SDimitry Andric       SelectionDAG &DAG) const override;
5618bcb0991SDimitry Andric 
5620b57cec5SDimitry Andric   bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
5630b57cec5SDimitry Andric 
5640b57cec5SDimitry Andric   bool shouldTransformSignedTruncationCheck(EVT XVT,
5650b57cec5SDimitry Andric                                             unsigned KeptBits) const override {
5660b57cec5SDimitry Andric     // For vectors, we don't have a preference.
5670b57cec5SDimitry Andric     if (XVT.isVector())
5680b57cec5SDimitry Andric       return false;
5690b57cec5SDimitry Andric 
5700b57cec5SDimitry Andric     auto VTIsOk = [](EVT VT) -> bool {
5710b57cec5SDimitry Andric       return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
5720b57cec5SDimitry Andric              VT == MVT::i64;
5730b57cec5SDimitry Andric     };
5740b57cec5SDimitry Andric 
5750b57cec5SDimitry Andric     // We are OK with KeptBitsVT being byte/word/dword, which is what SXT supports.
5760b57cec5SDimitry Andric     // XVT will be larger than KeptBitsVT.
5770b57cec5SDimitry Andric     MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
5780b57cec5SDimitry Andric     return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
5790b57cec5SDimitry Andric   }
5800b57cec5SDimitry Andric 
5810b57cec5SDimitry Andric   bool preferIncOfAddToSubOfNot(EVT VT) const override;
5820b57cec5SDimitry Andric 
5830b57cec5SDimitry Andric   bool hasBitPreservingFPLogic(EVT VT) const override {
5840b57cec5SDimitry Andric     // FIXME: Is this always true? It should be true for vectors at least.
5850b57cec5SDimitry Andric     return VT == MVT::f32 || VT == MVT::f64;
5860b57cec5SDimitry Andric   }
5870b57cec5SDimitry Andric 
5880b57cec5SDimitry Andric   bool supportSplitCSR(MachineFunction *MF) const override {
5890b57cec5SDimitry Andric     return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
5900b57cec5SDimitry Andric            MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
5910b57cec5SDimitry Andric   }
5920b57cec5SDimitry Andric   void initializeSplitCSR(MachineBasicBlock *Entry) const override;
5930b57cec5SDimitry Andric   void insertCopiesSplitCSR(
5940b57cec5SDimitry Andric       MachineBasicBlock *Entry,
5950b57cec5SDimitry Andric       const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
5960b57cec5SDimitry Andric 
5970b57cec5SDimitry Andric   bool supportSwiftError() const override {
5980b57cec5SDimitry Andric     return true;
5990b57cec5SDimitry Andric   }
6000b57cec5SDimitry Andric 
6010b57cec5SDimitry Andric   /// Enable aggressive FMA fusion on targets that want it.
6020b57cec5SDimitry Andric   bool enableAggressiveFMAFusion(EVT VT) const override;
6030b57cec5SDimitry Andric 
6040b57cec5SDimitry Andric   /// Returns the size of the platform's va_list object.
6050b57cec5SDimitry Andric   unsigned getVaListSizeInBits(const DataLayout &DL) const override;
6060b57cec5SDimitry Andric 
6070b57cec5SDimitry Andric   /// Returns true if \p VecTy is a legal interleaved access type. This
6080b57cec5SDimitry Andric   /// function checks the vector element type and the overall width of the
6090b57cec5SDimitry Andric   /// vector.
6100b57cec5SDimitry Andric   bool isLegalInterleavedAccessType(VectorType *VecTy,
6110b57cec5SDimitry Andric                                     const DataLayout &DL) const;
6120b57cec5SDimitry Andric 
6130b57cec5SDimitry Andric   /// Returns the number of interleaved accesses that will be generated when
6140b57cec5SDimitry Andric   /// lowering accesses of the given type.
6150b57cec5SDimitry Andric   unsigned getNumInterleavedAccesses(VectorType *VecTy,
6160b57cec5SDimitry Andric                                      const DataLayout &DL) const;
6170b57cec5SDimitry Andric 
6180b57cec5SDimitry Andric   MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;
6190b57cec5SDimitry Andric 
6200b57cec5SDimitry Andric   bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
6210b57cec5SDimitry Andric                                                  CallingConv::ID CallConv,
6220b57cec5SDimitry Andric                                                  bool isVarArg) const override;
6230b57cec5SDimitry Andric   /// Used for exception handling on Win64.
6240b57cec5SDimitry Andric   bool needsFixedCatchObjects() const override;
6250b57cec5SDimitry Andric private:
6260b57cec5SDimitry Andric   /// Keep a pointer to the AArch64Subtarget around so that we can
6270b57cec5SDimitry Andric   /// make the right decision when generating code for different targets.
6280b57cec5SDimitry Andric   const AArch64Subtarget *Subtarget;
6290b57cec5SDimitry Andric 
6300b57cec5SDimitry Andric   bool isExtFreeImpl(const Instruction *Ext) const override;
6310b57cec5SDimitry Andric 
6320b57cec5SDimitry Andric   void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
6330b57cec5SDimitry Andric   void addDRTypeForNEON(MVT VT);
6340b57cec5SDimitry Andric   void addQRTypeForNEON(MVT VT);
6350b57cec5SDimitry Andric 
6360b57cec5SDimitry Andric   SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
6370b57cec5SDimitry Andric                                bool isVarArg,
6380b57cec5SDimitry Andric                                const SmallVectorImpl<ISD::InputArg> &Ins,
6390b57cec5SDimitry Andric                                const SDLoc &DL, SelectionDAG &DAG,
6400b57cec5SDimitry Andric                                SmallVectorImpl<SDValue> &InVals) const override;
6410b57cec5SDimitry Andric 
6420b57cec5SDimitry Andric   SDValue LowerCall(CallLoweringInfo & /*CLI*/,
6430b57cec5SDimitry Andric                     SmallVectorImpl<SDValue> &InVals) const override;
6440b57cec5SDimitry Andric 
6450b57cec5SDimitry Andric   SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
6460b57cec5SDimitry Andric                           CallingConv::ID CallConv, bool isVarArg,
6470b57cec5SDimitry Andric                           const SmallVectorImpl<ISD::InputArg> &Ins,
6480b57cec5SDimitry Andric                           const SDLoc &DL, SelectionDAG &DAG,
6490b57cec5SDimitry Andric                           SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
6500b57cec5SDimitry Andric                           SDValue ThisVal) const;
6510b57cec5SDimitry Andric 
6520b57cec5SDimitry Andric   SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
6530b57cec5SDimitry Andric 
6540b57cec5SDimitry Andric   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
6550b57cec5SDimitry Andric 
6560b57cec5SDimitry Andric   bool isEligibleForTailCallOptimization(
6570b57cec5SDimitry Andric       SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
6580b57cec5SDimitry Andric       const SmallVectorImpl<ISD::OutputArg> &Outs,
6590b57cec5SDimitry Andric       const SmallVectorImpl<SDValue> &OutVals,
6600b57cec5SDimitry Andric       const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
6610b57cec5SDimitry Andric 
6620b57cec5SDimitry Andric   /// Finds the incoming stack arguments which overlap the given fixed stack
6630b57cec5SDimitry Andric   /// object and incorporates their load into the current chain. This prevents
6640b57cec5SDimitry Andric   /// an upcoming store from clobbering the stack argument before it's used.
6650b57cec5SDimitry Andric   SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
6660b57cec5SDimitry Andric                               MachineFrameInfo &MFI, int ClobberedFI) const;
6670b57cec5SDimitry Andric 
6680b57cec5SDimitry Andric   bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;
6690b57cec5SDimitry Andric 
6700b57cec5SDimitry Andric   void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
6710b57cec5SDimitry Andric                            SDValue &Chain) const;
6720b57cec5SDimitry Andric 
6730b57cec5SDimitry Andric   bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
6740b57cec5SDimitry Andric                       bool isVarArg,
6750b57cec5SDimitry Andric                       const SmallVectorImpl<ISD::OutputArg> &Outs,
6760b57cec5SDimitry Andric                       LLVMContext &Context) const override;
6770b57cec5SDimitry Andric 
6780b57cec5SDimitry Andric   SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
6790b57cec5SDimitry Andric                       const SmallVectorImpl<ISD::OutputArg> &Outs,
6800b57cec5SDimitry Andric                       const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
6810b57cec5SDimitry Andric                       SelectionDAG &DAG) const override;
6820b57cec5SDimitry Andric 
6830b57cec5SDimitry Andric   SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
6840b57cec5SDimitry Andric                         unsigned Flag) const;
6850b57cec5SDimitry Andric   SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
6860b57cec5SDimitry Andric                         unsigned Flag) const;
6870b57cec5SDimitry Andric   SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
6880b57cec5SDimitry Andric                         unsigned Flag) const;
6890b57cec5SDimitry Andric   SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
6900b57cec5SDimitry Andric                         unsigned Flag) const;
6910b57cec5SDimitry Andric   template <class NodeTy>
6920b57cec5SDimitry Andric   SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
6930b57cec5SDimitry Andric   template <class NodeTy>
6940b57cec5SDimitry Andric   SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
6950b57cec5SDimitry Andric   template <class NodeTy>
6960b57cec5SDimitry Andric   SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
6970b57cec5SDimitry Andric   template <class NodeTy>
6980b57cec5SDimitry Andric   SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
6990b57cec5SDimitry Andric   SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
7000b57cec5SDimitry Andric   SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
7010b57cec5SDimitry Andric   SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
7020b57cec5SDimitry Andric   SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
7030b57cec5SDimitry Andric   SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
704480093f4SDimitry Andric   SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
705480093f4SDimitry Andric                                const SDLoc &DL, SelectionDAG &DAG) const;
7060b57cec5SDimitry Andric   SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
7070b57cec5SDimitry Andric                                  SelectionDAG &DAG) const;
7080b57cec5SDimitry Andric   SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
7090b57cec5SDimitry Andric   SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
7100b57cec5SDimitry Andric   SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
7110b57cec5SDimitry Andric   SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
7120b57cec5SDimitry Andric   SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
7130b57cec5SDimitry Andric   SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
7140b57cec5SDimitry Andric                          SDValue TVal, SDValue FVal, const SDLoc &dl,
7150b57cec5SDimitry Andric                          SelectionDAG &DAG) const;
7160b57cec5SDimitry Andric   SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
7170b57cec5SDimitry Andric   SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
7180b57cec5SDimitry Andric   SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
7190b57cec5SDimitry Andric   SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
7200b57cec5SDimitry Andric   SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
7210b57cec5SDimitry Andric   SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
7220b57cec5SDimitry Andric   SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
7230b57cec5SDimitry Andric   SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
7240b57cec5SDimitry Andric   SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
7250b57cec5SDimitry Andric   SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
7260b57cec5SDimitry Andric   SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
7270b57cec5SDimitry Andric   SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
7280b57cec5SDimitry Andric   SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
7290b57cec5SDimitry Andric   SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
7300b57cec5SDimitry Andric   SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
7310b57cec5SDimitry Andric   SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
7320b57cec5SDimitry Andric   SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
7330b57cec5SDimitry Andric   SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
7340b57cec5SDimitry Andric   SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
7358bcb0991SDimitry Andric   SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
7360b57cec5SDimitry Andric   SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
7370b57cec5SDimitry Andric   SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
7380b57cec5SDimitry Andric   SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
7390b57cec5SDimitry Andric   SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
7400b57cec5SDimitry Andric   SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
7410b57cec5SDimitry Andric   SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
7420b57cec5SDimitry Andric   SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
7430b57cec5SDimitry Andric                         RTLIB::Libcall Call) const;
7440b57cec5SDimitry Andric   SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
7450b57cec5SDimitry Andric   SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
7460b57cec5SDimitry Andric   SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
7470b57cec5SDimitry Andric   SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
7480b57cec5SDimitry Andric   SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
7490b57cec5SDimitry Andric   SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
7500b57cec5SDimitry Andric   SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
7510b57cec5SDimitry Andric   SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
7520b57cec5SDimitry Andric   SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
7530b57cec5SDimitry Andric   SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
7540b57cec5SDimitry Andric   SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
7550b57cec5SDimitry Andric   SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
7560b57cec5SDimitry Andric   SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
7570b57cec5SDimitry Andric   SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
7580b57cec5SDimitry Andric                                          SDValue &Size,
7590b57cec5SDimitry Andric                                          SelectionDAG &DAG) const;
7600b57cec5SDimitry Andric 
7610b57cec5SDimitry Andric   SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
7620b57cec5SDimitry Andric                         SmallVectorImpl<SDNode *> &Created) const override;
7630b57cec5SDimitry Andric   SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
7640b57cec5SDimitry Andric                           int &ExtraSteps, bool &UseOneConst,
7650b57cec5SDimitry Andric                           bool Reciprocal) const override;
7660b57cec5SDimitry Andric   SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
7670b57cec5SDimitry Andric                            int &ExtraSteps) const override;
7680b57cec5SDimitry Andric   unsigned combineRepeatedFPDivisors() const override;
7690b57cec5SDimitry Andric 
7700b57cec5SDimitry Andric   ConstraintType getConstraintType(StringRef Constraint) const override;
771480093f4SDimitry Andric   Register getRegisterByName(const char* RegName, LLT VT,
7728bcb0991SDimitry Andric                              const MachineFunction &MF) const override;
7730b57cec5SDimitry Andric 
7740b57cec5SDimitry Andric   /// Examine constraint string and operand type and determine a weight value.
7750b57cec5SDimitry Andric   /// The operand object must already have been set up with the operand type.
7760b57cec5SDimitry Andric   ConstraintWeight
7770b57cec5SDimitry Andric   getSingleConstraintMatchWeight(AsmOperandInfo &info,
7780b57cec5SDimitry Andric                                  const char *constraint) const override;
7790b57cec5SDimitry Andric 
7800b57cec5SDimitry Andric   std::pair<unsigned, const TargetRegisterClass *>
7810b57cec5SDimitry Andric   getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
7820b57cec5SDimitry Andric                                StringRef Constraint, MVT VT) const override;
7830b57cec5SDimitry Andric 
7840b57cec5SDimitry Andric   const char *LowerXConstraint(EVT ConstraintVT) const override;
7850b57cec5SDimitry Andric 
7860b57cec5SDimitry Andric   void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
7870b57cec5SDimitry Andric                                     std::vector<SDValue> &Ops,
7880b57cec5SDimitry Andric                                     SelectionDAG &DAG) const override;
7890b57cec5SDimitry Andric 
7900b57cec5SDimitry Andric   unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
7910b57cec5SDimitry Andric     if (ConstraintCode == "Q")
7920b57cec5SDimitry Andric       return InlineAsm::Constraint_Q;
7930b57cec5SDimitry Andric     // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
7940b57cec5SDimitry Andric     //        followed by llvm_unreachable so we'll leave them unimplemented in
7950b57cec5SDimitry Andric     //        the backend for now.
7960b57cec5SDimitry Andric     return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
7970b57cec5SDimitry Andric   }
7980b57cec5SDimitry Andric 
799480093f4SDimitry Andric   bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
8000b57cec5SDimitry Andric   bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
8010b57cec5SDimitry Andric   bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
8020b57cec5SDimitry Andric   bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
8030b57cec5SDimitry Andric                               ISD::MemIndexedMode &AM, bool &IsInc,
8040b57cec5SDimitry Andric                               SelectionDAG &DAG) const;
8050b57cec5SDimitry Andric   bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
8060b57cec5SDimitry Andric                                  ISD::MemIndexedMode &AM,
8070b57cec5SDimitry Andric                                  SelectionDAG &DAG) const override;
8080b57cec5SDimitry Andric   bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
8090b57cec5SDimitry Andric                                   SDValue &Offset, ISD::MemIndexedMode &AM,
8100b57cec5SDimitry Andric                                   SelectionDAG &DAG) const override;
8110b57cec5SDimitry Andric 
8120b57cec5SDimitry Andric   void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
8130b57cec5SDimitry Andric                           SelectionDAG &DAG) const override;
8140b57cec5SDimitry Andric 
8150b57cec5SDimitry Andric   bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
8160b57cec5SDimitry Andric 
8170b57cec5SDimitry Andric   void finalizeLowering(MachineFunction &MF) const override;
8180b57cec5SDimitry Andric };
8190b57cec5SDimitry Andric 
8200b57cec5SDimitry Andric namespace AArch64 {
8210b57cec5SDimitry Andric FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
8220b57cec5SDimitry Andric                          const TargetLibraryInfo *libInfo);
8230b57cec5SDimitry Andric } // end namespace AArch64
8240b57cec5SDimitry Andric 
8250b57cec5SDimitry Andric } // end namespace llvm
8260b57cec5SDimitry Andric 
8270b57cec5SDimitry Andric #endif