xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h (revision 5ffd83dbcc34f10e07f6d3e968ae6365869615f4)
10b57cec5SDimitry Andric //==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
90b57cec5SDimitry Andric // This file defines the interfaces that AArch64 uses to lower LLVM code into a
100b57cec5SDimitry Andric // selection DAG.
110b57cec5SDimitry Andric //
120b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
130b57cec5SDimitry Andric 
140b57cec5SDimitry Andric #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
150b57cec5SDimitry Andric #define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
160b57cec5SDimitry Andric 
170b57cec5SDimitry Andric #include "AArch64.h"
180b57cec5SDimitry Andric #include "llvm/CodeGen/CallingConvLower.h"
190b57cec5SDimitry Andric #include "llvm/CodeGen/SelectionDAG.h"
200b57cec5SDimitry Andric #include "llvm/CodeGen/TargetLowering.h"
210b57cec5SDimitry Andric #include "llvm/IR/CallingConv.h"
220b57cec5SDimitry Andric #include "llvm/IR/Instruction.h"
230b57cec5SDimitry Andric 
240b57cec5SDimitry Andric namespace llvm {
250b57cec5SDimitry Andric 
namespace AArch64ISD {

// For predicated nodes where the result is a vector, the operation is
// controlled by a governing predicate and the inactive lanes are explicitly
// defined with a value, please stick to the following naming convention:
//
//    _MERGE_OP<n>        The result value is a vector with inactive lanes equal
//                        to source operand OP<n>.
//
//    _MERGE_ZERO         The result value is a vector with inactive lanes
//                        actively zeroed.
//
//    _MERGE_PASSTHRU     The result value is a vector with inactive lanes equal
//                        to the last source operand, whose only purpose is to
//                        be a passthru value.
//
// For other cases where no explicit action is needed to set the inactive lanes,
// or when the result is not a vector and it is needed or helpful to
// distinguish a node from similar unpredicated nodes, use:
//
//    _PRED
//
// NOTE: Enumerator order is significant — it determines the implicit opcode
// values relative to FIRST_NUMBER / FIRST_TARGET_STRICTFP_OPCODE /
// FIRST_TARGET_MEMORY_OPCODE. Do not reorder entries.
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADR,      // ADR
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions
  ADD_PRED,
  FADD_PRED,
  SDIV_PRED,
  UDIV_PRED,
  FMA_PRED,
  SMIN_MERGE_OP1,
  UMIN_MERGE_OP1,
  SMAX_MERGE_OP1,
  UMAX_MERGE_OP1,
  SHL_MERGE_OP1,
  SRL_MERGE_OP1,
  SRA_MERGE_OP1,

  // Predicated comparison; inactive result lanes are zeroed.
  SETCC_MERGE_ZERO,

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bitwise select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSP,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector shift by constant and insert
  VSLI,
  VSRI,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector rounding halving addition
  SRHADD,
  URHADD,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Predicated across-lanes reductions (integer min/max and bitwise).
  SMAXV_PRED,
  UMAXV_PRED,
  SMINV_PRED,
  UMINV_PRED,
  ORV_PRED,
  EORV_PRED,
  ANDV_PRED,

  // Vector bitwise negation
  NOT,

  // Vector bitwise insertion
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  SMULL,
  UMULL,

  // Reciprocal estimates and steps.
  FRECPE,
  FRECPS,
  FRSQRTE,
  FRSQRTS,

  // Vector unpacks: extend the signed/unsigned (S/U) high/low (HI/LO) half of
  // the source vector (names match the corresponding SVE instructions).
  SUNPKHI,
  SUNPKLO,
  UUNPKHI,
  UUNPKLO,

  // Element extraction/permute operations (SVE clasta/clastb, lasta/lastb,
  // rev and tbl counterparts).
  CLASTA_N,
  CLASTB_N,
  LASTA,
  LASTB,
  REV,
  TBL,

  // Floating-point reductions.
  FADDA_PRED,
  FADDV_PRED,
  FMAXV_PRED,
  FMAXNMV_PRED,
  FMINV_PRED,
  FMINNMV_PRED,

  // Insert scalar into vector, predicate test, and predicate materialisation.
  INSR,
  PTEST,
  PTRUE,

  DUP_MERGE_PASSTHRU,
  INDEX_VECTOR,

  REINTERPRET_CAST,

  // Contiguous masked loads (normal, sign-extending, non-faulting,
  // first-faulting, replicating); inactive lanes are zeroed.
  LD1_MERGE_ZERO,
  LD1S_MERGE_ZERO,
  LDNF1_MERGE_ZERO,
  LDNF1S_MERGE_ZERO,
  LDFF1_MERGE_ZERO,
  LDFF1S_MERGE_ZERO,
  LD1RQ_MERGE_ZERO,
  LD1RO_MERGE_ZERO,

  // Structured loads.
  SVE_LD2_MERGE_ZERO,
  SVE_LD3_MERGE_ZERO,
  SVE_LD4_MERGE_ZERO,

  // Unsigned gather loads.
  GLD1_MERGE_ZERO,
  GLD1_SCALED_MERGE_ZERO,
  GLD1_UXTW_MERGE_ZERO,
  GLD1_SXTW_MERGE_ZERO,
  GLD1_UXTW_SCALED_MERGE_ZERO,
  GLD1_SXTW_SCALED_MERGE_ZERO,
  GLD1_IMM_MERGE_ZERO,

  // Signed gather loads
  GLD1S_MERGE_ZERO,
  GLD1S_SCALED_MERGE_ZERO,
  GLD1S_UXTW_MERGE_ZERO,
  GLD1S_SXTW_MERGE_ZERO,
  GLD1S_UXTW_SCALED_MERGE_ZERO,
  GLD1S_SXTW_SCALED_MERGE_ZERO,
  GLD1S_IMM_MERGE_ZERO,

  // Unsigned first-faulting gather loads.
  GLDFF1_MERGE_ZERO,
  GLDFF1_SCALED_MERGE_ZERO,
  GLDFF1_UXTW_MERGE_ZERO,
  GLDFF1_SXTW_MERGE_ZERO,
  GLDFF1_UXTW_SCALED_MERGE_ZERO,
  GLDFF1_SXTW_SCALED_MERGE_ZERO,
  GLDFF1_IMM_MERGE_ZERO,

  // Signed first-faulting gather loads.
  GLDFF1S_MERGE_ZERO,
  GLDFF1S_SCALED_MERGE_ZERO,
  GLDFF1S_UXTW_MERGE_ZERO,
  GLDFF1S_SXTW_MERGE_ZERO,
  GLDFF1S_UXTW_SCALED_MERGE_ZERO,
  GLDFF1S_SXTW_SCALED_MERGE_ZERO,
  GLDFF1S_IMM_MERGE_ZERO,

  // Non-temporal gather loads
  GLDNT1_MERGE_ZERO,
  GLDNT1_INDEX_MERGE_ZERO,
  GLDNT1S_MERGE_ZERO,

  // Contiguous masked store.
  ST1_PRED,

  // Scatter store
  SST1_PRED,
  SST1_SCALED_PRED,
  SST1_UXTW_PRED,
  SST1_SXTW_PRED,
  SST1_UXTW_SCALED_PRED,
  SST1_SXTW_SCALED_PRED,
  SST1_IMM_PRED,

  // Non-temporal scatter store
  SSTNT1_PRED,
  SSTNT1_INDEX_PRED,

  // Strict (exception-raising) floating point comparison
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPE,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost,

  // Memory-tagging stores (stg/stzg/st2g/stz2g counterparts).
  STG,
  STZG,
  ST2G,
  STZ2G,

  // Paired load/store and store of a non-temporal pair.
  LDP,
  STP,
  STNP
};

} // end namespace AArch64ISD
3880b57cec5SDimitry Andric 
3890b57cec5SDimitry Andric namespace {
3900b57cec5SDimitry Andric 
3910b57cec5SDimitry Andric // Any instruction that defines a 32-bit result zeros out the high half of the
3920b57cec5SDimitry Andric // register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
3930b57cec5SDimitry Andric // be copying from a truncate. But any other 32-bit operation will zero-extend
3940b57cec5SDimitry Andric // up to 64 bits.
3950b57cec5SDimitry Andric // FIXME: X86 also checks for CMOV here. Do we need something similar?
3960b57cec5SDimitry Andric static inline bool isDef32(const SDNode &N) {
3970b57cec5SDimitry Andric   unsigned Opc = N.getOpcode();
3980b57cec5SDimitry Andric   return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
3990b57cec5SDimitry Andric          Opc != ISD::CopyFromReg;
4000b57cec5SDimitry Andric }
4010b57cec5SDimitry Andric 
4020b57cec5SDimitry Andric } // end anonymous namespace
4030b57cec5SDimitry Andric 
4040b57cec5SDimitry Andric class AArch64Subtarget;
4050b57cec5SDimitry Andric class AArch64TargetMachine;
4060b57cec5SDimitry Andric 
4070b57cec5SDimitry Andric class AArch64TargetLowering : public TargetLowering {
4080b57cec5SDimitry Andric public:
4090b57cec5SDimitry Andric   explicit AArch64TargetLowering(const TargetMachine &TM,
4100b57cec5SDimitry Andric                                  const AArch64Subtarget &STI);
4110b57cec5SDimitry Andric 
4120b57cec5SDimitry Andric   /// Selects the correct CCAssignFn for a given CallingConvention value.
4130b57cec5SDimitry Andric   CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
4140b57cec5SDimitry Andric 
4150b57cec5SDimitry Andric   /// Selects the correct CCAssignFn for a given CallingConvention value.
4160b57cec5SDimitry Andric   CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;
4170b57cec5SDimitry Andric 
4180b57cec5SDimitry Andric   /// Determine which of the bits specified in Mask are known to be either zero
4190b57cec5SDimitry Andric   /// or one and return them in the KnownZero/KnownOne bitsets.
4200b57cec5SDimitry Andric   void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
4210b57cec5SDimitry Andric                                      const APInt &DemandedElts,
4220b57cec5SDimitry Andric                                      const SelectionDAG &DAG,
4230b57cec5SDimitry Andric                                      unsigned Depth = 0) const override;
4240b57cec5SDimitry Andric 
4258bcb0991SDimitry Andric   MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
4268bcb0991SDimitry Andric     // Returning i64 unconditionally here (i.e. even for ILP32) means that the
4278bcb0991SDimitry Andric     // *DAG* representation of pointers will always be 64-bits. They will be
4288bcb0991SDimitry Andric     // truncated and extended when transferred to memory, but the 64-bit DAG
4298bcb0991SDimitry Andric     // allows us to use AArch64's addressing modes much more easily.
4308bcb0991SDimitry Andric     return MVT::getIntegerVT(64);
4318bcb0991SDimitry Andric   }
4328bcb0991SDimitry Andric 
433*5ffd83dbSDimitry Andric   bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
434*5ffd83dbSDimitry Andric                                     const APInt &DemandedElts,
4350b57cec5SDimitry Andric                                     TargetLoweringOpt &TLO) const override;
4360b57cec5SDimitry Andric 
4370b57cec5SDimitry Andric   MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
4380b57cec5SDimitry Andric 
4390b57cec5SDimitry Andric   /// Returns true if the target allows unaligned memory accesses of the
4400b57cec5SDimitry Andric   /// specified type.
4410b57cec5SDimitry Andric   bool allowsMisalignedMemoryAccesses(
4420b57cec5SDimitry Andric       EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
4430b57cec5SDimitry Andric       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
4440b57cec5SDimitry Andric       bool *Fast = nullptr) const override;
4458bcb0991SDimitry Andric   /// LLT variant.
446*5ffd83dbSDimitry Andric   bool allowsMisalignedMemoryAccesses(LLT Ty, unsigned AddrSpace,
447*5ffd83dbSDimitry Andric                                       Align Alignment,
448*5ffd83dbSDimitry Andric                                       MachineMemOperand::Flags Flags,
4498bcb0991SDimitry Andric                                       bool *Fast = nullptr) const override;
4500b57cec5SDimitry Andric 
4510b57cec5SDimitry Andric   /// Provide custom lowering hooks for some operations.
4520b57cec5SDimitry Andric   SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
4530b57cec5SDimitry Andric 
4540b57cec5SDimitry Andric   const char *getTargetNodeName(unsigned Opcode) const override;
4550b57cec5SDimitry Andric 
4560b57cec5SDimitry Andric   SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
4570b57cec5SDimitry Andric 
4580b57cec5SDimitry Andric   /// Returns true if a cast between SrcAS and DestAS is a noop.
4590b57cec5SDimitry Andric   bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
4600b57cec5SDimitry Andric     // Addrspacecasts are always noops.
4610b57cec5SDimitry Andric     return true;
4620b57cec5SDimitry Andric   }
4630b57cec5SDimitry Andric 
4640b57cec5SDimitry Andric   /// This method returns a target specific FastISel object, or null if the
4650b57cec5SDimitry Andric   /// target does not support "fast" ISel.
4660b57cec5SDimitry Andric   FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
4670b57cec5SDimitry Andric                            const TargetLibraryInfo *libInfo) const override;
4680b57cec5SDimitry Andric 
4690b57cec5SDimitry Andric   bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
4700b57cec5SDimitry Andric 
4710b57cec5SDimitry Andric   bool isFPImmLegal(const APFloat &Imm, EVT VT,
4720b57cec5SDimitry Andric                     bool ForCodeSize) const override;
4730b57cec5SDimitry Andric 
4740b57cec5SDimitry Andric   /// Return true if the given shuffle mask can be codegen'd directly, or if it
4750b57cec5SDimitry Andric   /// should be stack expanded.
4760b57cec5SDimitry Andric   bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
4770b57cec5SDimitry Andric 
4780b57cec5SDimitry Andric   /// Return the ISD::SETCC ValueType.
4790b57cec5SDimitry Andric   EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
4800b57cec5SDimitry Andric                          EVT VT) const override;
4810b57cec5SDimitry Andric 
4820b57cec5SDimitry Andric   SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
4830b57cec5SDimitry Andric 
4840b57cec5SDimitry Andric   MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
4850b57cec5SDimitry Andric                                   MachineBasicBlock *BB) const;
4860b57cec5SDimitry Andric 
4870b57cec5SDimitry Andric   MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
4880b57cec5SDimitry Andric                                            MachineBasicBlock *BB) const;
4890b57cec5SDimitry Andric 
4900b57cec5SDimitry Andric   MachineBasicBlock *
4910b57cec5SDimitry Andric   EmitInstrWithCustomInserter(MachineInstr &MI,
4920b57cec5SDimitry Andric                               MachineBasicBlock *MBB) const override;
4930b57cec5SDimitry Andric 
4940b57cec5SDimitry Andric   bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
4950b57cec5SDimitry Andric                           MachineFunction &MF,
4960b57cec5SDimitry Andric                           unsigned Intrinsic) const override;
4970b57cec5SDimitry Andric 
4980b57cec5SDimitry Andric   bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
4990b57cec5SDimitry Andric                              EVT NewVT) const override;
5000b57cec5SDimitry Andric 
5010b57cec5SDimitry Andric   bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
5020b57cec5SDimitry Andric   bool isTruncateFree(EVT VT1, EVT VT2) const override;
5030b57cec5SDimitry Andric 
5040b57cec5SDimitry Andric   bool isProfitableToHoist(Instruction *I) const override;
5050b57cec5SDimitry Andric 
5060b57cec5SDimitry Andric   bool isZExtFree(Type *Ty1, Type *Ty2) const override;
5070b57cec5SDimitry Andric   bool isZExtFree(EVT VT1, EVT VT2) const override;
5080b57cec5SDimitry Andric   bool isZExtFree(SDValue Val, EVT VT2) const override;
5090b57cec5SDimitry Andric 
5100b57cec5SDimitry Andric   bool shouldSinkOperands(Instruction *I,
5110b57cec5SDimitry Andric                           SmallVectorImpl<Use *> &Ops) const override;
5120b57cec5SDimitry Andric 
513*5ffd83dbSDimitry Andric   bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override;
5140b57cec5SDimitry Andric 
  /// Interleaved accesses are supported for factors up to 4, matching the
  /// structured ld2/st2 ... ld4/st4 memory instructions.
  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
5160b57cec5SDimitry Andric 
5170b57cec5SDimitry Andric   bool lowerInterleavedLoad(LoadInst *LI,
5180b57cec5SDimitry Andric                             ArrayRef<ShuffleVectorInst *> Shuffles,
5190b57cec5SDimitry Andric                             ArrayRef<unsigned> Indices,
5200b57cec5SDimitry Andric                             unsigned Factor) const override;
5210b57cec5SDimitry Andric   bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
5220b57cec5SDimitry Andric                              unsigned Factor) const override;
5230b57cec5SDimitry Andric 
5240b57cec5SDimitry Andric   bool isLegalAddImmediate(int64_t) const override;
5250b57cec5SDimitry Andric   bool isLegalICmpImmediate(int64_t) const override;
5260b57cec5SDimitry Andric 
5270b57cec5SDimitry Andric   bool shouldConsiderGEPOffsetSplit() const override;
5280b57cec5SDimitry Andric 
529*5ffd83dbSDimitry Andric   EVT getOptimalMemOpType(const MemOp &Op,
5300b57cec5SDimitry Andric                           const AttributeList &FuncAttributes) const override;
5310b57cec5SDimitry Andric 
532*5ffd83dbSDimitry Andric   LLT getOptimalMemOpLLT(const MemOp &Op,
5338bcb0991SDimitry Andric                          const AttributeList &FuncAttributes) const override;
5348bcb0991SDimitry Andric 
5350b57cec5SDimitry Andric   /// Return true if the addressing mode represented by AM is legal for this
5360b57cec5SDimitry Andric   /// target, for a load/store of the specified type.
5370b57cec5SDimitry Andric   bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
5380b57cec5SDimitry Andric                              unsigned AS,
5390b57cec5SDimitry Andric                              Instruction *I = nullptr) const override;
5400b57cec5SDimitry Andric 
5410b57cec5SDimitry Andric   /// Return the cost of the scaling factor used in the addressing
5420b57cec5SDimitry Andric   /// mode represented by AM for this target, for a load/store
5430b57cec5SDimitry Andric   /// of the specified type.
5440b57cec5SDimitry Andric   /// If the AM is supported, the return value must be >= 0.
5450b57cec5SDimitry Andric   /// If the AM is not supported, it returns a negative value.
5460b57cec5SDimitry Andric   int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
5470b57cec5SDimitry Andric                            unsigned AS) const override;
5480b57cec5SDimitry Andric 
5490b57cec5SDimitry Andric   /// Return true if an FMA operation is faster than a pair of fmul and fadd
5500b57cec5SDimitry Andric   /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
5510b57cec5SDimitry Andric   /// returns true, otherwise fmuladd is expanded to fmul + fadd.
552480093f4SDimitry Andric   bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
553480093f4SDimitry Andric                                   EVT VT) const override;
554480093f4SDimitry Andric   bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;
5550b57cec5SDimitry Andric 
5560b57cec5SDimitry Andric   const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
5570b57cec5SDimitry Andric 
5580b57cec5SDimitry Andric   /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
5590b57cec5SDimitry Andric   bool isDesirableToCommuteWithShift(const SDNode *N,
5600b57cec5SDimitry Andric                                      CombineLevel Level) const override;
5610b57cec5SDimitry Andric 
5620b57cec5SDimitry Andric   /// Returns true if it is beneficial to convert a load of a constant
5630b57cec5SDimitry Andric   /// to just the constant itself.
5640b57cec5SDimitry Andric   bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
5650b57cec5SDimitry Andric                                          Type *Ty) const override;
5660b57cec5SDimitry Andric 
5670b57cec5SDimitry Andric   /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
5680b57cec5SDimitry Andric   /// with this index.
5690b57cec5SDimitry Andric   bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
5700b57cec5SDimitry Andric                                unsigned Index) const override;
5710b57cec5SDimitry Andric 
  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on
    // AArch64, so MathUsed is deliberately forced to true when deferring to
    // the generic profitability check.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }
578*5ffd83dbSDimitry Andric 
  // IR-level hooks used when expanding atomic operations into
  // load-linked / store-conditional sequences.
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  // Decide how (and whether) each kind of atomic instruction should be
  // expanded at the IR level for this target.
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  // Stack-smashing-protector hooks: declare the guard symbols in module \p M
  // and retrieve the guard value / check function from it.
  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;
6120b57cec5SDimitry Andric   /// exception address on entry to an EH pad.
  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  /// Returns true if integer division should be left as a division for the
  /// given type/attributes rather than expanded; see the .cpp for criteria.
  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
6280b57cec5SDimitry Andric 
6290b57cec5SDimitry Andric   bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
6300b57cec5SDimitry Andric                         const SelectionDAG &DAG) const override {
6310b57cec5SDimitry Andric     // Do not merge to float value size (128 bytes) if no implicit
6320b57cec5SDimitry Andric     // float attribute is set.
6330b57cec5SDimitry Andric 
6340b57cec5SDimitry Andric     bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
6350b57cec5SDimitry Andric         Attribute::NoImplicitFloat);
6360b57cec5SDimitry Andric 
6370b57cec5SDimitry Andric     if (NoFloat)
6380b57cec5SDimitry Andric       return (MemVT.getSizeInBits() <= 64);
6390b57cec5SDimitry Andric     return true;
6400b57cec5SDimitry Andric   }
6410b57cec5SDimitry Andric 
  // Count-trailing-zeros is always considered cheap to speculate.
  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  // Count-leading-zeros is always considered cheap to speculate.
  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  /// Returns true if folding a mask with a compare-against-zero (as in
  /// (and X, Mask) == 0) is profitable for instruction \p AndI.
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  /// Returns true if an and-not compare ((X & ~Y) == 0) is preferable.
  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }
6560b57cec5SDimitry Andric 
6570b57cec5SDimitry Andric   bool hasAndNot(SDValue Y) const override {
6580b57cec5SDimitry Andric     EVT VT = Y.getValueType();
6590b57cec5SDimitry Andric 
6600b57cec5SDimitry Andric     if (!VT.isVector())
6610b57cec5SDimitry Andric       return hasAndNotCompare(Y);
6620b57cec5SDimitry Andric 
6630b57cec5SDimitry Andric     return VT.getSizeInBits() >= 64; // vector 'bic'
6640b57cec5SDimitry Andric   }
6650b57cec5SDimitry Andric 
  /// Decides whether to rewrite ((X shift C) & CC) by hoisting the constant
  /// out of the shift's LHS; see TargetLowering for the full contract.
  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  /// Returns true if funnel/multi-part shift node \p N should be expanded.
  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
6720b57cec5SDimitry Andric 
6730b57cec5SDimitry Andric   bool shouldTransformSignedTruncationCheck(EVT XVT,
6740b57cec5SDimitry Andric                                             unsigned KeptBits) const override {
6750b57cec5SDimitry Andric     // For vectors, we don't have a preference..
6760b57cec5SDimitry Andric     if (XVT.isVector())
6770b57cec5SDimitry Andric       return false;
6780b57cec5SDimitry Andric 
6790b57cec5SDimitry Andric     auto VTIsOk = [](EVT VT) -> bool {
6800b57cec5SDimitry Andric       return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
6810b57cec5SDimitry Andric              VT == MVT::i64;
6820b57cec5SDimitry Andric     };
6830b57cec5SDimitry Andric 
6840b57cec5SDimitry Andric     // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
6850b57cec5SDimitry Andric     // XVT will be larger than KeptBitsVT.
6860b57cec5SDimitry Andric     MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
6870b57cec5SDimitry Andric     return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
6880b57cec5SDimitry Andric   }
6890b57cec5SDimitry Andric 
  /// Returns true if (add X, 1) is preferable to (sub X, -1) / not-based forms.
  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  /// Returns true if bitwise logic on \p VT preserves the FP bit pattern, so
  /// FP values may be round-tripped through integer logic ops.
  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }
6960b57cec5SDimitry Andric 
6970b57cec5SDimitry Andric   bool supportSplitCSR(MachineFunction *MF) const override {
6980b57cec5SDimitry Andric     return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
6990b57cec5SDimitry Andric            MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
7000b57cec5SDimitry Andric   }
  // Set up / emit the copies that implement split CSR saving in the entry
  // block and the exit blocks.
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  /// The swifterror parameter attribute is supported on this target.
  bool supportSwiftError() const override {
    return true;
  }
7090b57cec5SDimitry Andric 
  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  /// Target-specific MachineMemOperand flags to attach to memory accesses
  /// lowered from instruction \p I.
  MachineMemOperand::Flags getTargetMMOFlags(
    const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;
  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

  /// Returns true if GlobalISel should give up on \p Inst and fall back to
  /// SelectionDAG instruction selection.
  bool fallBackToDAGISel(const Instruction &Inst) const override;

  /// SVE code generation for fixed length vectors does not custom lower
  /// BUILD_VECTOR. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR is legal or can be custom lowered for all legal
  /// vector types this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override {
    return !useSVEForFixedLengthVectors();
  }
747*5ffd83dbSDimitry Andric 
private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  /// Returns true if the extension instruction \p Ext is free to execute.
  bool isExtFreeImpl(const Instruction *Ext) const override;

  // Register \p VT as a legal type and configure its operation actions for
  // NEON (D/Q register classes) or fixed-length SVE respectively; presumably
  // called during construction — confirm against the .cpp.
  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addTypeForFixedLengthSVE(MVT VT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);
7590b57cec5SDimitry Andric 
  // --- Calling convention lowering -----------------------------------------

  /// Lower the incoming (formal) arguments of a function.
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  /// Lower an outgoing call, including argument passing and the call itself.
  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  /// Lower the values returned from a call into \p InVals.
  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  /// Returns true if the call may be optimized into a tail call.
  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  /// Returns true if the callee pops its own stack arguments for \p CallCC.
  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  /// Spill the variadic argument registers to the stack for va_start support.
  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  /// Returns true if the return values \p Outs fit the calling convention.
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  /// Lower the function's return statement.
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
8060b57cec5SDimitry Andric 
  // --- Address materialization ---------------------------------------------

  // Wrap the various addressable node kinds as target-specific nodes with
  // the given operand flag.
  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;

  // Materialize an address under the different code models / addressing
  // strategies (GOT, large, normal, tiny).
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;

  // TLS address lowering, split by platform/model.
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
                               const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
8320b57cec5SDimitry Andric   SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  // --- Per-node custom lowering --------------------------------------------
  // One Lower* routine per ISD opcode (or opcode family) that this target
  // custom lowers; each takes the node to lower and returns the replacement.

  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;

  // Variadic-argument lowering, split by platform ABI.
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;

  // Vector construction / shuffling / extraction.
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
  /// Re-emit \p Op as the SVE predicated opcode \p NewOp.
  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
                              unsigned NewOp) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  /// Lower \p Op to a runtime call to the f128 library routine \p Call.
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;
  /// Lower an SVE structured-load intrinsic \p Intrinsic with the given
  /// operands to the appropriate node(s).
  SDValue LowerSVEStructLoad(unsigned Intrinsic, ArrayRef<SDValue> LoadOps,
                             EVT VT, SelectionDAG &DAG, const SDLoc &DL) const;

  // Fixed-length vector memory ops re-expressed as SVE operations.
  SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
8970b57cec5SDimitry Andric 
  /// Build an optimized signed division by a power-of-two \p Divisor.
  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  // Newton-Raphson style estimates for sqrt / reciprocal.
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  /// Threshold for combining repeated FP divisors into one reciprocal.
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  /// Resolve a named register (e.g. from llvm.read_register) to a Register.
  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;
9100b57cec5SDimitry Andric 
  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  /// Map an inline-asm register constraint to a register number / class.
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  /// Substitute a target-specific constraint for the generic 'X' constraint.
  const char *LowerXConstraint(EVT ConstraintVT) const override;

  /// Lower an operand referenced by an inline-asm constraint into \p Ops.
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;
9260b57cec5SDimitry Andric 
9270b57cec5SDimitry Andric   unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
9280b57cec5SDimitry Andric     if (ConstraintCode == "Q")
9290b57cec5SDimitry Andric       return InlineAsm::Constraint_Q;
9300b57cec5SDimitry Andric     // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
9310b57cec5SDimitry Andric     //        followed by llvm_unreachable so we'll leave them unimplemented in
9320b57cec5SDimitry Andric     //        the backend for now.
9330b57cec5SDimitry Andric     return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
9340b57cec5SDimitry Andric   }
9350b57cec5SDimitry Andric 
  /// Returns true if folding the extend \p ExtVal into its load is desirable.
  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  // Pre-/post-indexed addressing support: decompose \p Op into base + offset
  // and report the addressing mode (and increment direction).
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  /// Replace the results of an illegal-typed node \p N during legalization.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  void ReplaceExtractSubVectorResults(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  /// Final target-specific fixups after ISel completes for \p MF.
  void finalizeLowering(MachineFunction &MF) const override;

  /// GlobalISel hook: whether \p MI should be localized (rematerialized near
  /// uses) rather than kept in one place.
  bool shouldLocalize(const MachineInstr &MI,
                      const TargetTransformInfo *TTI) const override;

  // Whether SVE should be used to lower fixed-length vectors (overall, and
  // for a specific type).
  bool useSVEForFixedLengthVectors() const;
  bool useSVEForFixedLengthVectorVT(EVT VT) const;
9640b57cec5SDimitry Andric };
9650b57cec5SDimitry Andric 
namespace AArch64 {
/// Create an AArch64-specific FastISel instruction selector for the function
/// described by \p funcInfo.
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64
9700b57cec5SDimitry Andric 
9710b57cec5SDimitry Andric } // end namespace llvm
9720b57cec5SDimitry Andric 
9730b57cec5SDimitry Andric #endif
974