//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

// For predicated nodes where the result is a vector, the operation is
// controlled by a governing predicate and the inactive lanes are explicitly
// defined with a value, please stick to the following naming convention:
//
//    _MERGE_OP<n>       The result value is a vector with inactive lanes equal
//                       to source operand OP<n>.
//
//    _MERGE_ZERO        The result value is a vector with inactive lanes
//                       actively zeroed.
//
//    _MERGE_PASSTHRU    The result value is a vector with inactive lanes equal
//                       to the last source operand, whose only purpose is to
//                       be a passthru value.
//
// For other cases where no explicit action is needed to set the inactive lanes,
// or when the result is not a vector and it is needed or helpful to
// distinguish a node from similar unpredicated nodes, use:
//
//    _PRED
//
// An illustrative sketch of these conventions follows the enum below.
//
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADR,      // ADR instruction; forms a PC-relative address.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Predicated instructions where inactive lanes produce undefined results.
  ADD_PRED,
  FADD_PRED,
  FDIV_PRED,
  FMA_PRED,
  FMAXNM_PRED,
  FMINNM_PRED,
  FMUL_PRED,
  FSUB_PRED,
  MUL_PRED,
  SDIV_PRED,
  SHL_PRED,
  SMAX_PRED,
  SMIN_PRED,
  SRA_PRED,
  SRL_PRED,
  SUB_PRED,
  UDIV_PRED,
  UMAX_PRED,
  UMIN_PRED,

  // Predicated instructions with the result of inactive lanes provided by the
  // last operand.
  FABS_MERGE_PASSTHRU,
  FCEIL_MERGE_PASSTHRU,
  FFLOOR_MERGE_PASSTHRU,
  FNEARBYINT_MERGE_PASSTHRU,
  FNEG_MERGE_PASSTHRU,
  FRECPX_MERGE_PASSTHRU,
  FRINT_MERGE_PASSTHRU,
  FROUND_MERGE_PASSTHRU,
  FROUNDEVEN_MERGE_PASSTHRU,
  FSQRT_MERGE_PASSTHRU,
  FTRUNC_MERGE_PASSTHRU,
  FP_ROUND_MERGE_PASSTHRU,
  FP_EXTEND_MERGE_PASSTHRU,
  UINT_TO_FP_MERGE_PASSTHRU,
  SINT_TO_FP_MERGE_PASSTHRU,
  FCVTZU_MERGE_PASSTHRU,
  FCVTZS_MERGE_PASSTHRU,
  SIGN_EXTEND_INREG_MERGE_PASSTHRU,
  ZERO_EXTEND_INREG_MERGE_PASSTHRU,
  ABS_MERGE_PASSTHRU,
  NEG_MERGE_PASSTHRU,

  SETCC_MERGE_ZERO,

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bitwise select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSP,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector shift by constant and insert
  VSLI,
  VSRI,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector halving addition
  SHADD,
  UHADD,

  // Vector rounding halving addition
  SRHADD,
  URHADD,

  // Absolute difference
  UABD,
  SABD,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  SADDV_PRED,
  UADDV_PRED,
  SMAXV_PRED,
  UMAXV_PRED,
  SMINV_PRED,
  UMINV_PRED,
  ORV_PRED,
  EORV_PRED,
  ANDV_PRED,

  // Vector bitwise insertion
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  SMULL,
  UMULL,

  // Reciprocal estimates and steps.
  FRECPE,
  FRECPS,
  FRSQRTE,
  FRSQRTS,

  SUNPKHI,
  SUNPKLO,
  UUNPKHI,
  UUNPKLO,

  CLASTA_N,
  CLASTB_N,
  LASTA,
  LASTB,
  REV,
  TBL,

  // Floating-point reductions.
  FADDA_PRED,
  FADDV_PRED,
  FMAXV_PRED,
  FMAXNMV_PRED,
  FMINV_PRED,
  FMINNMV_PRED,

  INSR,
  PTEST,
  PTRUE,

  BITREVERSE_MERGE_PASSTHRU,
  BSWAP_MERGE_PASSTHRU,
  CTLZ_MERGE_PASSTHRU,
  CTPOP_MERGE_PASSTHRU,
  DUP_MERGE_PASSTHRU,
  INDEX_VECTOR,

  // Cast between vectors of the same element type that differ in length.
  REINTERPRET_CAST,

  LD1_MERGE_ZERO,
  LD1S_MERGE_ZERO,
  LDNF1_MERGE_ZERO,
  LDNF1S_MERGE_ZERO,
  LDFF1_MERGE_ZERO,
  LDFF1S_MERGE_ZERO,
  LD1RQ_MERGE_ZERO,
  LD1RO_MERGE_ZERO,

  // Structured loads.
  SVE_LD2_MERGE_ZERO,
  SVE_LD3_MERGE_ZERO,
  SVE_LD4_MERGE_ZERO,

  // Unsigned gather loads.
  GLD1_MERGE_ZERO,
  GLD1_SCALED_MERGE_ZERO,
  GLD1_UXTW_MERGE_ZERO,
  GLD1_SXTW_MERGE_ZERO,
  GLD1_UXTW_SCALED_MERGE_ZERO,
  GLD1_SXTW_SCALED_MERGE_ZERO,
  GLD1_IMM_MERGE_ZERO,

  // Signed gather loads.
  GLD1S_MERGE_ZERO,
  GLD1S_SCALED_MERGE_ZERO,
  GLD1S_UXTW_MERGE_ZERO,
  GLD1S_SXTW_MERGE_ZERO,
  GLD1S_UXTW_SCALED_MERGE_ZERO,
  GLD1S_SXTW_SCALED_MERGE_ZERO,
  GLD1S_IMM_MERGE_ZERO,

  // Unsigned first-faulting gather loads.
  GLDFF1_MERGE_ZERO,
  GLDFF1_SCALED_MERGE_ZERO,
  GLDFF1_UXTW_MERGE_ZERO,
  GLDFF1_SXTW_MERGE_ZERO,
  GLDFF1_UXTW_SCALED_MERGE_ZERO,
  GLDFF1_SXTW_SCALED_MERGE_ZERO,
  GLDFF1_IMM_MERGE_ZERO,

  // Signed first-faulting gather loads.
  GLDFF1S_MERGE_ZERO,
  GLDFF1S_SCALED_MERGE_ZERO,
  GLDFF1S_UXTW_MERGE_ZERO,
  GLDFF1S_SXTW_MERGE_ZERO,
  GLDFF1S_UXTW_SCALED_MERGE_ZERO,
  GLDFF1S_SXTW_SCALED_MERGE_ZERO,
  GLDFF1S_IMM_MERGE_ZERO,

  // Non-temporal gather loads
  GLDNT1_MERGE_ZERO,
  GLDNT1_INDEX_MERGE_ZERO,
  GLDNT1S_MERGE_ZERO,

  // Contiguous masked store.
  ST1_PRED,

  // Scatter store
  SST1_PRED,
  SST1_SCALED_PRED,
  SST1_UXTW_PRED,
  SST1_SXTW_PRED,
  SST1_UXTW_SCALED_PRED,
  SST1_SXTW_SCALED_PRED,
  SST1_IMM_PRED,

  // Non-temporal scatter store
  SSTNT1_PRED,
  SSTNT1_INDEX_PRED,

  // Strict (exception-raising) floating point comparison
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPE,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost,

  STG,
  STZG,
  ST2G,
  STZ2G,

  LDP,
  STP,
  STNP,

  // Pseudo for an ObjC call that gets emitted together with a special `mov
  // x29, x29` marker instruction.
  CALL_RVMARKER
};

} // end namespace AArch64ISD
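
// Illustrative sketch of the naming convention described above (operand order
// is only indicative here, not a contract):
//
//   FADD_PRED(Pg, Op1, Op2)               - inactive lanes are undefined.
//   FNEG_MERGE_PASSTHRU(Pg, Op, Passthru) - inactive lanes take their value
//                                           from the trailing Passthru operand.
//   SETCC_MERGE_ZERO(...)                 - inactive result lanes are zeroed.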

namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits. AssertSext/AssertZext aren't saying anything about the upper
// 32 bits, they're probably just qualifying a CopyFromReg.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg && Opc != ISD::AssertSext &&
         Opc != ISD::AssertZext;
}
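
// For example, a 32-bit "ADD Wd, Wn, Wm" also zeroes bits [63:32] of Xd, so
// values produced by the opcodes isDef32 accepts can be used as 64-bit values
// without an explicit zero-extension.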

} // end anonymous namespace

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    // Returning i64 unconditionally here (i.e. even for ILP32) means that the
    // *DAG* representation of pointers will always be 64-bits. They will be
    // truncated and extended when transferred to memory, but the 64-bit DAG
    // allows us to use AArch64's addressing modes much more easily.
    return MVT::getIntegerVT(64);
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(LLT Ty, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;

  bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(const MemOp &Op,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on
    // AArch64.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }
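
  // Illustrative note (not an exhaustive claim): a scalar llvm.sadd.with.overflow
  // that is only used for its overflow bit can still be selected to a single
  // ADDS followed by a B.VS or CSET on the V flag, which is why the override
  // above passes MathUsed = true unconditionally.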

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // If the NoImplicitFloat attribute is set, do not let store merging create
    // values wider than 64 bits, since those would have to live in FP/vector
    // registers.

    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
        Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    return VT.getSizeInBits() >= 64; // vector 'bic'
  }

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are OK with KeptBitsVT being byte/word/dword, which is what SXT
    // supports. XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  MachineMemOperand::Flags getTargetMMOFlags(
      const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;
  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

  bool fallBackToDAGISel(const Instruction &Inst) const override;

  /// SVE code generation for fixed length vectors does not custom lower
  /// BUILD_VECTOR. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR is legal or can be custom lowered for all legal
  /// vector types this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addTypeForFixedLengthSVE(MVT VT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
                               const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG, unsigned NewOp,
                              bool OverrideNEON = false) const;
  SDValue LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;
  SDValue LowerSVEStructLoad(unsigned Intrinsic, ArrayRef<SDValue> LoadOps,
                             EVT VT, SelectionDAG &DAG, const SDLoc &DL) const;

  SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerPredReductionToSVE(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerReductionToSVE(unsigned Opcode, SDValue ScalarOp,
                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                           const DenormalMode &Mode) const override;
  SDValue getSqrtResultForDenormInput(SDValue Operand,
                                      SelectionDAG &DAG) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool shouldRemoveExtendFromGSIndex(EVT VT) const override;
  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  void ReplaceExtractSubVectorResults(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;

  bool shouldLocalize(const MachineInstr &MI,
                      const TargetTransformInfo *TTI) const override;

  // Normally SVE is only used for vectors whose size in bytes does not fit
  // within a NEON vector. This changes when OverrideNEON is true, allowing SVE
  // to be used for 64-bit and 128-bit vectors as well.
  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;

  // With the exception of data-predicate transitions, no instructions are
  // required to cast between legal scalable vector types. However:
  //  1. Packed and unpacked types have different bit lengths, meaning BITCAST
  //     is not universally usable.
  //  2. Most unpacked integer types are not legal and thus integer extends
  //     cannot be used to convert between unpacked and packed types.
  // These can make "bitcasting" a multiphase process. REINTERPRET_CAST is used
  // to transition between unpacked and packed types of the same element type,
  // with BITCAST used otherwise.
  SDValue getSVESafeBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) const;
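
  // Purely as a sketch of the rules above (the exact node sequence is an
  // implementation detail): a "bitcast" between two unpacked types such as
  // nxv2f32 and nxv2i32 could be expressed as
  //   REINTERPRET_CAST nxv2f32 -> nxv4f32  (unpacked -> packed, same element)
  //   BITCAST          nxv4f32 -> nxv4i32  (packed -> packed, same bit length)
  //   REINTERPRET_CAST nxv4i32 -> nxv2i32  (packed -> unpacked, same element)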
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif