//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

// For predicated nodes where the result is a vector, the operation is
// controlled by a governing predicate and the inactive lanes are explicitly
// defined with a value, please stick to the following naming convention:
//
//   _MERGE_OP<n>      The result value is a vector with inactive lanes equal
//                     to source operand OP<n>.
//
//   _MERGE_ZERO       The result value is a vector with inactive lanes
//                     actively zeroed.
//
//   _MERGE_PASSTHRU   The result value is a vector with inactive lanes equal
//                     to the last source operand, whose only purpose is to be
//                     a passthru value.
//
// For other cases where no explicit action is needed to set the inactive lanes,
// or when the result is not a vector and it is needed or helpful to
// distinguish a node from similar unpredicated nodes, use:
//
//   _PRED
//
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Pseudo for an ObjC call that gets emitted together with a special `mov
  // x29, x29` marker instruction.
  CALL_RVMARKER,

  CALL_BTI, // Function call followed by a BTI instruction.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADR,      // ADR
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Predicated instructions where inactive lanes produce undefined results.
  ABDS_PRED,
  ABDU_PRED,
  FADD_PRED,
  FDIV_PRED,
  FMA_PRED,
  FMAX_PRED,
  FMAXNM_PRED,
  FMIN_PRED,
  FMINNM_PRED,
  FMUL_PRED,
  FSUB_PRED,
  MUL_PRED,
  MULHS_PRED,
  MULHU_PRED,
  SDIV_PRED,
  SHL_PRED,
  SMAX_PRED,
  SMIN_PRED,
  SRA_PRED,
  SRL_PRED,
  UDIV_PRED,
  UMAX_PRED,
  UMIN_PRED,

  // Unpredicated vector instructions
  BIC,

  SRAD_MERGE_OP1,

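  // Illustrative note on the naming convention above (not an exhaustive
  // mapping): FADD_PRED leaves inactive lanes undefined, SRAD_MERGE_OP1 takes
  // its inactive lanes from operand 1, SETCC_MERGE_ZERO zeroes them, and the
  // *_MERGE_PASSTHRU nodes below take them from a trailing passthru operand.
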
  // Predicated instructions with the result of inactive lanes provided by the
  // last operand.
  FABS_MERGE_PASSTHRU,
  FCEIL_MERGE_PASSTHRU,
  FFLOOR_MERGE_PASSTHRU,
  FNEARBYINT_MERGE_PASSTHRU,
  FNEG_MERGE_PASSTHRU,
  FRECPX_MERGE_PASSTHRU,
  FRINT_MERGE_PASSTHRU,
  FROUND_MERGE_PASSTHRU,
  FROUNDEVEN_MERGE_PASSTHRU,
  FSQRT_MERGE_PASSTHRU,
  FTRUNC_MERGE_PASSTHRU,
  FP_ROUND_MERGE_PASSTHRU,
  FP_EXTEND_MERGE_PASSTHRU,
  UINT_TO_FP_MERGE_PASSTHRU,
  SINT_TO_FP_MERGE_PASSTHRU,
  FCVTZU_MERGE_PASSTHRU,
  FCVTZS_MERGE_PASSTHRU,
  SIGN_EXTEND_INREG_MERGE_PASSTHRU,
  ZERO_EXTEND_INREG_MERGE_PASSTHRU,
  ABS_MERGE_PASSTHRU,
  NEG_MERGE_PASSTHRU,

  SETCC_MERGE_ZERO,

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,
  DUPLANE128,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bitwise select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSP,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,
  SPLICE,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector shift by constant and insert
  VSLI,
  VSRI,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Add Pairwise of two vectors
  ADDP,
  // Add Long Pairwise
  SADDLP,
  UADDLP,

  // udot/sdot instructions
  UDOT,
  SDOT,

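  // Clarifying note: SADDLP/UADDLP above add adjacent element pairs and
  // produce elements twice as wide (e.g. v8i16 -> v4i32), while UDOT/SDOT
  // accumulate dot products of groups of four 8-bit elements into 32-bit
  // lanes.
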
  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  SADDV_PRED,
  UADDV_PRED,
  SMAXV_PRED,
  UMAXV_PRED,
  SMINV_PRED,
  UMINV_PRED,
  ORV_PRED,
  EORV_PRED,
  ANDV_PRED,

  // Vector bitwise insertion
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  MRS, // MRS, also sets the flags via a glue.

  SMULL,
  UMULL,

  // Reciprocal estimates and steps.
  FRECPE,
  FRECPS,
  FRSQRTE,
  FRSQRTS,

  SUNPKHI,
  SUNPKLO,
  UUNPKHI,
  UUNPKLO,

  CLASTA_N,
  CLASTB_N,
  LASTA,
  LASTB,
  TBL,

  // Floating-point reductions.
  FADDA_PRED,
  FADDV_PRED,
  FMAXV_PRED,
  FMAXNMV_PRED,
  FMINV_PRED,
  FMINNMV_PRED,

  INSR,
  PTEST,
  PTRUE,

  BITREVERSE_MERGE_PASSTHRU,
  BSWAP_MERGE_PASSTHRU,
  REVH_MERGE_PASSTHRU,
  REVW_MERGE_PASSTHRU,
  CTLZ_MERGE_PASSTHRU,
  CTPOP_MERGE_PASSTHRU,
  DUP_MERGE_PASSTHRU,
  INDEX_VECTOR,

  // Cast between vectors of the same element type that differ in length.
  REINTERPRET_CAST,

  // Nodes to build an LD64B / ST64B 64-byte quantity out of i64, and vice versa
  LS64_BUILD,
  LS64_EXTRACT,

  LD1_MERGE_ZERO,
  LD1S_MERGE_ZERO,
  LDNF1_MERGE_ZERO,
  LDNF1S_MERGE_ZERO,
  LDFF1_MERGE_ZERO,
  LDFF1S_MERGE_ZERO,
  LD1RQ_MERGE_ZERO,
  LD1RO_MERGE_ZERO,

  // Structured loads.
  SVE_LD2_MERGE_ZERO,
  SVE_LD3_MERGE_ZERO,
  SVE_LD4_MERGE_ZERO,

  // Unsigned gather loads.
  GLD1_MERGE_ZERO,
  GLD1_SCALED_MERGE_ZERO,
  GLD1_UXTW_MERGE_ZERO,
  GLD1_SXTW_MERGE_ZERO,
  GLD1_UXTW_SCALED_MERGE_ZERO,
  GLD1_SXTW_SCALED_MERGE_ZERO,
  GLD1_IMM_MERGE_ZERO,

  // Signed gather loads
  GLD1S_MERGE_ZERO,
  GLD1S_SCALED_MERGE_ZERO,
  GLD1S_UXTW_MERGE_ZERO,
  GLD1S_SXTW_MERGE_ZERO,
  GLD1S_UXTW_SCALED_MERGE_ZERO,
  GLD1S_SXTW_SCALED_MERGE_ZERO,
  GLD1S_IMM_MERGE_ZERO,

  // Unsigned gather loads (first-faulting).
  GLDFF1_MERGE_ZERO,
  GLDFF1_SCALED_MERGE_ZERO,
  GLDFF1_UXTW_MERGE_ZERO,
  GLDFF1_SXTW_MERGE_ZERO,
  GLDFF1_UXTW_SCALED_MERGE_ZERO,
  GLDFF1_SXTW_SCALED_MERGE_ZERO,
  GLDFF1_IMM_MERGE_ZERO,

  // Signed gather loads (first-faulting).
  GLDFF1S_MERGE_ZERO,
  GLDFF1S_SCALED_MERGE_ZERO,
  GLDFF1S_UXTW_MERGE_ZERO,
  GLDFF1S_SXTW_MERGE_ZERO,
  GLDFF1S_UXTW_SCALED_MERGE_ZERO,
  GLDFF1S_SXTW_SCALED_MERGE_ZERO,
  GLDFF1S_IMM_MERGE_ZERO,

  // Non-temporal gather loads
  GLDNT1_MERGE_ZERO,
  GLDNT1_INDEX_MERGE_ZERO,
  GLDNT1S_MERGE_ZERO,

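  // Note on the suffixes used by the gather/scatter nodes in this file:
  // _UXTW/_SXTW take 32-bit offsets zero-/sign-extended to 64 bits, _SCALED
  // scales the offsets by the element size, and _IMM uses a vector base plus
  // an immediate offset, mirroring the SVE gather/scatter addressing modes.
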
  // Contiguous masked store.
  ST1_PRED,

  // Scatter store
  SST1_PRED,
  SST1_SCALED_PRED,
  SST1_UXTW_PRED,
  SST1_SXTW_PRED,
  SST1_UXTW_SCALED_PRED,
  SST1_SXTW_SCALED_PRED,
  SST1_IMM_PRED,

  // Non-temporal scatter store
  SSTNT1_PRED,
  SSTNT1_INDEX_PRED,

  // SME
  RDSVL,
  REVD_MERGE_PASSTHRU,

  // Asserts that a function argument (i32) is zero-extended to i8 by
  // the caller
  ASSERT_ZEXT_BOOL,

  // Strict (exception-raising) floating point comparison
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPE,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost,

  STG,
  STZG,
  ST2G,
  STZ2G,

  LDP,
  STP,
  STNP,

  // Memory Operations
  MOPS_MEMSET,
  MOPS_MEMSET_TAGGING,
  MOPS_MEMCOPY,
  MOPS_MEMMOVE,
};

} // end namespace AArch64ISD

namespace AArch64 {
/// Possible values of current rounding mode, which is specified in bits
/// 23:22 of FPCR.
enum Rounding {
  RN = 0,    // Round to Nearest
  RP = 1,    // Round towards Plus infinity
  RM = 2,    // Round towards Minus infinity
  RZ = 3,    // Round towards Zero
  rmMask = 3 // Bit mask selecting rounding mode
};

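// Illustrative sketch (not an accessor defined in this file): given a raw
// FPCR value Fpcr, the current rounding mode can be recovered as
//   Rounding RMode = static_cast<Rounding>((Fpcr >> RoundingBitsPos) & rmMask);
// using the RoundingBitsPos constant declared below.
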
// Bit position of rounding mode bits in FPCR.
const unsigned RoundingBitsPos = 22;
} // namespace AArch64

class AArch64Subtarget;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Control the following reassociation of operands: (op (op x, c1), y) -> (op
  /// (op x, y), c1) where N0 is (op x, c1) and N1 is y.
  bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                           SDValue N1) const override;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    // Returning i64 unconditionally here (i.e. even for ILP32) means that the
    // *DAG* representation of pointers will always be 64-bits. They will be
    // truncated and extended when transferred to memory, but the 64-bit DAG
    // allows us to use AArch64's addressing modes much more easily.
    return MVT::getIntegerVT(64);
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(LLT Ty, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitTileLoad(unsigned Opc, unsigned BaseReg,
                                  MachineInstr &MI,
                                  MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitMopa(unsigned Opc, unsigned BaseReg, MachineInstr &MI,
                              MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitInsertVectorToTile(unsigned Opc, unsigned BaseReg,
                                            MachineInstr &MI,
                                            MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;

  bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(const MemOp &Op,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  InstructionCost getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
                                       Type *Ty, unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  bool generateFMAsInMachineCombiner(EVT VT,
                                     CodeGenOpt::Level OptLevel) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                         CombineLevel Level) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on
    // AArch64.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }

  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

  bool isOpSuitableForLDPSTP(const Instruction *I) const;
  bool shouldInsertFencesForAtomic(const Instruction *I) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilderBase &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const MachineFunction &MF) const override {
    // Do not merge to float value size (128 bits) if no implicit
    // float attribute is set.

    bool NoFloat = MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    TypeSize TS = VT.getSizeInBits();
    // TODO: We should be able to use bic/bif too for SVE.
    return !TS.isScalable() && TS.getFixedValue() >= 64; // vector 'bic'
  }

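  // Clarifying note on hasAndNotCompare/hasAndNot above: returning true tells
  // DAG combine that (and x, (xor y, -1)) is as cheap as a plain AND, since it
  // can be selected as a single BIC (or BICS when flags are needed) for
  // scalars and as a vector BIC for 64/128-bit NEON vectors.
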
  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, which is what SXT
    // supports. XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL,
                                    bool &UseScalable) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL,
                                     bool UseScalable) const;

  MachineMemOperand::Flags getTargetMMOFlags(
      const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override;

  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

  bool fallBackToDAGISel(const Instruction &Inst) const override;

  /// SVE code generation for fixed length vectors does not custom lower
  /// BUILD_VECTOR. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR is legal or can be custom lowered for all legal
  /// vector types this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  // If the platform/function should have a redzone, return the size in bytes.
  unsigned getRedZoneSize(const Function &F) const {
    if (F.hasFnAttribute(Attribute::NoRedZone))
      return 0;
    return 128;
  }

  bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const;
  EVT getPromotedVTForPredicate(EVT VT) const;

  EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                             bool AllowUnknown = false) const override;

  bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT);
  void addTypeForFixedLengthSVE(MVT VT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerStore128(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool
  isEligibleForTailCallOptimization(const CallLoweringInfo &CLI) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
                               const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
                              unsigned NewOp) const;
  SDValue LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBitreverse(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMinMax(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;
  SDValue LowerSVEStructLoad(unsigned Intrinsic, ArrayRef<SDValue> LoadOps,
                             EVT VT, SelectionDAG &DAG, const SDLoc &DL) const;

  SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  LowerFixedLengthVectorMLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerPredReductionToSVE(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerReductionToSVE(unsigned Opcode, SDValue ScalarOp,
                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMStoreToSVE(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthExtractVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthInsertVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBitcastToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthConcatVectorsToSVE(SDValue Op,
                                             SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPExtendToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPRoundToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthIntToFPToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                           const DenormalMode &Mode) const override;
  SDValue getSqrtResultForDenormInput(SDValue Operand,
                                      SelectionDAG &DAG) const override;
  unsigned combineRepeatedFPDivisors() const override;
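
  // Note on the estimate hooks above (an illustrative sketch, not a statement
  // of the exact emitted sequence): when fast-math flags permit, the DAG
  // combiner uses them to replace fdiv/fsqrt with the hardware estimate
  // instructions (FRECPE/FRSQRTE) refined by Newton-Raphson steps
  // (FRECPS/FRSQRTS). For 1/sqrt(d), one refinement step looks like:
  //
  //   est = FRSQRTE(d);
  //   est = est * FRSQRTS(d * est, est);  // FRSQRTS(a, b) = (3 - a*b) / 2
  //
  // ExtraSteps controls how many such iterations are emitted.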

  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
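
  // For illustration, "Q" in front-end inline assembly selects a memory
  // operand addressed by a single base register with no offset, which is why
  // it maps directly onto InlineAsm::Constraint_Q above. A minimal sketch of
  // user code (variable names are arbitrary, not part of this interface):
  //
  //   long v;
  //   asm volatile("ldxr %0, %1" : "=r"(v) : "Q"(*ptr));
  //
  // LDXR only accepts a base-register-only address, so the operand must be
  // materialised as a plain [Xn|SP] address.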

  bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const override;
  bool shouldRemoveExtendFromGSIndex(EVT IndexVT, EVT DataVT) const override;
  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const;
  void ReplaceExtractSubVectorResults(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;

  bool shouldLocalize(const MachineInstr &MI,
                      const TargetTransformInfo *TTI) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool isTargetCanonicalConstantNode(SDValue Op) const override;

  // Normally SVE is only used for vectors whose byte size does not fit within
  // a NEON vector. This changes when OverrideNEON is true, allowing SVE to be
  // used for 64-bit and 128-bit vectors as well.
  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;

  // With the exception of data-predicate transitions, no instructions are
  // required to cast between legal scalable vector types. However:
  //  1. Packed and unpacked types have different bit lengths, meaning BITCAST
  //     is not universally usable.
  //  2. Most unpacked integer types are not legal and thus integer extends
  //     cannot be used to convert between unpacked and packed types.
  // These can make "bitcasting" a multiphase process. REINTERPRET_CAST is used
  // to transition between unpacked and packed types of the same element type,
  // with BITCAST used otherwise.
  SDValue getSVESafeBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) const;

  bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
                                              LLT Ty2) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif