//===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CodeGen.h"
#include <optional>
#include <utility>

namespace llvm {

class ARMSubtarget;
class DataLayout;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class InstrItineraryData;
class Instruction;
class IRBuilderBase;
class MachineBasicBlock;
class MachineInstr;
class SelectionDAG;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterInfo;
class VectorType;

namespace ARMISD {

// ARM Specific DAG Nodes
enum NodeType : unsigned {
  // Start the numbering where the builtin ops and target ops leave off.
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  Wrapper, // Wrapper - A wrapper node for TargetConstantPool,
           // TargetExternalSymbol, and TargetGlobalAddress.
  WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
              // PIC mode.
  WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable

  // Add pseudo op to model memcpy for struct byval.
  COPY_STRUCT_BYVAL,

  CALL,        // Function call.
  CALL_PRED,   // Function call that's predicable.
  CALL_NOLINK, // Function call with branch not branch-and-link.
  tSECALL,     // CMSE non-secure function call.
  t2CALL_BTI,  // Thumb function call followed by BTI instruction.
  BRCOND,      // Conditional branch.
  BR_JT,       // Jumptable branch.
  BR2_JT,      // Jumptable branch (2 level - jumptable entry is a jump).
  RET_GLUE,    // Return with a flag operand.
  SERET_GLUE,  // CMSE Entry function return with a flag operand.
  INTRET_GLUE, // Interrupt return with an LR-offset and a flag operand.

  PIC_ADD, // Add with a PC operand and a PIC label.

  ASRL, // MVE long arithmetic shift right.
  LSRL, // MVE long shift right.
  LSLL, // MVE long shift left.

  CMP,      // ARM compare instructions.
  CMN,      // ARM CMN instructions.
  CMPZ,     // ARM compare that sets only Z flag.
  CMPFP,    // ARM VFP compare instruction, sets FPSCR.
  CMPFPE,   // ARM VFP signalling compare instruction, sets FPSCR.
  CMPFPw0,  // ARM VFP compare against zero instruction, sets FPSCR.
  CMPFPEw0, // ARM VFP signalling compare against zero instruction, sets
            // FPSCR.
  FMSTAT,   // ARM fmstat instruction.

  CMOV, // ARM conditional move instructions.

  SSAT, // Signed saturation
  USAT, // Unsigned saturation

  BCC_i64,

  SRL_GLUE, // V,Flag = srl_flag X -> srl X, 1 + save carry out.
  SRA_GLUE, // V,Flag = sra_flag X -> sra X, 1 + save carry out.
  RRX,      // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

  ADDC, // Add with carry
  ADDE, // Add using carry
  SUBC, // Sub with carry
  SUBE, // Sub using carry
  LSLS, // Shift left producing carry

  VMOVRRD, // double to two gprs.
  VMOVDRR, // Two gprs to double.
  VMOVSR,  // move gpr to single, used for f32 literal constructed in a gpr

  EH_SJLJ_SETJMP,  // SjLj exception handling setjmp.
  EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
  EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.

  TC_RETURN, // Tail call return pseudo.

  THREAD_POINTER,

  DYN_ALLOC, // Dynamic allocation on the stack.

  MEMBARRIER_MCR, // Memory barrier (MCR)

  PRELOAD, // Preload

  WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
  WIN__DBZCHK, // Windows' divide by zero check

  WLS,      // Low-overhead loops, While Loop Start branch. See t2WhileLoopStart
  WLSSETUP, // Setup for the iteration count of a WLS. See t2WhileLoopSetup.
  LOOP_DEC, // Really a part of LE, performs the sub
  LE,       // Low-overhead loops, Loop End

  PREDICATE_CAST,  // Predicate cast for MVE i1 types
  VECTOR_REG_CAST, // Reinterpret the current contents of a vector register

  MVESEXT,  // Legalization aids for extending a vector into two/four vectors.
  MVEZEXT,  // or truncating two/four vectors into one. Eventually becomes
  MVETRUNC, // stack store/load sequence, if not optimized to anything else.

  VCMP,  // Vector compare.
  VCMPZ, // Vector compare to zero.
  VTST,  // Vector test bits.

  // Vector shift by vector
  VSHLs, // ...left/right by signed
  VSHLu, // ...left/right by unsigned

  // Vector shift by immediate:
  VSHLIMM,  // ...left
  VSHRsIMM, // ...right (signed)
  VSHRuIMM, // ...right (unsigned)

  // Vector rounding shift by immediate:
  VRSHRsIMM, // ...right (signed)
  VRSHRuIMM, // ...right (unsigned)
  VRSHRNIMM, // ...right narrow

  // Vector saturating shift by immediate:
  VQSHLsIMM,   // ...left (signed)
  VQSHLuIMM,   // ...left (unsigned)
  VQSHLsuIMM,  // ...left (signed to unsigned)
  VQSHRNsIMM,  // ...right narrow (signed)
  VQSHRNuIMM,  // ...right narrow (unsigned)
  VQSHRNsuIMM, // ...right narrow (signed to unsigned)

  // Vector saturating rounding shift by immediate:
  VQRSHRNsIMM,  // ...right narrow (signed)
  VQRSHRNuIMM,  // ...right narrow (unsigned)
  VQRSHRNsuIMM, // ...right narrow (signed to unsigned)

  // Vector shift and insert:
  VSLIIMM, // ...left
  VSRIIMM, // ...right

  // Vector get lane (VMOV scalar to ARM core register)
  // (These are used for 8- and 16-bit element types only.)
  VGETLANEu, // zero-extend vector extract element
  VGETLANEs, // sign-extend vector extract element

  // Vector move immediate and move negated immediate:
  VMOVIMM,
  VMVNIMM,

  // Vector move f32 immediate:
  VMOVFPIMM,

  // Move H <-> R, clearing top 16 bits
  VMOVrh,
  VMOVhr,

  // Vector duplicate:
  VDUP,
  VDUPLANE,

  // Vector shuffles:
  VEXT,   // extract
  VREV64, // reverse elements within 64-bit doublewords
  VREV32, // reverse elements within 32-bit words
  VREV16, // reverse elements within 16-bit halfwords
  VZIP,   // zip (interleave)
  VUZP,   // unzip (deinterleave)
  VTRN,   // transpose
  VTBL1,  // 1-register shuffle with mask
  VTBL2,  // 2-register shuffle with mask
  VMOVN,  // MVE vmovn

  // MVE Saturating truncates
  VQMOVNs, // Vector (V) Saturating (Q) Move and Narrow (N), signed (s)
  VQMOVNu, // Vector (V) Saturating (Q) Move and Narrow (N), unsigned (u)

  // MVE float <> half converts
  VCVTN, // MVE vcvt f32 -> f16, truncating into either the bottom or top
         // lanes
  VCVTL, // MVE vcvt f16 -> f32, extending from either the bottom or top lanes

  // MVE VIDUP instruction, taking a start value and increment.
  VIDUP,

  // Vector multiply long:
  VMULLs, // ...signed
  VMULLu, // ...unsigned

  VQDMULH, // MVE vqdmulh instruction

  // MVE reductions
  VADDVs,  // sign- or zero-extend the elements of a vector to i32,
  VADDVu,  // add them all together, and return an i32 of their sum
  VADDVps, // Same as VADDV[su] but with a v4i1 predicate mask
  VADDVpu,
  VADDLVs,  // sign- or zero-extend elements to i64 and sum, returning
  VADDLVu,  // the low and high 32-bit halves of the sum
  VADDLVAs, // Same as VADDLV[su] but also add an input accumulator
  VADDLVAu, // provided as low and high halves
  VADDLVps, // Same as VADDLV[su] but with a v4i1 predicate mask
  VADDLVpu,
  VADDLVAps, // Same as VADDLVp[su] but with a v4i1 predicate mask
  VADDLVApu,
  VMLAVs, // sign- or zero-extend the elements of two vectors to i32, multiply
  VMLAVu, // them and add the results together, returning an i32 of the sum
  VMLAVps, // Same as VMLAV[su] with a v4i1 predicate mask
  VMLAVpu,
  VMLALVs,  // Same as VMLAV but with i64, returning the low and
  VMLALVu,  // high 32-bit halves of the sum
  VMLALVps, // Same as VMLALV[su] with a v4i1 predicate mask
  VMLALVpu,
  VMLALVAs,  // Same as VMLALV but also add an input accumulator
  VMLALVAu,  // provided as low and high halves
  VMLALVAps, // Same as VMLALVA[su] with a v4i1 predicate mask
  VMLALVApu,
  VMINVu, // Find minimum unsigned value of a vector and register
  VMINVs, // Find minimum signed value of a vector and register
  VMAXVu, // Find maximum unsigned value of a vector and register
  VMAXVs, // Find maximum signed value of a vector and register

  SMULWB,  // Signed multiply word by half word, bottom
  SMULWT,  // Signed multiply word by half word, top
  UMLAL,   // 64bit Unsigned Accumulate Multiply
  SMLAL,   // 64bit Signed Accumulate Multiply
  UMAAL,   // 64-bit Unsigned Accumulate Accumulate Multiply
  SMLALBB, // 64-bit signed accumulate multiply bottom, bottom 16
  SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
  SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
  SMLALTT, // 64-bit signed accumulate multiply top, top 16
  SMLALD,  // Signed multiply accumulate long dual
  SMLALDX, // Signed multiply accumulate long dual exchange
  SMLSLD,  // Signed multiply subtract long dual
  SMLSLDX, // Signed multiply subtract long dual exchange
  SMMLAR,  // Signed multiply long, round and add
  SMMLSR,  // Signed multiply long, subtract and round

  // Single Lane QADD8 and QADD16. Only the bottom lane. That's what the b
  // stands for.
  QADD8b,
  QSUB8b,
  QADD16b,
  QSUB16b,
  UQADD8b,
  UQSUB8b,
  UQADD16b,
  UQSUB16b,

  // Operands of the standard BUILD_VECTOR node are not legalized, which
  // is fine if BUILD_VECTORs are always lowered to shuffles or other
  // operations, but for ARM some BUILD_VECTORs are legal as-is and their
  // operands need to be legalized. Define an ARM-specific version of
  // BUILD_VECTOR for this purpose.
  BUILD_VECTOR,

  // Bit-field insert
  BFI,

  // Vector OR with immediate
  VORRIMM,
  // Vector AND with NOT of immediate
  VBICIMM,

  // Pseudo vector bitwise select
  VBSP,

  // Pseudo-instruction representing a memory copy using ldm/stm
  // instructions.
  MEMCPY,

  // Pseudo-instruction representing a memory copy using a tail predicated
  // loop
  MEMCPYLOOP,
  // Pseudo-instruction representing a memset using a tail predicated
  // loop
  MEMSETLOOP,

  // Armv8.1-M Mainline condition select
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.
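  // (Illustrative note, an assumption rather than text from this file: these
  // correspond to the Armv8.1-M CSINV/CSNEG/CSINC instructions, where e.g.
  // CSINC Rd, Rn, Rm, cc yields Rn if cc holds and Rm + 1 otherwise.)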

  // Vector load N-element structure to all lanes:
  VLD1DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
  VLD2DUP,
  VLD3DUP,
  VLD4DUP,

  // NEON loads with post-increment base updates:
  VLD1_UPD,
  VLD2_UPD,
  VLD3_UPD,
  VLD4_UPD,
  VLD2LN_UPD,
  VLD3LN_UPD,
  VLD4LN_UPD,
  VLD1DUP_UPD,
  VLD2DUP_UPD,
  VLD3DUP_UPD,
  VLD4DUP_UPD,
  VLD1x2_UPD,
  VLD1x3_UPD,
  VLD1x4_UPD,

  // NEON stores with post-increment base updates:
  VST1_UPD,
  VST2_UPD,
  VST3_UPD,
  VST4_UPD,
  VST2LN_UPD,
  VST3LN_UPD,
  VST4LN_UPD,
  VST1x2_UPD,
  VST1x3_UPD,
  VST1x4_UPD,

  // Load/Store of dual registers
  LDRD,
  STRD
};

} // end namespace ARMISD

namespace ARM {
/// Possible values of current rounding mode, which is specified in bits
/// 23:22 of FPSCR.
enum Rounding {
  RN = 0,    // Round to Nearest
  RP = 1,    // Round towards Plus infinity
  RM = 2,    // Round towards Minus infinity
  RZ = 3,    // Round towards Zero
  rmMask = 3 // Bit mask selecting rounding mode
};

// Bit position of rounding mode bits in FPSCR.
const unsigned RoundingBitsPos = 22;

// Bits of floating-point status. These are NZCV flags, QC bit and cumulative
// FP exception bits.
const unsigned FPStatusBits = 0xf800009f;

// Some bits in the FPSCR are not yet defined. They must be preserved when
// modifying the contents.
const unsigned FPReservedBits = 0x00006060;
} // namespace ARM

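// Illustrative sketch (not part of this interface): with the constants above,
// the rounding-mode field of a raw FPSCR value could be decoded and replaced
// as follows, where Fpscr and NewMode are hypothetical local values:
//
//   ARM::Rounding Mode =
//       ARM::Rounding((Fpscr >> ARM::RoundingBitsPos) & ARM::rmMask);
//   unsigned NewFpscr =
//       (Fpscr & ~(ARM::rmMask << ARM::RoundingBitsPos)) |
//       (unsigned(NewMode) << ARM::RoundingBitsPos);
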
/// Define some predicates that are used for node matching.
namespace ARM {

bool isBitFieldInvertedMask(unsigned v);

} // end namespace ARM

//===--------------------------------------------------------------------===//
// ARMTargetLowering - ARM Implementation of the TargetLowering interface

class ARMTargetLowering : public TargetLowering {
public:
  explicit ARMTargetLowering(const TargetMachine &TM,
                             const ARMSubtarget &STI);

  unsigned getJumpTableEncoding() const override;
  bool useSoftFloat() const override;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// ReplaceNodeResults - Replace the results of node with an illegal result
  /// type with new values built out of custom code.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  bool isSelectSupported(SelectSupportKind Kind) const override {
    // ARM does not support scalar condition selects on vectors.
    return (Kind != ScalarCondVectorVal);
  }
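
  // Illustration (an assumption, not taken from this file): the
  // ScalarCondVectorVal kind rejected above corresponds to IR such as
  //   %r = select i1 %c, <4 x i32> %a, <4 x i32> %b
  // i.e. a single scalar condition selecting between whole vectors.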

  bool isReadOnly(const GlobalValue *GV) const;

  /// getSetCCResultType - Return the value type to use for ISD::SETCC.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformIntrinsicCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformMVEExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformMVETruncCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type. Returns whether it
  /// is "fast" by reference in the second argument.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast) const override;

  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;
  Type *shouldConvertSplatType(ShuffleVectorInst *SVI) const override;

  bool isFNegFree(EVT VT) const override;

  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;

  bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

  /// Returns true if the addressing mode represented by AM is legal
  /// for the Thumb1 target, for a load/store of the specified type.
  bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

  /// isLegalICmpImmediate - Return true if the specified immediate is legal
  /// icmp immediate, that is the target has icmp instructions which can
  /// compare a register against the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  /// isLegalAddImmediate - Return true if the specified immediate is legal
  /// add immediate, that is the target has add instructions which can
  /// add a register and the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const override;
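
  // Illustration (an assumption, not taken from this file): A32
  // data-processing immediates are an 8-bit value rotated right by an even
  // amount, so
  //   cmp r0, #255          and    add r0, r0, #0xff000000
  // can encode their immediates directly, while a constant such as 0x12345678
  // would have to be materialized into a register first.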

  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;

  /// getPostIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  bool ExpandInlineAsm(CallInst *CI) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
  /// true it means one of the asm constraint of the inline asm instruction
  /// being processed is 'm'.
  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::ConstraintCode::Q;
    if (ConstraintCode.size() == 2) {
      if (ConstraintCode[0] == 'U') {
        switch (ConstraintCode[1]) {
        default:
          break;
        case 'm':
          return InlineAsm::ConstraintCode::Um;
        case 'n':
          return InlineAsm::ConstraintCode::Un;
        case 'q':
          return InlineAsm::ConstraintCode::Uq;
        case 's':
          return InlineAsm::ConstraintCode::Us;
        case 't':
          return InlineAsm::ConstraintCode::Ut;
        case 'v':
          return InlineAsm::ConstraintCode::Uv;
        case 'y':
          return InlineAsm::ConstraintCode::Uy;
        }
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
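
  // Illustration (an assumption, not taken from this file): C-level inline
  // assembly such as
  //   asm volatile("ldrexb %0, %1" : "=r"(v) : "Q"(*p));
  // reaches this hook with ConstraintCode == "Q" (a memory operand addressed
  // by a single base register with no offset) and is mapped to
  // InlineAsm::ConstraintCode::Q above; the two-character "U*" codes are
  // dispatched by the switch in the same way.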

  const ARMSubtarget *getSubtarget() const {
    return Subtarget;
  }

  /// getRegClassFor - Return the register class that should be used for the
  /// specified value type.
  const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const override;

  bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                              Align &PrefAlign) const override;

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  Sched::Preference getSchedulingPreference(SDNode *N) const override;

  bool preferZeroCompareBranch() const override { return true; }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  /// isFPImmLegal - Returns true if the target can instruction select the
  /// specified FP immediate natively. If false, the legalizer will
  /// materialize the FP immediate as a load from a constant pool.
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize = false) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                          const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on ARM.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }

  bool shouldReassociateReduction(unsigned Opc, EVT VT) const override {
    return Opc != ISD::VECREDUCE_ADD;
  }

  /// Returns true if an argument of type Ty needs to be passed in a
  /// contiguous block of registers in calling convention CallConv.
  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  Instruction *makeDMB(IRBuilderBase &Builder, ARM_MB::MemBOpt Domain) const;
  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  void
  emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  unsigned getMaxSupportedInterleaveFactor() const override;

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                 unsigned &Cost) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const MachineFunction &MF) const override {
    // Do not merge to larger than i32.
    return (MemVT.getSizeInBits() <= 32);
  }

  bool isCheapToSpeculateCttz(Type *Ty) const override;
  bool isCheapToSpeculateCtlz(Type *Ty) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }

  bool supportSwiftError() const override {
    return true;
  }

  bool hasStandaloneRem(EVT VT) const override {
    return HasStandaloneRem;
  }

  ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const override;

  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(unsigned Factor, FixedVectorType *VecTy,
                                    Align Alignment,
                                    const DataLayout &DL) const;
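
  // Illustration (an assumption, not taken from this file): a typical
  // factor-2 NEON case is a load of <8 x i16> that is de-interleaved into its
  // even and odd elements, which can be selected as a single VLD2.16; the
  // predicate above checks that the element type and overall vector width fit
  // such an instruction.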

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  bool alignLoopsWithOptSize() const override;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  void finalizeLowering(MachineFunction &MF) const override;

  /// Return the correct alignment for the current calling convention.
  Align getABIAlignmentForCallingConv(Type *ArgTy,
                                      const DataLayout &DL) const override;

  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;

  bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                         CombineLevel Level) const override;

  bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
                                            EVT VT) const override;

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  bool isComplexDeinterleavingSupported() const override;
  bool isComplexDeinterleavingOperationSupported(
      ComplexDeinterleavingOperation Operation, Type *Ty) const override;

  Value *createComplexDeinterleavingIR(
      IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
      Value *Accumulator = nullptr) const override;

  bool softPromoteHalfType() const override { return true; }

  bool useFPRegsForHalfType() const override { return true; }

protected:
  std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI,
                          MVT VT) const override;

private:
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

  const TargetRegisterInfo *RegInfo;

  const InstrItineraryData *Itins;

  // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
  // check.
  bool InsertFencesForAtomic;

  bool HasStandaloneRem = true;

  void addTypeForNEON(MVT VT, MVT PromotedLdStVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);
  std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;

  using RegsToPassVector = SmallVector<std::pair<unsigned, SDValue>, 8>;

  void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
                        SDValue &Arg, RegsToPassVector &RegsToPass,
                        CCValAssign &VA, CCValAssign &NextVA,
                        SDValue &StackPtr,
                        SmallVectorImpl<SDValue> &MemOpChains,
                        bool IsTailCall,
                        int SPDiff) const;
  SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                               SDValue &Root, SelectionDAG &DAG,
                               const SDLoc &dl) const;

  CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                          bool isVarArg) const;
  CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                bool isVarArg) const;
  std::pair<SDValue, MachinePointerInfo>
  computeAddrForCallArg(const SDLoc &dl, SelectionDAG &DAG,
                        const CCValAssign &VA, SDValue StackPtr,
                        bool IsTailCall, int SPDiff) const;
  SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *Subtarget) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const;
  SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                               SelectionDAG &DAG,
                               TLSModel::Model model) const;
  SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUnsignedALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRESET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *ST) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
                         SmallVectorImpl<SDValue> &Results) const;
  SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
                        const ARMSubtarget *Subtarget) const;
  SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
                                 SDValue &Chain) const;
  SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  void LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
                 SelectionDAG &DAG) const;

  Register
  getRegisterByName(const char *RegName, LLT VT,
                    const MachineFunction &MF) const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  SDValue MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT, MVT ValVT,
                    SDValue Val) const;
  SDValue MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT,
                      MVT ValVT, SDValue Val) const;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal, bool isCmseNSCall) const;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }

  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT,
      std::optional<CallingConv::ID> CC) const override;

  SDValue joinRegisterPartsIntoValue(
      SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
      unsigned NumParts, MVT PartVT, EVT ValueVT,
      std::optional<CallingConv::ID> CC) const override;

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, const Value *OrigArg,
                     unsigned InRegsParamRecordIdx, int ArgOffset,
                     unsigned ArgSize) const;

  void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                            const SDLoc &dl, SDValue &Chain,
                            unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
                            bool ForceMutable = false) const;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  /// HandleByVal - Target-specific cleanup for ByVal support.
  void HandleByVal(CCState *, unsigned &, Align) const override;

  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(
      TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
      SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool isUnsupportedFloatingType(EVT VT) const;

  SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                  SDValue ARMcc, SDValue CCR, SDValue Cmp,
                  SelectionDAG &DAG) const;
  SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                    SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
  SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                    const SDLoc &dl, bool Signaling = false) const;
  SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

  SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

  void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
                              MachineBasicBlock *DispatchBB, int FI) const;

  void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitStructByval(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;
  MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;
  void addMVEVectorTypes(bool HasMVEFP);
  void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
  void setAllExpand(MVT VT);
};

enum VMOVModImmType {
  VMOVModImm,
  VMVNModImm,
  MVEVMVNModImm,
  OtherModImm
};

namespace ARM {

FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);

} // end namespace ARM

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H