//===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/MachineValueType.h"
#include <utility>

namespace llvm {

class ARMSubtarget;
class DataLayout;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class InstrItineraryData;
class Instruction;
class MachineBasicBlock;
class MachineInstr;
class SelectionDAG;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterInfo;
class VectorType;

namespace ARMISD {

// ARM Specific DAG Nodes.
// NOTE: Enumerator order is significant — values are assigned sequentially
// starting at ISD::BUILTIN_OP_END (and at ISD::FIRST_TARGET_MEMORY_OPCODE for
// the memory opcodes below), so do not reorder entries.
enum NodeType : unsigned {
  // Start the numbering where the builtin ops and target ops leave off.
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  Wrapper,    // Wrapper - A wrapper node for TargetConstantPool,
              // TargetExternalSymbol, and TargetGlobalAddress.
  WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
              // PIC mode.
  WrapperJT,  // WrapperJT - A wrapper node for TargetJumpTable

  // Add pseudo op to model memcpy for struct byval.
  COPY_STRUCT_BYVAL,

  CALL,        // Function call.
  CALL_PRED,   // Function call that's predicable.
  CALL_NOLINK, // Function call with branch not branch-and-link.
  BRCOND,      // Conditional branch.
  BR_JT,       // Jumptable branch.
  BR2_JT,      // Jumptable branch (2 level - jumptable entry is a jump).
  RET_FLAG,    // Return with a flag operand.
  INTRET_FLAG, // Interrupt return with an LR-offset and a flag operand.

  PIC_ADD, // Add with a PC operand and a PIC label.

  ASRL, // MVE long arithmetic shift right.
  LSRL, // MVE long shift right.
  LSLL, // MVE long shift left.

  CMP,     // ARM compare instructions.
  CMN,     // ARM CMN instructions.
  CMPZ,    // ARM compare that sets only Z flag.
  CMPFP,   // ARM VFP compare instruction, sets FPSCR.
  CMPFPw0, // ARM VFP compare against zero instruction, sets FPSCR.
  FMSTAT,  // ARM fmstat instruction.

  CMOV, // ARM conditional move instructions.
  SUBS, // Flag-setting subtraction.

  SSAT, // Signed saturation
  USAT, // Unsigned saturation

  BCC_i64,

  SRL_FLAG, // V,Flag = srl_flag X -> srl X, 1 + save carry out.
  SRA_FLAG, // V,Flag = sra_flag X -> sra X, 1 + save carry out.
  RRX,      // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

  ADDC, // Add with carry
  ADDE, // Add using carry
  SUBC, // Sub with carry
  SUBE, // Sub using carry

  VMOVRRD, // double to two gprs.
  VMOVDRR, // Two gprs to double.
  VMOVSR,  // move gpr to single, used for f32 literal constructed in a gpr

  EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
  EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
  EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.

  TC_RETURN, // Tail call return pseudo.

  THREAD_POINTER,

  DYN_ALLOC, // Dynamic allocation on the stack.

  MEMBARRIER_MCR, // Memory barrier (MCR)

  PRELOAD, // Preload

  WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
  WIN__DBZCHK, // Windows' divide by zero check

  WLS, // Low-overhead loops, While Loop Start

  VCEQ,  // Vector compare equal.
  VCEQZ, // Vector compare equal to zero.
  VCGE,  // Vector compare greater than or equal.
  VCGEZ, // Vector compare greater than or equal to zero.
  VCLEZ, // Vector compare less than or equal to zero.
  VCGEU, // Vector compare unsigned greater than or equal.
  VCGT,  // Vector compare greater than.
  VCGTZ, // Vector compare greater than zero.
  VCLTZ, // Vector compare less than zero.
  VCGTU, // Vector compare unsigned greater than.
  VTST,  // Vector test bits.

  // Vector shift by vector
  VSHLs, // ...left/right by signed
  VSHLu, // ...left/right by unsigned

  // Vector shift by immediate:
  VSHLIMM,  // ...left
  VSHRsIMM, // ...right (signed)
  VSHRuIMM, // ...right (unsigned)

  // Vector rounding shift by immediate:
  VRSHRsIMM, // ...right (signed)
  VRSHRuIMM, // ...right (unsigned)
  VRSHRNIMM, // ...right narrow

  // Vector saturating shift by immediate:
  VQSHLsIMM,   // ...left (signed)
  VQSHLuIMM,   // ...left (unsigned)
  VQSHLsuIMM,  // ...left (signed to unsigned)
  VQSHRNsIMM,  // ...right narrow (signed)
  VQSHRNuIMM,  // ...right narrow (unsigned)
  VQSHRNsuIMM, // ...right narrow (signed to unsigned)

  // Vector saturating rounding shift by immediate:
  VQRSHRNsIMM,  // ...right narrow (signed)
  VQRSHRNuIMM,  // ...right narrow (unsigned)
  VQRSHRNsuIMM, // ...right narrow (signed to unsigned)

  // Vector shift and insert:
  VSLIIMM, // ...left
  VSRIIMM, // ...right

  // Vector get lane (VMOV scalar to ARM core register)
  // (These are used for 8- and 16-bit element types only.)
  VGETLANEu, // zero-extend vector extract element
  VGETLANEs, // sign-extend vector extract element

  // Vector move immediate and move negated immediate:
  VMOVIMM,
  VMVNIMM,

  // Vector move f32 immediate:
  VMOVFPIMM,

  // Move H <-> R, clearing top 16 bits
  VMOVrh,
  VMOVhr,

  // Vector duplicate:
  VDUP,
  VDUPLANE,

  // Vector shuffles:
  VEXT,   // extract
  VREV64, // reverse elements within 64-bit doublewords
  VREV32, // reverse elements within 32-bit words
  VREV16, // reverse elements within 16-bit halfwords
  VZIP,   // zip (interleave)
  VUZP,   // unzip (deinterleave)
  VTRN,   // transpose
  VTBL1,  // 1-register shuffle with mask
  VTBL2,  // 2-register shuffle with mask

  // Vector multiply long:
  VMULLs, // ...signed
  VMULLu, // ...unsigned

  SMULWB,  // Signed multiply word by half word, bottom
  SMULWT,  // Signed multiply word by half word, top
  UMLAL,   // 64bit Unsigned Accumulate Multiply
  SMLAL,   // 64bit Signed Accumulate Multiply
  UMAAL,   // 64-bit Unsigned Accumulate Accumulate Multiply
  SMLALBB, // 64-bit signed accumulate multiply bottom, bottom 16
  SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
  SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
  SMLALTT, // 64-bit signed accumulate multiply top, top 16
  SMLALD,  // Signed multiply accumulate long dual
  SMLALDX, // Signed multiply accumulate long dual exchange
  SMLSLD,  // Signed multiply subtract long dual
  SMLSLDX, // Signed multiply subtract long dual exchange
  SMMLAR,  // Signed multiply long, round and add
  SMMLSR,  // Signed multiply long, subtract and round

  // Operands of the standard BUILD_VECTOR node are not legalized, which
  // is fine if BUILD_VECTORs are always lowered to shuffles or other
  // operations, but for ARM some BUILD_VECTORs are legal as-is and their
  // operands need to be legalized. Define an ARM-specific version of
  // BUILD_VECTOR for this purpose.
  BUILD_VECTOR,

  // Bit-field insert
  BFI,

  // Vector OR with immediate
  VORRIMM,
  // Vector AND with NOT of immediate
  VBICIMM,

  // Vector bitwise select
  VBSL,

  // Pseudo-instruction representing a memory copy using ldm/stm
  // instructions.
  MEMCPY,

  // Vector load N-element structure to all lanes:
  VLD1DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
  VLD2DUP,
  VLD3DUP,
  VLD4DUP,

  // NEON loads with post-increment base updates:
  VLD1_UPD,
  VLD2_UPD,
  VLD3_UPD,
  VLD4_UPD,
  VLD2LN_UPD,
  VLD3LN_UPD,
  VLD4LN_UPD,
  VLD1DUP_UPD,
  VLD2DUP_UPD,
  VLD3DUP_UPD,
  VLD4DUP_UPD,

  // NEON stores with post-increment base updates:
  VST1_UPD,
  VST2_UPD,
  VST3_UPD,
  VST4_UPD,
  VST2LN_UPD,
  VST3LN_UPD,
  VST4LN_UPD
};

} // end namespace ARMISD
ARMISD 276*0b57cec5SDimitry Andric 277*0b57cec5SDimitry Andric /// Define some predicates that are used for node matching. 278*0b57cec5SDimitry Andric namespace ARM { 279*0b57cec5SDimitry Andric 280*0b57cec5SDimitry Andric bool isBitFieldInvertedMask(unsigned v); 281*0b57cec5SDimitry Andric 282*0b57cec5SDimitry Andric } // end namespace ARM 283*0b57cec5SDimitry Andric 284*0b57cec5SDimitry Andric //===--------------------------------------------------------------------===// 285*0b57cec5SDimitry Andric // ARMTargetLowering - ARM Implementation of the TargetLowering interface 286*0b57cec5SDimitry Andric 287*0b57cec5SDimitry Andric class ARMTargetLowering : public TargetLowering { 288*0b57cec5SDimitry Andric public: 289*0b57cec5SDimitry Andric explicit ARMTargetLowering(const TargetMachine &TM, 290*0b57cec5SDimitry Andric const ARMSubtarget &STI); 291*0b57cec5SDimitry Andric 292*0b57cec5SDimitry Andric unsigned getJumpTableEncoding() const override; 293*0b57cec5SDimitry Andric bool useSoftFloat() const override; 294*0b57cec5SDimitry Andric 295*0b57cec5SDimitry Andric SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; 296*0b57cec5SDimitry Andric 297*0b57cec5SDimitry Andric /// ReplaceNodeResults - Replace the results of node with an illegal result 298*0b57cec5SDimitry Andric /// type with new values built out of custom code. 299*0b57cec5SDimitry Andric void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results, 300*0b57cec5SDimitry Andric SelectionDAG &DAG) const override; 301*0b57cec5SDimitry Andric 302*0b57cec5SDimitry Andric const char *getTargetNodeName(unsigned Opcode) const override; 303*0b57cec5SDimitry Andric 304*0b57cec5SDimitry Andric bool isSelectSupported(SelectSupportKind Kind) const override { 305*0b57cec5SDimitry Andric // ARM does not support scalar condition selects on vectors. 
306*0b57cec5SDimitry Andric return (Kind != ScalarCondVectorVal); 307*0b57cec5SDimitry Andric } 308*0b57cec5SDimitry Andric 309*0b57cec5SDimitry Andric bool isReadOnly(const GlobalValue *GV) const; 310*0b57cec5SDimitry Andric 311*0b57cec5SDimitry Andric /// getSetCCResultType - Return the value type to use for ISD::SETCC. 312*0b57cec5SDimitry Andric EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, 313*0b57cec5SDimitry Andric EVT VT) const override; 314*0b57cec5SDimitry Andric 315*0b57cec5SDimitry Andric MachineBasicBlock * 316*0b57cec5SDimitry Andric EmitInstrWithCustomInserter(MachineInstr &MI, 317*0b57cec5SDimitry Andric MachineBasicBlock *MBB) const override; 318*0b57cec5SDimitry Andric 319*0b57cec5SDimitry Andric void AdjustInstrPostInstrSelection(MachineInstr &MI, 320*0b57cec5SDimitry Andric SDNode *Node) const override; 321*0b57cec5SDimitry Andric 322*0b57cec5SDimitry Andric SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const; 323*0b57cec5SDimitry Andric SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const; 324*0b57cec5SDimitry Andric SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const; 325*0b57cec5SDimitry Andric SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override; 326*0b57cec5SDimitry Andric 327*0b57cec5SDimitry Andric bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override; 328*0b57cec5SDimitry Andric 329*0b57cec5SDimitry Andric /// allowsMisalignedMemoryAccesses - Returns true if the target allows 330*0b57cec5SDimitry Andric /// unaligned memory accesses of the specified type. Returns whether it 331*0b57cec5SDimitry Andric /// is "fast" by reference in the second argument. 
332*0b57cec5SDimitry Andric bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, 333*0b57cec5SDimitry Andric unsigned Align, 334*0b57cec5SDimitry Andric MachineMemOperand::Flags Flags, 335*0b57cec5SDimitry Andric bool *Fast) const override; 336*0b57cec5SDimitry Andric 337*0b57cec5SDimitry Andric EVT getOptimalMemOpType(uint64_t Size, 338*0b57cec5SDimitry Andric unsigned DstAlign, unsigned SrcAlign, 339*0b57cec5SDimitry Andric bool IsMemset, bool ZeroMemset, 340*0b57cec5SDimitry Andric bool MemcpyStrSrc, 341*0b57cec5SDimitry Andric const AttributeList &FuncAttributes) const override; 342*0b57cec5SDimitry Andric 343*0b57cec5SDimitry Andric bool isTruncateFree(Type *SrcTy, Type *DstTy) const override; 344*0b57cec5SDimitry Andric bool isTruncateFree(EVT SrcVT, EVT DstVT) const override; 345*0b57cec5SDimitry Andric bool isZExtFree(SDValue Val, EVT VT2) const override; 346*0b57cec5SDimitry Andric bool shouldSinkOperands(Instruction *I, 347*0b57cec5SDimitry Andric SmallVectorImpl<Use *> &Ops) const override; 348*0b57cec5SDimitry Andric 349*0b57cec5SDimitry Andric bool isFNegFree(EVT VT) const override; 350*0b57cec5SDimitry Andric 351*0b57cec5SDimitry Andric bool isVectorLoadExtDesirable(SDValue ExtVal) const override; 352*0b57cec5SDimitry Andric 353*0b57cec5SDimitry Andric bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override; 354*0b57cec5SDimitry Andric 355*0b57cec5SDimitry Andric 356*0b57cec5SDimitry Andric /// isLegalAddressingMode - Return true if the addressing mode represented 357*0b57cec5SDimitry Andric /// by AM is legal for this target, for a load/store of the specified type. 
358*0b57cec5SDimitry Andric bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, 359*0b57cec5SDimitry Andric Type *Ty, unsigned AS, 360*0b57cec5SDimitry Andric Instruction *I = nullptr) const override; 361*0b57cec5SDimitry Andric 362*0b57cec5SDimitry Andric /// getScalingFactorCost - Return the cost of the scaling used in 363*0b57cec5SDimitry Andric /// addressing mode represented by AM. 364*0b57cec5SDimitry Andric /// If the AM is supported, the return value must be >= 0. 365*0b57cec5SDimitry Andric /// If the AM is not supported, the return value must be negative. 366*0b57cec5SDimitry Andric int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, 367*0b57cec5SDimitry Andric unsigned AS) const override; 368*0b57cec5SDimitry Andric 369*0b57cec5SDimitry Andric bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const; 370*0b57cec5SDimitry Andric 371*0b57cec5SDimitry Andric /// Returns true if the addresing mode representing by AM is legal 372*0b57cec5SDimitry Andric /// for the Thumb1 target, for a load/store of the specified type. 373*0b57cec5SDimitry Andric bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const; 374*0b57cec5SDimitry Andric 375*0b57cec5SDimitry Andric /// isLegalICmpImmediate - Return true if the specified immediate is legal 376*0b57cec5SDimitry Andric /// icmp immediate, that is the target has icmp instructions which can 377*0b57cec5SDimitry Andric /// compare a register against the immediate without having to materialize 378*0b57cec5SDimitry Andric /// the immediate into a register. 
379*0b57cec5SDimitry Andric bool isLegalICmpImmediate(int64_t Imm) const override; 380*0b57cec5SDimitry Andric 381*0b57cec5SDimitry Andric /// isLegalAddImmediate - Return true if the specified immediate is legal 382*0b57cec5SDimitry Andric /// add immediate, that is the target has add instructions which can 383*0b57cec5SDimitry Andric /// add a register and the immediate without having to materialize 384*0b57cec5SDimitry Andric /// the immediate into a register. 385*0b57cec5SDimitry Andric bool isLegalAddImmediate(int64_t Imm) const override; 386*0b57cec5SDimitry Andric 387*0b57cec5SDimitry Andric /// getPreIndexedAddressParts - returns true by value, base pointer and 388*0b57cec5SDimitry Andric /// offset pointer and addressing mode by reference if the node's address 389*0b57cec5SDimitry Andric /// can be legally represented as pre-indexed load / store address. 390*0b57cec5SDimitry Andric bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, 391*0b57cec5SDimitry Andric ISD::MemIndexedMode &AM, 392*0b57cec5SDimitry Andric SelectionDAG &DAG) const override; 393*0b57cec5SDimitry Andric 394*0b57cec5SDimitry Andric /// getPostIndexedAddressParts - returns true by value, base pointer and 395*0b57cec5SDimitry Andric /// offset pointer and addressing mode by reference if this node can be 396*0b57cec5SDimitry Andric /// combined with a load / store to form a post-indexed load / store. 
397*0b57cec5SDimitry Andric bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, 398*0b57cec5SDimitry Andric SDValue &Offset, ISD::MemIndexedMode &AM, 399*0b57cec5SDimitry Andric SelectionDAG &DAG) const override; 400*0b57cec5SDimitry Andric 401*0b57cec5SDimitry Andric void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, 402*0b57cec5SDimitry Andric const APInt &DemandedElts, 403*0b57cec5SDimitry Andric const SelectionDAG &DAG, 404*0b57cec5SDimitry Andric unsigned Depth) const override; 405*0b57cec5SDimitry Andric 406*0b57cec5SDimitry Andric bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded, 407*0b57cec5SDimitry Andric TargetLoweringOpt &TLO) const override; 408*0b57cec5SDimitry Andric 409*0b57cec5SDimitry Andric 410*0b57cec5SDimitry Andric bool ExpandInlineAsm(CallInst *CI) const override; 411*0b57cec5SDimitry Andric 412*0b57cec5SDimitry Andric ConstraintType getConstraintType(StringRef Constraint) const override; 413*0b57cec5SDimitry Andric 414*0b57cec5SDimitry Andric /// Examine constraint string and operand type and determine a weight value. 415*0b57cec5SDimitry Andric /// The operand object must already have been set up with the operand type. 416*0b57cec5SDimitry Andric ConstraintWeight getSingleConstraintMatchWeight( 417*0b57cec5SDimitry Andric AsmOperandInfo &info, const char *constraint) const override; 418*0b57cec5SDimitry Andric 419*0b57cec5SDimitry Andric std::pair<unsigned, const TargetRegisterClass *> 420*0b57cec5SDimitry Andric getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 421*0b57cec5SDimitry Andric StringRef Constraint, MVT VT) const override; 422*0b57cec5SDimitry Andric 423*0b57cec5SDimitry Andric const char *LowerXConstraint(EVT ConstraintVT) const override; 424*0b57cec5SDimitry Andric 425*0b57cec5SDimitry Andric /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 426*0b57cec5SDimitry Andric /// vector. If it is invalid, don't add anything to Ops. 
If hasMemory is 427*0b57cec5SDimitry Andric /// true it means one of the asm constraint of the inline asm instruction 428*0b57cec5SDimitry Andric /// being processed is 'm'. 429*0b57cec5SDimitry Andric void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, 430*0b57cec5SDimitry Andric std::vector<SDValue> &Ops, 431*0b57cec5SDimitry Andric SelectionDAG &DAG) const override; 432*0b57cec5SDimitry Andric 433*0b57cec5SDimitry Andric unsigned 434*0b57cec5SDimitry Andric getInlineAsmMemConstraint(StringRef ConstraintCode) const override { 435*0b57cec5SDimitry Andric if (ConstraintCode == "Q") 436*0b57cec5SDimitry Andric return InlineAsm::Constraint_Q; 437*0b57cec5SDimitry Andric else if (ConstraintCode == "o") 438*0b57cec5SDimitry Andric return InlineAsm::Constraint_o; 439*0b57cec5SDimitry Andric else if (ConstraintCode.size() == 2) { 440*0b57cec5SDimitry Andric if (ConstraintCode[0] == 'U') { 441*0b57cec5SDimitry Andric switch(ConstraintCode[1]) { 442*0b57cec5SDimitry Andric default: 443*0b57cec5SDimitry Andric break; 444*0b57cec5SDimitry Andric case 'm': 445*0b57cec5SDimitry Andric return InlineAsm::Constraint_Um; 446*0b57cec5SDimitry Andric case 'n': 447*0b57cec5SDimitry Andric return InlineAsm::Constraint_Un; 448*0b57cec5SDimitry Andric case 'q': 449*0b57cec5SDimitry Andric return InlineAsm::Constraint_Uq; 450*0b57cec5SDimitry Andric case 's': 451*0b57cec5SDimitry Andric return InlineAsm::Constraint_Us; 452*0b57cec5SDimitry Andric case 't': 453*0b57cec5SDimitry Andric return InlineAsm::Constraint_Ut; 454*0b57cec5SDimitry Andric case 'v': 455*0b57cec5SDimitry Andric return InlineAsm::Constraint_Uv; 456*0b57cec5SDimitry Andric case 'y': 457*0b57cec5SDimitry Andric return InlineAsm::Constraint_Uy; 458*0b57cec5SDimitry Andric } 459*0b57cec5SDimitry Andric } 460*0b57cec5SDimitry Andric } 461*0b57cec5SDimitry Andric return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); 462*0b57cec5SDimitry Andric } 463*0b57cec5SDimitry Andric 464*0b57cec5SDimitry 
Andric const ARMSubtarget* getSubtarget() const { 465*0b57cec5SDimitry Andric return Subtarget; 466*0b57cec5SDimitry Andric } 467*0b57cec5SDimitry Andric 468*0b57cec5SDimitry Andric /// getRegClassFor - Return the register class that should be used for the 469*0b57cec5SDimitry Andric /// specified value type. 470*0b57cec5SDimitry Andric const TargetRegisterClass * 471*0b57cec5SDimitry Andric getRegClassFor(MVT VT, bool isDivergent = false) const override; 472*0b57cec5SDimitry Andric 473*0b57cec5SDimitry Andric /// Returns true if a cast between SrcAS and DestAS is a noop. 474*0b57cec5SDimitry Andric bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override { 475*0b57cec5SDimitry Andric // Addrspacecasts are always noops. 476*0b57cec5SDimitry Andric return true; 477*0b57cec5SDimitry Andric } 478*0b57cec5SDimitry Andric 479*0b57cec5SDimitry Andric bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, 480*0b57cec5SDimitry Andric unsigned &PrefAlign) const override; 481*0b57cec5SDimitry Andric 482*0b57cec5SDimitry Andric /// createFastISel - This method returns a target specific FastISel object, 483*0b57cec5SDimitry Andric /// or null if the target does not support "fast" ISel. 484*0b57cec5SDimitry Andric FastISel *createFastISel(FunctionLoweringInfo &funcInfo, 485*0b57cec5SDimitry Andric const TargetLibraryInfo *libInfo) const override; 486*0b57cec5SDimitry Andric 487*0b57cec5SDimitry Andric Sched::Preference getSchedulingPreference(SDNode *N) const override; 488*0b57cec5SDimitry Andric 489*0b57cec5SDimitry Andric bool 490*0b57cec5SDimitry Andric isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override; 491*0b57cec5SDimitry Andric bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override; 492*0b57cec5SDimitry Andric 493*0b57cec5SDimitry Andric /// isFPImmLegal - Returns true if the target can instruction select the 494*0b57cec5SDimitry Andric /// specified FP immediate natively. 
If false, the legalizer will 495*0b57cec5SDimitry Andric /// materialize the FP immediate as a load from a constant pool. 496*0b57cec5SDimitry Andric bool isFPImmLegal(const APFloat &Imm, EVT VT, 497*0b57cec5SDimitry Andric bool ForCodeSize = false) const override; 498*0b57cec5SDimitry Andric 499*0b57cec5SDimitry Andric bool getTgtMemIntrinsic(IntrinsicInfo &Info, 500*0b57cec5SDimitry Andric const CallInst &I, 501*0b57cec5SDimitry Andric MachineFunction &MF, 502*0b57cec5SDimitry Andric unsigned Intrinsic) const override; 503*0b57cec5SDimitry Andric 504*0b57cec5SDimitry Andric /// Returns true if it is beneficial to convert a load of a constant 505*0b57cec5SDimitry Andric /// to just the constant itself. 506*0b57cec5SDimitry Andric bool shouldConvertConstantLoadToIntImm(const APInt &Imm, 507*0b57cec5SDimitry Andric Type *Ty) const override; 508*0b57cec5SDimitry Andric 509*0b57cec5SDimitry Andric /// Return true if EXTRACT_SUBVECTOR is cheap for this result type 510*0b57cec5SDimitry Andric /// with this index. 511*0b57cec5SDimitry Andric bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, 512*0b57cec5SDimitry Andric unsigned Index) const override; 513*0b57cec5SDimitry Andric 514*0b57cec5SDimitry Andric /// Returns true if an argument of type Ty needs to be passed in a 515*0b57cec5SDimitry Andric /// contiguous block of registers in calling convention CallConv. 516*0b57cec5SDimitry Andric bool functionArgumentNeedsConsecutiveRegisters( 517*0b57cec5SDimitry Andric Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override; 518*0b57cec5SDimitry Andric 519*0b57cec5SDimitry Andric /// If a physical register, this returns the register that receives the 520*0b57cec5SDimitry Andric /// exception address on entry to an EH pad. 
521*0b57cec5SDimitry Andric unsigned 522*0b57cec5SDimitry Andric getExceptionPointerRegister(const Constant *PersonalityFn) const override; 523*0b57cec5SDimitry Andric 524*0b57cec5SDimitry Andric /// If a physical register, this returns the register that receives the 525*0b57cec5SDimitry Andric /// exception typeid on entry to a landing pad. 526*0b57cec5SDimitry Andric unsigned 527*0b57cec5SDimitry Andric getExceptionSelectorRegister(const Constant *PersonalityFn) const override; 528*0b57cec5SDimitry Andric 529*0b57cec5SDimitry Andric Instruction *makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const; 530*0b57cec5SDimitry Andric Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr, 531*0b57cec5SDimitry Andric AtomicOrdering Ord) const override; 532*0b57cec5SDimitry Andric Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val, 533*0b57cec5SDimitry Andric Value *Addr, AtomicOrdering Ord) const override; 534*0b57cec5SDimitry Andric 535*0b57cec5SDimitry Andric void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override; 536*0b57cec5SDimitry Andric 537*0b57cec5SDimitry Andric Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst, 538*0b57cec5SDimitry Andric AtomicOrdering Ord) const override; 539*0b57cec5SDimitry Andric Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst, 540*0b57cec5SDimitry Andric AtomicOrdering Ord) const override; 541*0b57cec5SDimitry Andric 542*0b57cec5SDimitry Andric unsigned getMaxSupportedInterleaveFactor() const override { return 4; } 543*0b57cec5SDimitry Andric 544*0b57cec5SDimitry Andric bool lowerInterleavedLoad(LoadInst *LI, 545*0b57cec5SDimitry Andric ArrayRef<ShuffleVectorInst *> Shuffles, 546*0b57cec5SDimitry Andric ArrayRef<unsigned> Indices, 547*0b57cec5SDimitry Andric unsigned Factor) const override; 548*0b57cec5SDimitry Andric bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, 549*0b57cec5SDimitry Andric unsigned Factor) const override; 
  // Policy hooks controlling how atomic operations are expanded in IR.
  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;

  // Stack-smashing-protector support: declarations inserted into the
  // module and the values/functions used by SelectionDAG.
  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                 unsigned &Cost) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge to larger than i32.
    return (MemVT.getSizeInBits() <= 32);
  }

  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;

  // Only fold setcc logic into bitwise ops for scalar integer types.
  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }

  /// This target supports the swifterror attribute.
  bool supportSwiftError() const override {
    return true;
  }

  /// Whether the target has a standalone remainder instruction/libcall for
  /// \p VT (set per-subtarget; see HasStandaloneRem below).
  bool hasStandaloneRem(EVT VT) const override {
    return HasStandaloneRem;
  }

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;

  // Select the CCAssignFn used to assign arguments/returns for a call with
  // the given calling convention.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  bool alignLoopsWithOptSize() const override;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  void finalizeLowering(MachineFunction &MF) const override;

  /// Return the correct alignment for the current calling convention.
  unsigned getABIAlignmentForCallingConv(Type *ArgTy,
                                         DataLayout DL) const override;

  // DAG-combine policy hooks for shift patterns.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                         CombineLevel Level) const override;

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

protected:
  std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI,
                          MVT VT) const override;

private:
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

  const TargetRegisterInfo *RegInfo;

  const InstrItineraryData *Itins;

  /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
  unsigned ARMPCLabelIndex;

  // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
  // check.
  bool InsertFencesForAtomic;

  // Backs hasStandaloneRem(); cleared per-subtarget when there is no
  // standalone remainder operation.
  bool HasStandaloneRem = true;

  // Helpers that register legal vector types and their promotions with the
  // legalizer during construction.
  void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  /// Build the DAG nodes for an overflow-detecting ALU operation, returning
  /// the result/overflow values and the ARM condition code via \p ARMcc.
  std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;

  // (physical register, value) pairs accumulated while lowering a call.
  using RegsToPassVector = SmallVector<std::pair<unsigned, SDValue>, 8>;

  // Helpers for passing/receiving f64 values split across register pairs
  // (soft-float ABI style: VA holds the first half, NextVA the second).
  void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
                        SDValue &Arg, RegsToPassVector &RegsToPass,
                        CCValAssign &VA, CCValAssign &NextVA,
                        SDValue &StackPtr,
                        SmallVectorImpl<SDValue> &MemOpChains,
                        ISD::ArgFlagsTy Flags) const;
  SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                               SDValue &Root, SelectionDAG &DAG,
                               const SDLoc &dl) const;

  CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                          bool isVarArg) const;
  CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                bool isVarArg) const;

  /// Lower one memory (stack) argument of a call.
  SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                           const SDLoc &dl, SelectionDAG &DAG,
                           const CCValAssign &VA,
                           ISD::ArgFlagsTy Flags) const;

  // Per-opcode custom lowering routines, dispatched from LowerOperation
  // (defined in ARMISelLowering.cpp).
  SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const;
  SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                               SelectionDAG &DAG,
                               TLSModel::Model model) const;
  SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUnsignedALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *ST) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  // Division/remainder lowering, including Windows-specific libcall forms.
  SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
                         SmallVectorImpl<SDValue> &Results) const;
  SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
                                 SDValue &Chain) const;
  SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  void lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
                SelectionDAG &DAG) const;

  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Targets can override this to produce a better expansion of
  /// signed division by a power-of-two constant.
  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  ///
  /// ARM supports both fused and unfused multiply-add operations; we already
  /// lower a pair of fmul and fadd to the latter so it's not clear that there
  /// would be a gain or that the gain would be worthwhile enough to risk
  /// correctness bugs.
  // See the comment block above: FMA formation is deliberately disabled.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override { return false; }

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  /// Copy the values returned by a call out of their physical registers
  /// into \p InVals.
  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  /// Split callee-saved-register handling is only used for no-unwind
  /// CXX_FAST_TLS functions.
  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }

  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  /// Spill byval arguments passed (partially) in registers to the stack;
  /// returns the frame index of the spill area.
  int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, const Value *OrigArg,
                     unsigned InRegsParamRecordIdx, int ArgOffset,
                     unsigned ArgSize) const;

  /// Save the variadic argument registers for a varargs function.
  void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                            const SDLoc &dl, SDValue &Chain,
                            unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
                            bool ForceMutable = false) const;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  /// HandleByVal - Target-specific cleanup for ByVal support.
  void HandleByVal(CCState *, unsigned &, unsigned) const override;

  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
      const bool isIndirect) const;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool isUnsupportedFloatingType(EVT VT) const;

  // Helpers for building ARM conditional-move and compare DAG nodes.
  SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                  SDValue ARMcc, SDValue CCR, SDValue Cmp,
                  SelectionDAG &DAG) const;
  SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                    SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
  SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                    const SDLoc &dl, bool InvalidOnQNaN) const;
  SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

  SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

  // SjLj exception-handling machinery: set up the function entry block and
  // emit the dispatch block for the landing pads.
  void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
                              MachineBasicBlock *DispatchBB, int FI) const;

  void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;

  bool RemapAddSubWithFlags(MachineInstr &MI, MachineBasicBlock *BB) const;

  // Custom MachineInstr expansions (invoked from EmitInstrWithCustomInserter
  // style hooks; defined in the .cpp).
  MachineBasicBlock *EmitStructByval(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;
  MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

  // Constructor-time helpers that register MVE vector types and extending
  // load actions with the legalizer.
  void addMVEVectorTypes(bool HasMVEFP);
  void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
  void setAllExpand(MVT VT);
};

/// Kinds of modified-immediate encodings recognized for NEON/MVE vector
/// move instructions.
enum NEONModImmType {
  VMOVModImm,
  VMVNModImm,
  MVEVMVNModImm,
  OtherModImm
};

namespace ARM {

/// Create an ARM-specific FastISel instance.
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);

} // end namespace ARM

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H