//===-- RISCVISelLowering.h - RISCV DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H

#include "RISCV.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
class RISCVSubtarget;
namespace RISCVISD {
// RISCV-specific SelectionDAG node opcodes, numbered after the last generic
// ISD opcode. The actual lowering/selection of these nodes lives in the
// corresponding .cpp/.td files.
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  // Function-return nodes. The URET/SRET/MRET variants presumably correspond
  // to the RISC-V uret/sret/mret privileged return instructions -- the names
  // mirror them, but the mapping is established outside this header.
  RET_FLAG,
  URET_FLAG,
  SRET_FLAG,
  MRET_FLAG,
  // Function call (and tail call, below).
  CALL,
  // Select with an explicit condition-code operand.
  SELECT_CC,
  // Build an f64 from two GPR halves / split an f64 into two GPR halves.
  // NOTE(review): presumably used when f64 values are carried in pairs of
  // 32-bit integer registers -- confirm against the lowering code.
  BuildPairF64,
  SplitF64,
  TAIL,
  // RV64I shifts, directly matching the semantics of the named RISC-V
  // instructions.
  SLLW,
  SRAW,
  SRLW,
  // 32-bit operations from RV64M that can't be simply matched with a pattern
  // at instruction selection time.
  DIVW,
  DIVUW,
  REMUW,
  // FPR32<->GPR transfer operations for RV64. Needed as an i32<->f32 bitcast
  // is not legal on RV64. FMV_W_X_RV64 matches the semantics of the FMV.W.X.
  // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
  // This is a more convenient semantic for producing dagcombines that remove
  // unnecessary GPR->FPR->GPR moves.
  FMV_W_X_RV64,
  FMV_X_ANYEXTW_RV64,
  // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
  // (returns (Lo, Hi)). It takes a chain operand.
  READ_CYCLE_WIDE
};
} // namespace RISCVISD

/// Performs target-specific lowering for the RISC-V backend: declares which
/// operations and types need custom handling, provides the custom lowering
/// hooks, and implements calling-convention and inline-asm support.
class RISCVTargetLowering : public TargetLowering {
  /// The subtarget this lowering was created for (ISA extensions, XLEN, ABI).
  const RISCVSubtarget &Subtarget;

public:
  explicit RISCVTargetLowering(const TargetMachine &TM,
                               const RISCVSubtarget &STI);

  /// Describe the memory behavior of a target memory intrinsic so the
  /// optimizer and ISel can reason about it.
  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  // Cost/legality queries used by ISel and IR-level passes to decide which
  // addressing modes, immediates and implicit conversions are legal or free
  // on RISC-V. Implementations are in the .cpp file.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override;

  // Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  // Replace the results of a node whose result type is illegal with
  // legally-typed equivalent nodes.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  // Target-specific DAG combines.
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  // Sign-bit knowledge for the target nodes declared above, so generic
  // combines can see through them.
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  // This method returns the name of a target specific DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  // Inline-assembly constraint support.
  ConstraintType getConstraintType(StringRef Constraint) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  // Expand pseudo-instructions that need new control flow after ISel.
  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  // Prefer bitwise logic over branchy control flow when combining scalar
  // integer setcc results.
  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  // Always allow select-of-constants to be turned into arithmetic.
  bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }

  // Atomic loads and stores are handled by inserting fences around them
  // (see emitLeadingFence/emitTrailingFence) rather than by other expansion.
  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return isa<LoadInst>(I) || isa<StoreInst>(I);
  }
  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  // Narrow atomic results are sign-extended to the full register width.
  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::SIGN_EXTEND;
  }

  // The compare argument of an atomic cmpxchg is likewise sign-extended.
  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::SIGN_EXTEND;
  }

  // Expand oversized shifts inline, except when optimizing for minimum size.
  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
    if (DAG.getMachineFunction().getFunction().hasMinSize())
      return false;
    return true;
  }
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool shouldExtendTypeInLibCall(EVT Type) const override;

  /// Returns the register with the specified architectural or ABI name. This
  /// method is necessary to lower the llvm.read_register.* and
  /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
  /// with the clang -ffixed-xX flag for access to be allowed.
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  // Lower incoming arguments, copy physregs into vregs
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  // Check whether the return values can be lowered under the given calling
  // convention before committing to sret-demotion.
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  // Constants are always cheaper to materialize than to load, so never
  // convert an immediate into a constant-pool load.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override {
    return true;
  }
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                              SDValue C) const override;

  // Atomic expansion hooks: part-word atomic RMW/cmpxchg operations are
  // rewritten in terms of the masked full-word intrinsics emitted below.
  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder, AtomicRMWInst *AI,
                                      Value *AlignedAddr, Value *Incr,
                                      Value *Mask, Value *ShiftAmt,
                                      AtomicOrdering Ord) const override;
  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilder<> &Builder,
                                          AtomicCmpXchgInst *CI,
                                          Value *AlignedAddr, Value *CmpVal,
                                          Value *NewVal, Value *Mask,
                                          AtomicOrdering Ord) const override;

private:
  // Assign argument/return locations according to the calling convention;
  // used from both the call and formal-argument lowering paths.
  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        bool IsRet) const;
  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         bool IsRet, CallLoweringInfo *CLI) const;

  // Shared address-materialization helper for global addresses, block
  // addresses and constant-pool entries.
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;

  // TLS address lowering helpers: static models (optionally through the GOT)
  // and the dynamic model.
  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                           bool UseGOT) const;
  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;

  // Custom lowering routines for individual operations.
  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  // Decide whether a call can be turned into a tail call given the argument
  // assignments computed for it.
  bool isEligibleForTailCallOptimization(
      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
      const SmallVector<CCValAssign, 16> &ArgLocs) const;

  /// Generate error diagnostics if any register used by CC has been marked
  /// reserved.
  void validateCCReservedRegs(
      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
      MachineFunction &MF) const;
};
} // namespace llvm

#endif // LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H