//===-- RISCVISelLowering.h - RISCV DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H

#include "RISCV.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
class RISCVSubtarget;
namespace RISCVISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  RET_FLAG,
  URET_FLAG,
  SRET_FLAG,
  MRET_FLAG,
  CALL,
  SELECT_CC,
  BuildPairF64,
  SplitF64,
  TAIL,
  // RV64I shifts, directly matching the semantics of the named RISC-V
  // instructions.
  SLLW,
  SRAW,
  SRLW,
  // 32-bit operations from RV64M that can't be simply matched with a pattern
  // at instruction selection time.
  DIVW,
  DIVUW,
  REMUW,
  // FPR32<->GPR transfer operations for RV64. Needed as an i32<->f32 bitcast
  // is not legal on RV64. FMV_W_X_RV64 matches the semantics of the FMV.W.X.
  // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
  // This is a more convenient semantic for producing dagcombines that remove
  // unnecessary GPR->FPR->GPR moves.
  FMV_W_X_RV64,
  FMV_X_ANYEXTW_RV64,
  // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
  // (returns (Lo, Hi)). It takes a chain operand.
  READ_CYCLE_WIDE
};
}
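
// Illustrative note (not from the upstream header), assuming RV64 with the F
// extension: the FPR32<->GPR transfer nodes above exist because i32 is not a
// legal type on RV64, so IR such as
//
//   %bits = bitcast float %f to i32
//
// cannot be selected as a plain bitcast and is instead lowered through
// FMV_X_ANYEXTW_RV64 (with FMV_W_X_RV64 covering the opposite direction); the
// any-extended result lets dagcombines remove redundant GPR->FPR->GPR moves.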

class RISCVTargetLowering : public TargetLowering {
  const RISCVSubtarget &Subtarget;

public:
  explicit RISCVTargetLowering(const TargetMachine &TM,
                               const RISCVSubtarget &STI);

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override;

  // Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  // This method returns the name of a target specific DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return isa<LoadInst>(I) || isa<StoreInst>(I);
  }
  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::SIGN_EXTEND;
  }

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
    if (DAG.getMachineFunction().getFunction().hasMinSize())
      return false;
    return true;
  }
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool shouldExtendTypeInLibCall(EVT Type) const override;

  /// Returns the register with the specified architectural or ABI name. This
  /// method is necessary to lower the llvm.read_register.* and
  /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
  /// with the clang -ffixed-xX flag for access to be allowed.
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;
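
  // Illustrative IR only (not part of the upstream header): getRegisterByName
  // resolves the metadata register name used by the read/write-register
  // intrinsics, e.g. reading the stack pointer by its ABI name on RV64, where
  // !0 is module-level metadata naming the register:
  //
  //   %sp = call i64 @llvm.read_register.i64(metadata !0)
  //   !0 = !{!"sp"}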

private:
  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        bool IsRet) const;
  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         bool IsRet, CallLoweringInfo *CLI) const;
  // Lower incoming arguments, copy physregs into vregs
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override {
    return true;
  }

  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;

  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                           bool UseGOT) const;
  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;

  bool shouldConsiderGEPOffsetSplit() const override { return true; }
  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;

  bool isEligibleForTailCallOptimization(
      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
      const SmallVector<CCValAssign, 16> &ArgLocs) const;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  virtual Value *emitMaskedAtomicRMWIntrinsic(
      IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
      Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override;
  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
  virtual Value *
  emitMaskedAtomicCmpXchgIntrinsic(IRBuilder<> &Builder, AtomicCmpXchgInst *CI,
                                   Value *AlignedAddr, Value *CmpVal,
                                   Value *NewVal, Value *Mask,
                                   AtomicOrdering Ord) const override;

  /// Generate error diagnostics if any register used by CC has been marked
  /// reserved.
  void validateCCReservedRegs(
      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
      MachineFunction &MF) const;
};
}

#endif