//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
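
  // Note: XLenVT is MVT::i32 on RV32 and MVT::i64 on RV64, so every action
  // registered against XLenVT below automatically tracks the native integer
  // register width.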

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, MVT::i32, Custom);
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  setOperationAction(ISD::ROTL, XLenVT, Expand);
  setOperationAction(ISD::ROTR, XLenVT, Expand);
  setOperationAction(ISD::BSWAP, XLenVT, Expand);
  setOperationAction(ISD::CTTZ, XLenVT, Expand);
  setOperationAction(ISD::CTLZ, XLenVT, Expand);
  setOperationAction(ISD::CTPOP, XLenVT, Expand);

  ISD::CondCode FPCCToExtend[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE, ISD::SETNE};

  ISD::NodeType FPOpToExtend[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
      ISD::FP_TO_FP16};
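
  // For illustration: the F/D extensions only provide FEQ/FLT/FLE, so a
  // condition such as (setogt a, b) from the list above is legalised by the
  // generic condcode expansion into (setolt b, a), which matches FLT.S/FLT.D
  // directly.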

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    for (auto CC : FPCCToExtend)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExtend)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    for (auto CC : FPCCToExtend)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExtend)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit() &&
      !(Subtarget.hasStdExtD() || Subtarget.hasStdExtF())) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);
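  // (The C extension permits 16-bit instruction parcels, so function entry
  // points only need 2-byte alignment when it is present; otherwise the base
  // 4-byte instruction alignment applies.)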

  // Effectively disable jump table generation.
  setMinimumJumpTableEntries(INT_MAX);
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  return VT.changeVectorElementTypeToInteger();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}
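
// For example, (zext (i8 load)) selects to LBU and (zext (i16 load)) to LHU
// (LWU for i32 on RV64), so the zero extension comes for free with the load
// and no separate mask or shift pair is needed.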
bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::BITCAST: {
    assert(Subtarget.is64Bit() && Subtarget.hasStdExtF() &&
           "Unexpected custom legalisation");
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    if (Op.getValueType() != MVT::f32 || Op0.getValueType() != MVT::i32)
      return SDValue();
    SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
    SDValue FPConv = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
    return FPConv;
  }
  }
}

static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
}

static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
                                   Flags);
}

static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
                                   N->getOffset(), Flags);
}

template <class NodeTy>
SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                     bool IsLocal) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());

  if (isPositionIndependent()) {
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    if (IsLocal)
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);

    // Use PC-relative addressing to access the GOT for this symbol, then load
    // the address from the GOT. This generates the pattern (PseudoLA sym),
    // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
    return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
  }

  switch (getTargetMachine().getCodeModel()) {
  default:
    report_fatal_error("Unsupported code model for lowering");
  case CodeModel::Small: {
    // Generate a sequence for accessing addresses within the first 2 GiB of
    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
    return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
  }
  case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2 GiB range of
    // the address space. This generates the pattern (PseudoLLA sym), which
    // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
  }
  }
}
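
// For illustration, the two sequences above assemble to roughly:
//
//   Small code model:           Medium code model:
//     lui  a0, %hi(sym)         .Lpcrel_hi0:
//     addi a0, a0, %lo(sym)       auipc a0, %pcrel_hi(sym)
//                                 addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)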

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  const GlobalValue *GV = N->getGlobal();
  bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  SDValue Addr = getAddr(N, DAG, IsLocal);

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                              SelectionDAG &DAG,
                                              bool UseGOT) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  const GlobalValue *GV = N->getGlobal();
  MVT XLenVT = Subtarget.getXLenVT();

  if (UseGOT) {
    // Use PC-relative addressing to access the GOT for this TLS symbol, then
    // load the address from the GOT and add the thread pointer. This generates
    // the pattern (PseudoLA_TLS_IE sym), which expands to
    // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
    SDValue Load =
        SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);

    // Add the thread pointer.
    SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
    return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
  }

  // Generate a sequence for accessing the address relative to the thread
  // pointer, with the appropriate adjustment for the thread pointer offset.
  // This generates the pattern
  // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
  SDValue AddrHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
  SDValue AddrAdd =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
  SDValue AddrLo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);

  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
  SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
  SDValue MNAdd = SDValue(
      DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
      0);
  return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
}

SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                               SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
  const GlobalValue *GV = N->getGlobal();

  // Use a PC-relative addressing mode to access the global dynamic GOT
  // address. This generates the pattern (PseudoLA_TLS_GD sym), which expands
  // to (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
  SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
  SDValue Load =
      SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);

  // Prepare argument list to generate call.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Load;
  Entry.Ty = CallTy;
  Args.push_back(Entry);

  // Set up the call to __tls_get_addr.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, CallTy,
                    DAG.getExternalSymbol("__tls_get_addr", Ty),
                    std::move(Args));

  return LowerCallTo(CLI).first;
}

SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());

  SDValue Addr;
  switch (Model) {
  case TLSModel::LocalExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
    break;
  case TLSModel::InitialExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
    break;
  case TLSModel::LocalDynamic:
  case TLSModel::GeneralDynamic:
    Addr = getDynamicTLSAddr(N, DAG);
    break;
  }

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  Register FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
  Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}
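
// Worked example of the SHL_PARTS expansion below, with XLEN=32: for Shamt=4,
// Lo = Lo << 4 and Hi = (Hi << 4) | (Lo >> 28); for Shamt=40, Shamt-XLEN = 8
// is non-negative, so Lo = 0 and Hi = Lo << 8.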
SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Lo = 0
  //   Hi = Lo << (Shamt-XLEN)

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
  SDValue ShiftRightLo =
      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                  bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // SRA expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>s Shamt
  //   else:
  //     Lo = Hi >>s (Shamt-XLEN);
  //     Hi = Hi >>s (XLEN-1)
  //
  // SRL expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>u Shamt
  //   else:
  //     Lo = Hi >>u (Shamt-XLEN);
  //     Hi = 0;

  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
  SDValue ShiftLeftHi =
      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
  SDValue HiFalse =
      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case ISD::SHL:
    return RISCVISD::SLLW;
  case ISD::SRA:
    return RISCVISD::SRAW;
  case ISD::SRL:
    return RISCVISD::SRLW;
  case ISD::SDIV:
    return RISCVISD::DIVW;
  case ISD::UDIV:
    return RISCVISD::DIVUW;
  case ISD::UREM:
    return RISCVISD::REMUW;
  }
}

// Converts the given 32-bit operation to a target-specific SelectionDAG node.
// Because i32 isn't a legal type for RV64, these operations would otherwise
// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
// instructions later on, because the fact that the operation was originally of
// type i32 is lost.
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
  // ReplaceNodeResults requires we maintain the same type for the return
  // value.
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

// Converts the given 32-bit operation to an i64 operation with sign extension
// semantics, reducing the number of sign extension instructions required.
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
  SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                               DAG.getValueType(MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}
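
// For illustration, customLegalizeToWOpWithSExt rewrites an illegal
// (i32 (add a, b)) on RV64 into:
//   (i32 (trunc (sext_inreg (add (anyext a), (anyext b)), i32)))
// The sext_inreg marker lets instruction selection pick ADDW, whose result is
// already sign-extended, and lets later combines drop redundant extensions.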

void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  SDLoc DL(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom type legalize this operation!");
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsStrict = N->isStrictFPOpcode();
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
    RTLIB::Libcall LC;
    if (N->getOpcode() == ISD::FP_TO_SINT ||
        N->getOpcode() == ISD::STRICT_FP_TO_SINT)
      LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
    else
      LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
    MakeLibCallOptions CallOptions;
    EVT OpVT = Op0.getValueType();
    CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
    SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
    SDValue Result;
    std::tie(Result, Chain) =
        makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
    Results.push_back(Result);
    if (IsStrict)
      Results.push_back(Chain);
    break;
  }
  case ISD::READCYCLECOUNTER: {
    assert(!Subtarget.is64Bit() &&
           "READCYCLECOUNTER only has custom type legalization on riscv32");

    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RCW =
        DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));

    Results.push_back(RCW);
    Results.push_back(RCW.getValue(1));
    Results.push_back(RCW.getValue(2));
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::UREM:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtM() && "Unexpected custom legalisation");
    if (N->getOperand(0).getOpcode() == ISD::Constant ||
        N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::BITCAST: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtF() && "Unexpected custom legalisation");
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    if (Op0.getValueType() != MVT::f32)
      return;
    SDValue FPConv =
        DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
    break;
  }
  }
}

SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  switch (N->getOpcode()) {
  default:
    break;
  case RISCVISD::SplitF64: {
    SDValue Op0 = N->getOperand(0);
    // If the input to SplitF64 is just BuildPairF64 then the operation is
    // redundant. Instead, use BuildPairF64's operands directly.
    if (Op0->getOpcode() == RISCVISD::BuildPairF64)
      return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));

    SDLoc DL(N);

    // It's cheaper to materialise two 32-bit integers than to load a double
    // from the constant pool and transfer it to integer registers through the
    // stack.
    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
      APInt V = C->getValueAPF().bitcastToAPInt();
      SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
      SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
      return DCI.CombineTo(N, Lo, Hi);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewSplitF64 =
        DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
                    Op0.getOperand(0));
    SDValue Lo = NewSplitF64.getValue(0);
    SDValue Hi = NewSplitF64.getValue(1);
    APInt SignBit = APInt::getSignMask(32);
    if (Op0.getOpcode() == ISD::FNEG) {
      SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
                                  DAG.getConstant(SignBit, DL, MVT::i32));
      return DCI.CombineTo(N, Lo, NewHi);
    }
    assert(Op0.getOpcode() == ISD::FABS);
    SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
                                DAG.getConstant(~SignBit, DL, MVT::i32));
    return DCI.CombineTo(N, Lo, NewHi);
  }
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW: {
    // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
    APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
    if ((SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)))
      return SDValue();
    break;
  }
  case RISCVISD::FMV_X_ANYEXTW_RV64: {
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
    // conversion is unnecessary and can be replaced with an ANY_EXTEND
    // of the FMV_W_X_RV64 operand.
    if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
      SDValue AExtOp =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0.getOperand(0));
      return DCI.CombineTo(N, AExtOp);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
                                 Op0.getOperand(0));
    APInt SignBit = APInt::getSignMask(32).sext(64);
    if (Op0.getOpcode() == ISD::FNEG) {
      return DCI.CombineTo(N,
                           DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
                                       DAG.getConstant(SignBit, DL, MVT::i64)));
    }
    assert(Op0.getOpcode() == ISD::FABS);
    return DCI.CombineTo(N,
                         DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
                                     DAG.getConstant(~SignBit, DL, MVT::i64)));
  }
  }

  return SDValue();
}

bool RISCVTargetLowering::isDesirableToCommuteWithShift(
    const SDNode *N, CombineLevel Level) const {
  // The following folds are only desirable if `(OP _, c1 << c2)` can be
  // materialised in fewer instructions than `(OP _, c1)`:
  //
  // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  SDValue N0 = N->getOperand(0);
  EVT Ty = N0.getValueType();
  if (Ty.isScalarInteger() &&
      (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
    auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (C1 && C2) {
      APInt C1Int = C1->getAPIntValue();
      APInt ShiftedC1Int = C1Int << C2->getAPIntValue();

      // We can materialise `c1 << c2` into an add immediate, so it's "free",
      // and the combine should happen, to potentially allow further combines
      // later.
      if (ShiftedC1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
        return true;

      // We can materialise `c1` in an add immediate, so it's "free", and the
      // combine should be prevented.
      if (C1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(C1Int.getSExtValue()))
        return false;

      // Neither constant will fit into an immediate, so find materialisation
      // costs.
      int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
                                              Subtarget.is64Bit());
      int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
          ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());

      // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
      // combine should be prevented.
      if (C1Cost < ShiftedC1Cost)
        return false;
    }
  }
  return true;
}

unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW:
  case RISCVISD::DIVW:
  case RISCVISD::DIVUW:
  case RISCVISD::REMUW:
    // TODO: As the result is sign-extended, this is conservatively correct. A
    // more precise answer could be calculated for SRAW depending on known
    // bits in the shift amount. (All these *W nodes produce their i32 result
    // sign-extended to i64, so bit 31 and the 32 bits above it always agree,
    // giving at least 33 sign bits.)
    return 33;
  }

  return 1;
}

static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
                                                  MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");

  // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
  // Should the count have wrapped while it was being read, we need to try
  // again.
  // ...
  // read:
  //   rdcycleh x3 # load high word of cycle
  //   rdcycle  x2 # load low word of cycle
  //   rdcycleh x4 # load high word of cycle
  //   bne x3, x4, read # check if high word reads match, otherwise try again
  // ...

  MachineFunction &MF = *BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, LoopMBB);

  MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(LoopMBB);

  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  DebugLoc DL = MI.getDebugLoc();

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);

  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(HiReg)
      .addReg(ReadAgainReg)
      .addMBB(LoopMBB);

  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();

  return DoneMBB;
}

static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
                                             MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  Register SrcReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();

  TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
                          RI);
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                              MachineMemOperand::MOLoad, 8, 8);
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMO);
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMO);
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
                                                 MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
         "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register DstReg = MI.getOperand(0).getReg();
  Register LoReg = MI.getOperand(1).getReg();
  Register HiReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                              MachineMemOperand::MOStore, 8, 8);
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMO);
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMO);
  TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static bool isSelectPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
    return true;
  }
}
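
// For illustration, a single Select_GPR_Using_CC_GPR with CC == SETLT is
// expanded by emitSelectPseudo below into roughly:
//   blt lhs, rhs, .LTailMBB  # condition true: PHI picks TrueV from HeadMBB
//   # fall through to IfFalseMBB: PHI picks FalseV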
static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
                                           MachineBasicBlock *BB) {
  // To "insert" Select_* instructions, we actually have to insert the triangle
  // control-flow pattern. The incoming instructions know the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and the condcode to use to select the appropriate branch.
  //
  // We produce the following control flow:
  //     HeadMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    TailMBB
  //
  // When we find a sequence of selects we attempt to optimize their emission
  // by sharing the control flow. Currently we only handle cases where we have
  // multiple selects with the exact same condition (same LHS, RHS and CC).
  // The selects may be interleaved with other instructions if the other
  // instructions meet some requirements we deem safe:
  // - They are debug instructions. Otherwise,
  // - They do not have side-effects, do not access memory and their inputs do
  //   not depend on the results of the select pseudo-instructions.
  // The TrueV/FalseV operands of the selects cannot depend on the result of
  // previous selects in the sequence.
  // These conditions could be further relaxed. See the X86 target for a
  // related approach and more information.
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());

  SmallVector<MachineInstr *, 4> SelectDebugValues;
  SmallSet<Register, 4> SelectDests;
  SelectDests.insert(MI.getOperand(0).getReg());

  MachineInstr *LastSelectPseudo = &MI;

  for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
       SequenceMBBI != E; ++SequenceMBBI) {
    if (SequenceMBBI->isDebugInstr())
      continue;
    else if (isSelectPseudo(*SequenceMBBI)) {
      if (SequenceMBBI->getOperand(1).getReg() != LHS ||
          SequenceMBBI->getOperand(2).getReg() != RHS ||
          SequenceMBBI->getOperand(3).getImm() != CC ||
          SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
          SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
        break;
      LastSelectPseudo = &*SequenceMBBI;
      SequenceMBBI->collectDebugValues(SelectDebugValues);
      SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
    } else {
      if (SequenceMBBI->hasUnmodeledSideEffects() ||
          SequenceMBBI->mayLoadOrStore())
        break;
      if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
            return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
          }))
        break;
    }
  }

  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction::iterator I = ++BB->getIterator();

  MachineBasicBlock *HeadMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);

  // Transfer debug instructions associated with the selects to TailMBB.
  for (MachineInstr *DebugInstr : SelectDebugValues) {
    TailMBB->push_back(DebugInstr->removeFromParent());
  }

  // Move all instructions after the sequence to TailMBB.
  TailMBB->splice(TailMBB->end(), HeadMBB,
                  std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi nodes for the selects.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);

  BuildMI(HeadMBB, DL, TII.get(Opcode))
      .addReg(LHS)
      .addReg(RHS)
      .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // Create PHIs for all of the select pseudo-instructions.
  auto SelectMBBI = MI.getIterator();
  auto SelectEnd = std::next(LastSelectPseudo->getIterator());
  auto InsertionPoint = TailMBB->begin();
  while (SelectMBBI != SelectEnd) {
    auto Next = std::next(SelectMBBI);
    if (isSelectPseudo(*SelectMBBI)) {
      // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
      BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
              TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
          .addReg(SelectMBBI->getOperand(4).getReg())
          .addMBB(HeadMBB)
          .addReg(SelectMBBI->getOperand(5).getReg())
          .addMBB(IfFalseMBB);
      SelectMBBI->eraseFromParent();
    }
    SelectMBBI = Next;
  }

  F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
  return TailMBB;
}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case RISCV::ReadCycleWide:
    assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
    return emitReadCycleWidePseudo(MI, BB);
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
    return emitSelectPseudo(MI, BB);
  case RISCV::BuildPairF64Pseudo:
    return emitBuildPairF64Pseudo(MI, BB);
  case RISCV::SplitF64Pseudo:
    return emitSplitF64Pseudo(MI, BB);
  }
}

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
//   passed in a pair of registers (fp+fp, int+fp), and both registers are
//   available, then pass as two separate arguments. If either the GPRs or FPRs
//   are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
//   slot (as it is larger than 2*XLEN and the floating point rules don't
//   apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
//   word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
//   not based on its size and fields. If it will be returned by reference, the
//   frontend must modify the prototype so a pointer with the sret annotation is
//   passed as the first argument. This is not necessary for large scalar
//   returns.
// * Struct return values and varargs should be coerced to structs containing
//   register-size fields in the same situations they would be for fixed
//   arguments.
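//
// For example, under ilp32 a `struct { int32_t a, b; }` argument is coerced by
// the frontend to `[2 x i32]` and lands in two GPRs, while under ilp32d a
// `struct { float f; double d; }` can be passed as two separate FPR arguments
// as long as FPRs remain available.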

static const MCPhysReg ArgGPRs[] = {
  RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
  RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
};
static const MCPhysReg ArgFPR32s[] = {
  RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
  RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
};
static const MCPhysReg ArgFPR64s[] = {
  RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
  RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
};

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2) {
  unsigned XLenInBytes = XLen / 8;
  if (Register Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
    return false;
  }

  if (Register Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
  }

  return false;
}
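
// For example, with the above, an i64 argument on RV32 is split into two i32
// halves: both go in GPRs if two remain, the low half goes in a7 and the high
// half on the stack if only one remains, and both go on the stack otherwise.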
1511   bool UseGPRForF64 = true;
1512
1513   switch (ABI) {
1514   default:
1515     llvm_unreachable("Unexpected ABI");
1516   case RISCVABI::ABI_ILP32:
1517   case RISCVABI::ABI_LP64:
1518     break;
1519   case RISCVABI::ABI_ILP32F:
1520   case RISCVABI::ABI_LP64F:
1521     UseGPRForF32 = !IsFixed;
1522     break;
1523   case RISCVABI::ABI_ILP32D:
1524   case RISCVABI::ABI_LP64D:
1525     UseGPRForF32 = !IsFixed;
1526     UseGPRForF64 = !IsFixed;
1527     break;
1528   }
1529
1530   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s))
1531     UseGPRForF32 = true;
1532   if (State.getFirstUnallocated(ArgFPR64s) == array_lengthof(ArgFPR64s))
1533     UseGPRForF64 = true;
1534
1535   // From this point on, rely on UseGPRForF32, UseGPRForF64 and similar local
1536   // variables rather than directly checking against the target ABI.
1537
1538   if (UseGPRForF32 && ValVT == MVT::f32) {
1539     LocVT = XLenVT;
1540     LocInfo = CCValAssign::BCvt;
1541   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
1542     LocVT = MVT::i64;
1543     LocInfo = CCValAssign::BCvt;
1544   }
1545
1546   // If this is a variadic argument, the RISC-V calling convention requires
1547   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
1548   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
1549   // be used regardless of whether the original argument was split during
1550   // legalisation or not. The argument will not be passed by registers if the
1551   // original type is larger than 2*XLEN, so the register alignment rule does
1552   // not apply.
1553   unsigned TwoXLenInBytes = (2 * XLen) / 8;
1554   if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
1555       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
1556     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
1557     // Skip 'odd' register if necessary.
1558     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
1559       State.AllocateReg(ArgGPRs);
1560   }
1561
1562   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
1563   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
1564       State.getPendingArgFlags();
1565
1566   assert(PendingLocs.size() == PendingArgFlags.size() &&
1567          "PendingLocs and PendingArgFlags out of sync");
1568
1569   // Handle passing f64 on RV32D with a soft float ABI or when floating point
1570   // registers are exhausted.
1571   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
1572     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
1573            "Can't lower f64 if it is split");
1574     // Depending on available argument GPRs, f64 may be passed in a pair of
1575     // GPRs, split between a GPR and the stack, or passed completely on the
1576     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
1577     // cases.
1578     Register Reg = State.AllocateReg(ArgGPRs);
1579     LocVT = MVT::i32;
1580     if (!Reg) {
1581       unsigned StackOffset = State.AllocateStack(8, 8);
1582       State.addLoc(
1583           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1584       return false;
1585     }
1586     if (!State.AllocateReg(ArgGPRs))
1587       State.AllocateStack(4, 4);
1588     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1589     return false;
1590   }
1591
1592   // Split arguments might be passed indirectly, so keep track of the pending
1593   // values.
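// For example (illustrative): an i128 argument on RV64 legalises into two
// i64 parts and can still be passed directly (handled just below), whereas on
// RV32 it becomes four i32 parts and therefore has to be passed indirectly.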
1594 if (ArgFlags.isSplit() || !PendingLocs.empty()) { 1595 LocVT = XLenVT; 1596 LocInfo = CCValAssign::Indirect; 1597 PendingLocs.push_back( 1598 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); 1599 PendingArgFlags.push_back(ArgFlags); 1600 if (!ArgFlags.isSplitEnd()) { 1601 return false; 1602 } 1603 } 1604 1605 // If the split argument only had two elements, it should be passed directly 1606 // in registers or on the stack. 1607 if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) { 1608 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); 1609 // Apply the normal calling convention rules to the first half of the 1610 // split argument. 1611 CCValAssign VA = PendingLocs[0]; 1612 ISD::ArgFlagsTy AF = PendingArgFlags[0]; 1613 PendingLocs.clear(); 1614 PendingArgFlags.clear(); 1615 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, 1616 ArgFlags); 1617 } 1618 1619 // Allocate to a register if possible, or else a stack slot. 1620 Register Reg; 1621 if (ValVT == MVT::f32 && !UseGPRForF32) 1622 Reg = State.AllocateReg(ArgFPR32s, ArgFPR64s); 1623 else if (ValVT == MVT::f64 && !UseGPRForF64) 1624 Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s); 1625 else 1626 Reg = State.AllocateReg(ArgGPRs); 1627 unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8); 1628 1629 // If we reach this point and PendingLocs is non-empty, we must be at the 1630 // end of a split argument that must be passed indirectly. 1631 if (!PendingLocs.empty()) { 1632 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); 1633 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); 1634 1635 for (auto &It : PendingLocs) { 1636 if (Reg) 1637 It.convertToReg(Reg); 1638 else 1639 It.convertToMem(StackOffset); 1640 State.addLoc(It); 1641 } 1642 PendingLocs.clear(); 1643 PendingArgFlags.clear(); 1644 return false; 1645 } 1646 1647 assert((!UseGPRForF32 || !UseGPRForF64 || LocVT == XLenVT) && 1648 "Expected an XLenVT at this stage"); 1649 1650 if (Reg) { 1651 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 1652 return false; 1653 } 1654 1655 // When an f32 or f64 is passed on the stack, no bit-conversion is needed. 
1656   if (ValVT == MVT::f32 || ValVT == MVT::f64) {
1657     LocVT = ValVT;
1658     LocInfo = CCValAssign::Full;
1659   }
1660   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1661   return false;
1662 }
1663
1664 void RISCVTargetLowering::analyzeInputArgs(
1665     MachineFunction &MF, CCState &CCInfo,
1666     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
1667   unsigned NumArgs = Ins.size();
1668   FunctionType *FType = MF.getFunction().getFunctionType();
1669
1670   for (unsigned i = 0; i != NumArgs; ++i) {
1671     MVT ArgVT = Ins[i].VT;
1672     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
1673
1674     Type *ArgTy = nullptr;
1675     if (IsRet)
1676       ArgTy = FType->getReturnType();
1677     else if (Ins[i].isOrigArg())
1678       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
1679
1680     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1681     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1682                  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
1683       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
1684                         << EVT(ArgVT).getEVTString() << '\n');
1685       llvm_unreachable(nullptr);
1686     }
1687   }
1688 }
1689
1690 void RISCVTargetLowering::analyzeOutputArgs(
1691     MachineFunction &MF, CCState &CCInfo,
1692     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
1693     CallLoweringInfo *CLI) const {
1694   unsigned NumArgs = Outs.size();
1695
1696   for (unsigned i = 0; i != NumArgs; i++) {
1697     MVT ArgVT = Outs[i].VT;
1698     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
1699     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
1700
1701     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1702     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1703                  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
1704       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
1705                         << EVT(ArgVT).getEVTString() << '\n');
1706       llvm_unreachable(nullptr);
1707     }
1708   }
1709 }
1710
1711 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
1712 // values.
1713 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
1714                                    const CCValAssign &VA, const SDLoc &DL) {
1715   switch (VA.getLocInfo()) {
1716   default:
1717     llvm_unreachable("Unexpected CCValAssign::LocInfo");
1718   case CCValAssign::Full:
1719     break;
1720   case CCValAssign::BCvt:
1721     if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
1722       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
1723       break;
1724     }
1725     Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
1726     break;
1727   }
1728   return Val;
1729 }
1730
1731 // The caller is responsible for loading the full value if the argument is
1732 // passed with CCValAssign::Indirect.
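// (For CCValAssign::Indirect, the value produced by the unpack helpers below
// is the pointer to the actual argument; LowerFormalArguments then performs
// the loads through that pointer.)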
1733 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, 1734 const CCValAssign &VA, const SDLoc &DL) { 1735 MachineFunction &MF = DAG.getMachineFunction(); 1736 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 1737 EVT LocVT = VA.getLocVT(); 1738 SDValue Val; 1739 const TargetRegisterClass *RC; 1740 1741 switch (LocVT.getSimpleVT().SimpleTy) { 1742 default: 1743 llvm_unreachable("Unexpected register type"); 1744 case MVT::i32: 1745 case MVT::i64: 1746 RC = &RISCV::GPRRegClass; 1747 break; 1748 case MVT::f32: 1749 RC = &RISCV::FPR32RegClass; 1750 break; 1751 case MVT::f64: 1752 RC = &RISCV::FPR64RegClass; 1753 break; 1754 } 1755 1756 Register VReg = RegInfo.createVirtualRegister(RC); 1757 RegInfo.addLiveIn(VA.getLocReg(), VReg); 1758 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); 1759 1760 if (VA.getLocInfo() == CCValAssign::Indirect) 1761 return Val; 1762 1763 return convertLocVTToValVT(DAG, Val, VA, DL); 1764 } 1765 1766 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, 1767 const CCValAssign &VA, const SDLoc &DL) { 1768 EVT LocVT = VA.getLocVT(); 1769 1770 switch (VA.getLocInfo()) { 1771 default: 1772 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 1773 case CCValAssign::Full: 1774 break; 1775 case CCValAssign::BCvt: 1776 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) { 1777 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); 1778 break; 1779 } 1780 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); 1781 break; 1782 } 1783 return Val; 1784 } 1785 1786 // The caller is responsible for loading the full value if the argument is 1787 // passed with CCValAssign::Indirect. 1788 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, 1789 const CCValAssign &VA, const SDLoc &DL) { 1790 MachineFunction &MF = DAG.getMachineFunction(); 1791 MachineFrameInfo &MFI = MF.getFrameInfo(); 1792 EVT LocVT = VA.getLocVT(); 1793 EVT ValVT = VA.getValVT(); 1794 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0)); 1795 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, 1796 VA.getLocMemOffset(), /*Immutable=*/true); 1797 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1798 SDValue Val; 1799 1800 ISD::LoadExtType ExtType; 1801 switch (VA.getLocInfo()) { 1802 default: 1803 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 1804 case CCValAssign::Full: 1805 case CCValAssign::Indirect: 1806 case CCValAssign::BCvt: 1807 ExtType = ISD::NON_EXTLOAD; 1808 break; 1809 } 1810 Val = DAG.getExtLoad( 1811 ExtType, DL, LocVT, Chain, FIN, 1812 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT); 1813 return Val; 1814 } 1815 1816 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, 1817 const CCValAssign &VA, const SDLoc &DL) { 1818 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 && 1819 "Unexpected VA"); 1820 MachineFunction &MF = DAG.getMachineFunction(); 1821 MachineFrameInfo &MFI = MF.getFrameInfo(); 1822 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 1823 1824 if (VA.isMemLoc()) { 1825 // f64 is passed on the stack. 
1826     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
1827     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1828     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
1829                        MachinePointerInfo::getFixedStack(MF, FI));
1830   }
1831
1832   assert(VA.isRegLoc() && "Expected register VA assignment");
1833
1834   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1835   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
1836   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
1837   SDValue Hi;
1838   if (VA.getLocReg() == RISCV::X17) {
1839     // Second half of f64 is passed on the stack.
1840     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
1841     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1842     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
1843                      MachinePointerInfo::getFixedStack(MF, FI));
1844   } else {
1845     // Second half of f64 is passed in another GPR.
1846     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1847     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
1848     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
1849   }
1850   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
1851 }
1852
1853 // FastCC has shown less than a 1% performance improvement on particular
1854 // benchmarks, but it may theoretically benefit some cases.
1855 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
1856                             CCValAssign::LocInfo LocInfo,
1857                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
1858
1859   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
1860     // X5 and X6 might be used for the save-restore libcall.
1861     static const MCPhysReg GPRList[] = {
1862         RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
1863         RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
1864         RISCV::X29, RISCV::X30, RISCV::X31};
1865     if (unsigned Reg = State.AllocateReg(GPRList)) {
1866       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1867       return false;
1868     }
1869   }
1870
1871   if (LocVT == MVT::f32) {
1872     static const MCPhysReg FPR32List[] = {
1873         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
1874         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
1875         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
1876         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
1877     if (unsigned Reg = State.AllocateReg(FPR32List)) {
1878       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1879       return false;
1880     }
1881   }
1882
1883   if (LocVT == MVT::f64) {
1884     static const MCPhysReg FPR64List[] = {
1885         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
1886         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
1887         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
1888         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
1889     if (unsigned Reg = State.AllocateReg(FPR64List)) {
1890       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1891       return false;
1892     }
1893   }
1894
1895   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
1896     unsigned Offset4 = State.AllocateStack(4, 4);
1897     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
1898     return false;
1899   }
1900
1901   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
1902     unsigned Offset5 = State.AllocateStack(8, 8);
1903     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
1904     return false;
1905   }
1906
1907   return true; // CC didn't match.
1908 }
1909
1910 // Transform physical registers into virtual registers.
1911 SDValue RISCVTargetLowering::LowerFormalArguments(
1912     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1913     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1914     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1915
1916   switch (CallConv) {
1917   default:
1918     report_fatal_error("Unsupported calling convention");
1919   case CallingConv::C:
1920   case CallingConv::Fast:
1921     break;
1922   }
1923
1924   MachineFunction &MF = DAG.getMachineFunction();
1925
1926   const Function &Func = MF.getFunction();
1927   if (Func.hasFnAttribute("interrupt")) {
1928     if (!Func.arg_empty())
1929       report_fatal_error(
1930           "Functions with the interrupt attribute cannot have arguments!");
1931
1932     StringRef Kind =
1933         MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1934
1935     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
1936       report_fatal_error(
1937           "Function interrupt attribute argument not supported!");
1938   }
1939
1940   EVT PtrVT = getPointerTy(DAG.getDataLayout());
1941   MVT XLenVT = Subtarget.getXLenVT();
1942   unsigned XLenInBytes = Subtarget.getXLen() / 8;
1943   // Used with varargs to accumulate store chains.
1944   std::vector<SDValue> OutChains;
1945
1946   // Assign locations to all of the incoming arguments.
1947   SmallVector<CCValAssign, 16> ArgLocs;
1948   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1949
1950   if (CallConv == CallingConv::Fast)
1951     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
1952   else
1953     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
1954
1955   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1956     CCValAssign &VA = ArgLocs[i];
1957     SDValue ArgValue;
1958     // Passing f64 on RV32D with a soft float ABI must be handled as a special
1959     // case.
1960     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
1961       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
1962     else if (VA.isRegLoc())
1963       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
1964     else
1965       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
1966
1967     if (VA.getLocInfo() == CCValAssign::Indirect) {
1968       // If the original argument was split and passed by reference (e.g. i128
1969       // on RV32), we need to load all parts of it here (using the same
1970       // address).
1971       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
1972                                    MachinePointerInfo()));
1973       unsigned ArgIndex = Ins[i].OrigArgIndex;
1974       assert(Ins[i].PartOffset == 0);
1975       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
1976         CCValAssign &PartVA = ArgLocs[i + 1];
1977         unsigned PartOffset = Ins[i + 1].PartOffset;
1978         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
1979                                       DAG.getIntPtrConstant(PartOffset, DL));
1980         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
1981                                      MachinePointerInfo()));
1982         ++i;
1983       }
1984       continue;
1985     }
1986     InVals.push_back(ArgValue);
1987   }
1988
1989   if (IsVarArg) {
1990     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
1991     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
1992     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
1993     MachineFrameInfo &MFI = MF.getFrameInfo();
1994     MachineRegisterInfo &RegInfo = MF.getRegInfo();
1995     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1996
1997     // Offset of the first variable argument from stack pointer, and size of
1998     // the varargs save area. For now, the varargs save area is either zero or
1999     // large enough to hold a0-a7.
2000     int VaArgOffset, VarArgsSaveSize;
2001
2002     // If all registers are allocated, then all varargs must be passed on the
2003     // stack and we don't need to save any argregs.
2004     if (ArgRegs.size() == Idx) {
2005       VaArgOffset = CCInfo.getNextStackOffset();
2006       VarArgsSaveSize = 0;
2007     } else {
2008       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
2009       VaArgOffset = -VarArgsSaveSize;
2010     }
2011
2012     // Record the frame index of the first variable argument,
2013     // which is needed by VASTART.
2014     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2015     RVFI->setVarArgsFrameIndex(FI);
2016
2017     // If saving an odd number of registers then create an extra stack slot to
2018     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
2019     // offsets to even-numbered registers remain 2*XLEN-aligned.
2020     if (Idx % 2) {
2021       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
2022       VarArgsSaveSize += XLenInBytes;
2023     }
2024
2025     // Copy the integer registers that may have been used for passing varargs
2026     // to the vararg save area.
2027     for (unsigned I = Idx; I < ArgRegs.size();
2028          ++I, VaArgOffset += XLenInBytes) {
2029       const Register Reg = RegInfo.createVirtualRegister(RC);
2030       RegInfo.addLiveIn(ArgRegs[I], Reg);
2031       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
2032       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2033       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2034       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
2035                                    MachinePointerInfo::getFixedStack(MF, FI));
2036       cast<StoreSDNode>(Store.getNode())
2037           ->getMemOperand()
2038           ->setValue((Value *)nullptr);
2039       OutChains.push_back(Store);
2040     }
2041     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
2042   }
2043
2044   // All stores are grouped in one node to allow the matching between
2045   // the size of Ins and InVals. This only happens for vararg functions.
2046   if (!OutChains.empty()) {
2047     OutChains.push_back(Chain);
2048     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
2049   }
2050
2051   return Chain;
2052 }
2053
2054 /// isEligibleForTailCallOptimization - Check whether the call is eligible
2055 /// for tail call optimization.
2056 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
2057 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
2058     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
2059     const SmallVector<CCValAssign, 16> &ArgLocs) const {
2060
2061   auto &Callee = CLI.Callee;
2062   auto CalleeCC = CLI.CallConv;
2063   auto &Outs = CLI.Outs;
2064   auto &Caller = MF.getFunction();
2065   auto CallerCC = Caller.getCallingConv();
2066
2067   // Exception-handling functions need a special set of instructions to
2068   // indicate a return to the hardware. Tail-calling another function would
2069   // probably break this.
2070   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
2071   // should be expanded as new function attributes are introduced.
2072   if (Caller.hasFnAttribute("interrupt"))
2073     return false;
2074
2075   // Do not tail call opt if the stack is used to pass parameters.
2076   if (CCInfo.getNextStackOffset() != 0)
2077     return false;
2078
2079   // Do not tail call opt if any parameters need to be passed indirectly.
2080   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
2081   // passed indirectly. So the address of the value will be passed in a
2082   // register, or if not available, then the address is put on the stack. In
2083   // order to pass indirectly, space on the stack often needs to be allocated
2084   // in order to store the value. In this case the CCInfo.getNextStackOffset()
2085   // != 0 check is not enough, and we also need to check whether any
2086   // CCValAssign in ArgLocs is passed CCValAssign::Indirect.
2087   for (auto &VA : ArgLocs)
2088     if (VA.getLocInfo() == CCValAssign::Indirect)
2089       return false;
2090
2091   // Do not tail call opt if either caller or callee uses struct return
2092   // semantics.
2093   auto IsCallerStructRet = Caller.hasStructRetAttr();
2094   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
2095   if (IsCallerStructRet || IsCalleeStructRet)
2096     return false;
2097
2098   // Externally-defined functions with weak linkage should not be
2099   // tail-called. The behaviour of branch instructions in this situation (as
2100   // used for tail calls) is implementation-defined, so we cannot rely on the
2101   // linker replacing the tail call with a return.
2102   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2103     const GlobalValue *GV = G->getGlobal();
2104     if (GV->hasExternalWeakLinkage())
2105       return false;
2106   }
2107
2108   // The callee has to preserve all registers the caller needs to preserve.
2109   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2110   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2111   if (CalleeCC != CallerCC) {
2112     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2113     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2114       return false;
2115   }
2116
2117   // Byval parameters hand the function a pointer directly into the stack area
2118   // we want to reuse during a tail call. Working around this *is* possible
2119   // but less efficient and uglier in LowerCall.
2120   for (auto &Arg : Outs)
2121     if (Arg.Flags.isByVal())
2122       return false;
2123
2124   return true;
2125 }
2126
2127 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
2128 // and output parameter nodes.
2129 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
2130                                        SmallVectorImpl<SDValue> &InVals) const {
2131   SelectionDAG &DAG = CLI.DAG;
2132   SDLoc &DL = CLI.DL;
2133   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2134   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2135   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2136   SDValue Chain = CLI.Chain;
2137   SDValue Callee = CLI.Callee;
2138   bool &IsTailCall = CLI.IsTailCall;
2139   CallingConv::ID CallConv = CLI.CallConv;
2140   bool IsVarArg = CLI.IsVarArg;
2141   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2142   MVT XLenVT = Subtarget.getXLenVT();
2143
2144   MachineFunction &MF = DAG.getMachineFunction();
2145
2146   // Analyze the operands of the call, assigning locations to each operand.
2147   SmallVector<CCValAssign, 16> ArgLocs;
2148   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2149
2150   if (CallConv == CallingConv::Fast)
2151     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
2152   else
2153     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
2154
2155   // Check if it's really possible to do a tail call.
2156 if (IsTailCall) 2157 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs); 2158 2159 if (IsTailCall) 2160 ++NumTailCalls; 2161 else if (CLI.CS && CLI.CS.isMustTailCall()) 2162 report_fatal_error("failed to perform tail call elimination on a call " 2163 "site marked musttail"); 2164 2165 // Get a count of how many bytes are to be pushed on the stack. 2166 unsigned NumBytes = ArgCCInfo.getNextStackOffset(); 2167 2168 // Create local copies for byval args 2169 SmallVector<SDValue, 8> ByValArgs; 2170 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 2171 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2172 if (!Flags.isByVal()) 2173 continue; 2174 2175 SDValue Arg = OutVals[i]; 2176 unsigned Size = Flags.getByValSize(); 2177 unsigned Align = Flags.getByValAlign(); 2178 2179 int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false); 2180 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 2181 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); 2182 2183 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align, 2184 /*IsVolatile=*/false, 2185 /*AlwaysInline=*/false, 2186 IsTailCall, MachinePointerInfo(), 2187 MachinePointerInfo()); 2188 ByValArgs.push_back(FIPtr); 2189 } 2190 2191 if (!IsTailCall) 2192 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); 2193 2194 // Copy argument values to their designated locations. 2195 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass; 2196 SmallVector<SDValue, 8> MemOpChains; 2197 SDValue StackPtr; 2198 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { 2199 CCValAssign &VA = ArgLocs[i]; 2200 SDValue ArgValue = OutVals[i]; 2201 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2202 2203 // Handle passing f64 on RV32D with a soft float ABI as a special case. 2204 bool IsF64OnRV32DSoftABI = 2205 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; 2206 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { 2207 SDValue SplitF64 = DAG.getNode( 2208 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); 2209 SDValue Lo = SplitF64.getValue(0); 2210 SDValue Hi = SplitF64.getValue(1); 2211 2212 Register RegLo = VA.getLocReg(); 2213 RegsToPass.push_back(std::make_pair(RegLo, Lo)); 2214 2215 if (RegLo == RISCV::X17) { 2216 // Second half of f64 is passed on the stack. 2217 // Work out the address of the stack slot. 2218 if (!StackPtr.getNode()) 2219 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 2220 // Emit the store. 2221 MemOpChains.push_back( 2222 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); 2223 } else { 2224 // Second half of f64 is passed in another GPR. 2225 assert(RegLo < RISCV::X31 && "Invalid register pair"); 2226 Register RegHigh = RegLo + 1; 2227 RegsToPass.push_back(std::make_pair(RegHigh, Hi)); 2228 } 2229 continue; 2230 } 2231 2232 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way 2233 // as any other MemLoc. 2234 2235 // Promote the value if needed. 2236 // For now, only handle fully promoted and indirect arguments. 2237 if (VA.getLocInfo() == CCValAssign::Indirect) { 2238 // Store the argument in a stack slot and pass its address. 2239 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT); 2240 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 2241 MemOpChains.push_back( 2242 DAG.getStore(Chain, DL, ArgValue, SpillSlot, 2243 MachinePointerInfo::getFixedStack(MF, FI))); 2244 // If the original argument was split (e.g. i128), we need 2245 // to store all parts of it here (and pass just one address). 
2246       unsigned ArgIndex = Outs[i].OrigArgIndex;
2247       assert(Outs[i].PartOffset == 0);
2248       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
2249         SDValue PartValue = OutVals[i + 1];
2250         unsigned PartOffset = Outs[i + 1].PartOffset;
2251         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
2252                                       DAG.getIntPtrConstant(PartOffset, DL));
2253         MemOpChains.push_back(
2254             DAG.getStore(Chain, DL, PartValue, Address,
2255                          MachinePointerInfo::getFixedStack(MF, FI)));
2256         ++i;
2257       }
2258       ArgValue = SpillSlot;
2259     } else {
2260       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
2261     }
2262
2263     // Use local copy if it is a byval arg.
2264     if (Flags.isByVal())
2265       ArgValue = ByValArgs[j++];
2266
2267     if (VA.isRegLoc()) {
2268       // Queue up the argument copies and emit them at the end.
2269       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
2270     } else {
2271       assert(VA.isMemLoc() && "Argument not register or memory");
2272       assert(!IsTailCall && "Tail call not allowed if stack is used "
2273                             "for passing parameters");
2274
2275       // Work out the address of the stack slot.
2276       if (!StackPtr.getNode())
2277         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
2278       SDValue Address =
2279           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
2280                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
2281
2282       // Emit the store.
2283       MemOpChains.push_back(
2284           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
2285     }
2286   }
2287
2288   // Join the stores, which are independent of one another.
2289   if (!MemOpChains.empty())
2290     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2291
2292   SDValue Glue;
2293
2294   // Build a sequence of copy-to-reg nodes, chained and glued together.
2295   for (auto &Reg : RegsToPass) {
2296     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
2297     Glue = Chain.getValue(1);
2298   }
2299
2300   // Validate that none of the argument registers have been marked as
2301   // reserved; if so, report an error. Do the same for the return address if
2302   // this is not a tail call.
2303   validateCCReservedRegs(RegsToPass, MF);
2304   if (!IsTailCall &&
2305       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
2306     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2307         MF.getFunction(),
2308         "Return address register required, but has been reserved."});
2309
2310   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
2311   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
2312   // split it, and so the direct call can be matched by PseudoCALL.
2313   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
2314     const GlobalValue *GV = S->getGlobal();
2315
2316     unsigned OpFlags = RISCVII::MO_CALL;
2317     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
2318       OpFlags = RISCVII::MO_PLT;
2319
2320     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
2321   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2322     unsigned OpFlags = RISCVII::MO_CALL;
2323
2324     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
2325                                                  nullptr))
2326       OpFlags = RISCVII::MO_PLT;
2327
2328     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
2329   }
2330
2331   // The first call operand is the chain and the second is the target address.
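// The complete operand list built below is, schematically:
//   { Chain, Callee, ArgReg0, ArgReg1, ..., [RegMask,] [Glue] }
// where the register mask is added only for non-tail calls and the glue only
// if argument copies were emitted.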
2332 SmallVector<SDValue, 8> Ops; 2333 Ops.push_back(Chain); 2334 Ops.push_back(Callee); 2335 2336 // Add argument registers to the end of the list so that they are 2337 // known live into the call. 2338 for (auto &Reg : RegsToPass) 2339 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); 2340 2341 if (!IsTailCall) { 2342 // Add a register mask operand representing the call-preserved registers. 2343 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 2344 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 2345 assert(Mask && "Missing call preserved mask for calling convention"); 2346 Ops.push_back(DAG.getRegisterMask(Mask)); 2347 } 2348 2349 // Glue the call to the argument copies, if any. 2350 if (Glue.getNode()) 2351 Ops.push_back(Glue); 2352 2353 // Emit the call. 2354 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2355 2356 if (IsTailCall) { 2357 MF.getFrameInfo().setHasTailCall(); 2358 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops); 2359 } 2360 2361 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops); 2362 Glue = Chain.getValue(1); 2363 2364 // Mark the end of the call, which is glued to the call itself. 2365 Chain = DAG.getCALLSEQ_END(Chain, 2366 DAG.getConstant(NumBytes, DL, PtrVT, true), 2367 DAG.getConstant(0, DL, PtrVT, true), 2368 Glue, DL); 2369 Glue = Chain.getValue(1); 2370 2371 // Assign locations to each value returned by this call. 2372 SmallVector<CCValAssign, 16> RVLocs; 2373 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); 2374 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true); 2375 2376 // Copy all of the result registers out of their specified physreg. 2377 for (auto &VA : RVLocs) { 2378 // Copy the value out 2379 SDValue RetValue = 2380 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue); 2381 // Glue the RetValue to the end of the call sequence 2382 Chain = RetValue.getValue(1); 2383 Glue = RetValue.getValue(2); 2384 2385 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 2386 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment"); 2387 SDValue RetValue2 = 2388 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue); 2389 Chain = RetValue2.getValue(1); 2390 Glue = RetValue2.getValue(2); 2391 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue, 2392 RetValue2); 2393 } 2394 2395 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL); 2396 2397 InVals.push_back(RetValue); 2398 } 2399 2400 return Chain; 2401 } 2402 2403 bool RISCVTargetLowering::CanLowerReturn( 2404 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, 2405 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { 2406 SmallVector<CCValAssign, 16> RVLocs; 2407 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 2408 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 2409 MVT VT = Outs[i].VT; 2410 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 2411 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 2412 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, 2413 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr)) 2414 return false; 2415 } 2416 return true; 2417 } 2418 2419 SDValue 2420 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 2421 bool IsVarArg, 2422 const SmallVectorImpl<ISD::OutputArg> &Outs, 2423 const SmallVectorImpl<SDValue> &OutVals, 2424 const SDLoc &DL, SelectionDAG &DAG) const { 2425 const MachineFunction &MF = DAG.getMachineFunction(); 2426 const 
RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 2427 2428 // Stores the assignment of the return value to a location. 2429 SmallVector<CCValAssign, 16> RVLocs; 2430 2431 // Info about the registers and stack slot. 2432 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 2433 *DAG.getContext()); 2434 2435 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true, 2436 nullptr); 2437 2438 SDValue Glue; 2439 SmallVector<SDValue, 4> RetOps(1, Chain); 2440 2441 // Copy the result values into the output registers. 2442 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) { 2443 SDValue Val = OutVals[i]; 2444 CCValAssign &VA = RVLocs[i]; 2445 assert(VA.isRegLoc() && "Can only return in registers!"); 2446 2447 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 2448 // Handle returning f64 on RV32D with a soft float ABI. 2449 assert(VA.isRegLoc() && "Expected return via registers"); 2450 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL, 2451 DAG.getVTList(MVT::i32, MVT::i32), Val); 2452 SDValue Lo = SplitF64.getValue(0); 2453 SDValue Hi = SplitF64.getValue(1); 2454 Register RegLo = VA.getLocReg(); 2455 assert(RegLo < RISCV::X31 && "Invalid register pair"); 2456 Register RegHi = RegLo + 1; 2457 2458 if (STI.isRegisterReservedByUser(RegLo) || 2459 STI.isRegisterReservedByUser(RegHi)) 2460 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 2461 MF.getFunction(), 2462 "Return value register required, but has been reserved."}); 2463 2464 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue); 2465 Glue = Chain.getValue(1); 2466 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32)); 2467 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue); 2468 Glue = Chain.getValue(1); 2469 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32)); 2470 } else { 2471 // Handle a 'normal' return. 2472 Val = convertValVTToLocVT(DAG, Val, VA, DL); 2473 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue); 2474 2475 if (STI.isRegisterReservedByUser(VA.getLocReg())) 2476 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 2477 MF.getFunction(), 2478 "Return value register required, but has been reserved."}); 2479 2480 // Guarantee that all emitted copies are stuck together. 2481 Glue = Chain.getValue(1); 2482 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2483 } 2484 } 2485 2486 RetOps[0] = Chain; // Update chain. 2487 2488 // Add the glue node if we have it. 2489 if (Glue.getNode()) { 2490 RetOps.push_back(Glue); 2491 } 2492 2493 // Interrupt service routines use different return instructions. 
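// The return opcode is chosen below from the attribute's value:
// "user" -> uret, "supervisor" -> sret, and "machine" -> mret.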
2494 const Function &Func = DAG.getMachineFunction().getFunction(); 2495 if (Func.hasFnAttribute("interrupt")) { 2496 if (!Func.getReturnType()->isVoidTy()) 2497 report_fatal_error( 2498 "Functions with the interrupt attribute must have void return type!"); 2499 2500 MachineFunction &MF = DAG.getMachineFunction(); 2501 StringRef Kind = 2502 MF.getFunction().getFnAttribute("interrupt").getValueAsString(); 2503 2504 unsigned RetOpc; 2505 if (Kind == "user") 2506 RetOpc = RISCVISD::URET_FLAG; 2507 else if (Kind == "supervisor") 2508 RetOpc = RISCVISD::SRET_FLAG; 2509 else 2510 RetOpc = RISCVISD::MRET_FLAG; 2511 2512 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps); 2513 } 2514 2515 return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps); 2516 } 2517 2518 void RISCVTargetLowering::validateCCReservedRegs( 2519 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs, 2520 MachineFunction &MF) const { 2521 const Function &F = MF.getFunction(); 2522 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 2523 2524 if (std::any_of(std::begin(Regs), std::end(Regs), [&STI](auto Reg) { 2525 return STI.isRegisterReservedByUser(Reg.first); 2526 })) 2527 F.getContext().diagnose(DiagnosticInfoUnsupported{ 2528 F, "Argument register required, but has been reserved."}); 2529 } 2530 2531 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { 2532 switch ((RISCVISD::NodeType)Opcode) { 2533 case RISCVISD::FIRST_NUMBER: 2534 break; 2535 case RISCVISD::RET_FLAG: 2536 return "RISCVISD::RET_FLAG"; 2537 case RISCVISD::URET_FLAG: 2538 return "RISCVISD::URET_FLAG"; 2539 case RISCVISD::SRET_FLAG: 2540 return "RISCVISD::SRET_FLAG"; 2541 case RISCVISD::MRET_FLAG: 2542 return "RISCVISD::MRET_FLAG"; 2543 case RISCVISD::CALL: 2544 return "RISCVISD::CALL"; 2545 case RISCVISD::SELECT_CC: 2546 return "RISCVISD::SELECT_CC"; 2547 case RISCVISD::BuildPairF64: 2548 return "RISCVISD::BuildPairF64"; 2549 case RISCVISD::SplitF64: 2550 return "RISCVISD::SplitF64"; 2551 case RISCVISD::TAIL: 2552 return "RISCVISD::TAIL"; 2553 case RISCVISD::SLLW: 2554 return "RISCVISD::SLLW"; 2555 case RISCVISD::SRAW: 2556 return "RISCVISD::SRAW"; 2557 case RISCVISD::SRLW: 2558 return "RISCVISD::SRLW"; 2559 case RISCVISD::DIVW: 2560 return "RISCVISD::DIVW"; 2561 case RISCVISD::DIVUW: 2562 return "RISCVISD::DIVUW"; 2563 case RISCVISD::REMUW: 2564 return "RISCVISD::REMUW"; 2565 case RISCVISD::FMV_W_X_RV64: 2566 return "RISCVISD::FMV_W_X_RV64"; 2567 case RISCVISD::FMV_X_ANYEXTW_RV64: 2568 return "RISCVISD::FMV_X_ANYEXTW_RV64"; 2569 case RISCVISD::READ_CYCLE_WIDE: 2570 return "RISCVISD::READ_CYCLE_WIDE"; 2571 } 2572 return nullptr; 2573 } 2574 2575 /// getConstraintType - Given a constraint letter, return the type of 2576 /// constraint it is for this target. 2577 RISCVTargetLowering::ConstraintType 2578 RISCVTargetLowering::getConstraintType(StringRef Constraint) const { 2579 if (Constraint.size() == 1) { 2580 switch (Constraint[0]) { 2581 default: 2582 break; 2583 case 'f': 2584 return C_RegisterClass; 2585 case 'I': 2586 case 'J': 2587 case 'K': 2588 return C_Immediate; 2589 case 'A': 2590 return C_Memory; 2591 } 2592 } 2593 return TargetLowering::getConstraintType(Constraint); 2594 } 2595 2596 std::pair<unsigned, const TargetRegisterClass *> 2597 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 2598 StringRef Constraint, 2599 MVT VT) const { 2600 // First, see if this is a constraint that directly corresponds to a 2601 // RISCV register class. 
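// For example (an illustrative C-level use, not part of this file):
//   asm volatile ("fadd.s %0, %1, %2" : "=f"(res) : "f"(a), "f"(b));
// would reach this function with Constraint == "f" and VT == MVT::f32.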
2602 if (Constraint.size() == 1) { 2603 switch (Constraint[0]) { 2604 case 'r': 2605 return std::make_pair(0U, &RISCV::GPRRegClass); 2606 case 'f': 2607 if (Subtarget.hasStdExtF() && VT == MVT::f32) 2608 return std::make_pair(0U, &RISCV::FPR32RegClass); 2609 if (Subtarget.hasStdExtD() && VT == MVT::f64) 2610 return std::make_pair(0U, &RISCV::FPR64RegClass); 2611 break; 2612 default: 2613 break; 2614 } 2615 } 2616 2617 // Clang will correctly decode the usage of register name aliases into their 2618 // official names. However, other frontends like `rustc` do not. This allows 2619 // users of these frontends to use the ABI names for registers in LLVM-style 2620 // register constraints. 2621 Register XRegFromAlias = StringSwitch<Register>(Constraint.lower()) 2622 .Case("{zero}", RISCV::X0) 2623 .Case("{ra}", RISCV::X1) 2624 .Case("{sp}", RISCV::X2) 2625 .Case("{gp}", RISCV::X3) 2626 .Case("{tp}", RISCV::X4) 2627 .Case("{t0}", RISCV::X5) 2628 .Case("{t1}", RISCV::X6) 2629 .Case("{t2}", RISCV::X7) 2630 .Cases("{s0}", "{fp}", RISCV::X8) 2631 .Case("{s1}", RISCV::X9) 2632 .Case("{a0}", RISCV::X10) 2633 .Case("{a1}", RISCV::X11) 2634 .Case("{a2}", RISCV::X12) 2635 .Case("{a3}", RISCV::X13) 2636 .Case("{a4}", RISCV::X14) 2637 .Case("{a5}", RISCV::X15) 2638 .Case("{a6}", RISCV::X16) 2639 .Case("{a7}", RISCV::X17) 2640 .Case("{s2}", RISCV::X18) 2641 .Case("{s3}", RISCV::X19) 2642 .Case("{s4}", RISCV::X20) 2643 .Case("{s5}", RISCV::X21) 2644 .Case("{s6}", RISCV::X22) 2645 .Case("{s7}", RISCV::X23) 2646 .Case("{s8}", RISCV::X24) 2647 .Case("{s9}", RISCV::X25) 2648 .Case("{s10}", RISCV::X26) 2649 .Case("{s11}", RISCV::X27) 2650 .Case("{t3}", RISCV::X28) 2651 .Case("{t4}", RISCV::X29) 2652 .Case("{t5}", RISCV::X30) 2653 .Case("{t6}", RISCV::X31) 2654 .Default(RISCV::NoRegister); 2655 if (XRegFromAlias != RISCV::NoRegister) 2656 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass); 2657 2658 // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the 2659 // TableGen record rather than the AsmName to choose registers for InlineAsm 2660 // constraints, plus we want to match those names to the widest floating point 2661 // register type available, manually select floating point registers here. 2662 // 2663 // The second case is the ABI name of the register, so that frontends can also 2664 // use the ABI names in register constraint lists. 
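// For example (illustrative), "{f10}" and "{fa0}" in a constraint list both
// resolve to the same physical register through the table below.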
2665 if (Subtarget.hasStdExtF() || Subtarget.hasStdExtD()) { 2666 std::pair<Register, Register> FReg = 2667 StringSwitch<std::pair<Register, Register>>(Constraint.lower()) 2668 .Cases("{f0}", "{ft0}", {RISCV::F0_F, RISCV::F0_D}) 2669 .Cases("{f1}", "{ft1}", {RISCV::F1_F, RISCV::F1_D}) 2670 .Cases("{f2}", "{ft2}", {RISCV::F2_F, RISCV::F2_D}) 2671 .Cases("{f3}", "{ft3}", {RISCV::F3_F, RISCV::F3_D}) 2672 .Cases("{f4}", "{ft4}", {RISCV::F4_F, RISCV::F4_D}) 2673 .Cases("{f5}", "{ft5}", {RISCV::F5_F, RISCV::F5_D}) 2674 .Cases("{f6}", "{ft6}", {RISCV::F6_F, RISCV::F6_D}) 2675 .Cases("{f7}", "{ft7}", {RISCV::F7_F, RISCV::F7_D}) 2676 .Cases("{f8}", "{fs0}", {RISCV::F8_F, RISCV::F8_D}) 2677 .Cases("{f9}", "{fs1}", {RISCV::F9_F, RISCV::F9_D}) 2678 .Cases("{f10}", "{fa0}", {RISCV::F10_F, RISCV::F10_D}) 2679 .Cases("{f11}", "{fa1}", {RISCV::F11_F, RISCV::F11_D}) 2680 .Cases("{f12}", "{fa2}", {RISCV::F12_F, RISCV::F12_D}) 2681 .Cases("{f13}", "{fa3}", {RISCV::F13_F, RISCV::F13_D}) 2682 .Cases("{f14}", "{fa4}", {RISCV::F14_F, RISCV::F14_D}) 2683 .Cases("{f15}", "{fa5}", {RISCV::F15_F, RISCV::F15_D}) 2684 .Cases("{f16}", "{fa6}", {RISCV::F16_F, RISCV::F16_D}) 2685 .Cases("{f17}", "{fa7}", {RISCV::F17_F, RISCV::F17_D}) 2686 .Cases("{f18}", "{fs2}", {RISCV::F18_F, RISCV::F18_D}) 2687 .Cases("{f19}", "{fs3}", {RISCV::F19_F, RISCV::F19_D}) 2688 .Cases("{f20}", "{fs4}", {RISCV::F20_F, RISCV::F20_D}) 2689 .Cases("{f21}", "{fs5}", {RISCV::F21_F, RISCV::F21_D}) 2690 .Cases("{f22}", "{fs6}", {RISCV::F22_F, RISCV::F22_D}) 2691 .Cases("{f23}", "{fs7}", {RISCV::F23_F, RISCV::F23_D}) 2692 .Cases("{f24}", "{fs8}", {RISCV::F24_F, RISCV::F24_D}) 2693 .Cases("{f25}", "{fs9}", {RISCV::F25_F, RISCV::F25_D}) 2694 .Cases("{f26}", "{fs10}", {RISCV::F26_F, RISCV::F26_D}) 2695 .Cases("{f27}", "{fs11}", {RISCV::F27_F, RISCV::F27_D}) 2696 .Cases("{f28}", "{ft8}", {RISCV::F28_F, RISCV::F28_D}) 2697 .Cases("{f29}", "{ft9}", {RISCV::F29_F, RISCV::F29_D}) 2698 .Cases("{f30}", "{ft10}", {RISCV::F30_F, RISCV::F30_D}) 2699 .Cases("{f31}", "{ft11}", {RISCV::F31_F, RISCV::F31_D}) 2700 .Default({RISCV::NoRegister, RISCV::NoRegister}); 2701 if (FReg.first != RISCV::NoRegister) 2702 return Subtarget.hasStdExtD() 2703 ? std::make_pair(FReg.second, &RISCV::FPR64RegClass) 2704 : std::make_pair(FReg.first, &RISCV::FPR32RegClass); 2705 } 2706 2707 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 2708 } 2709 2710 unsigned 2711 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const { 2712 // Currently only support length 1 constraints. 2713 if (ConstraintCode.size() == 1) { 2714 switch (ConstraintCode[0]) { 2715 case 'A': 2716 return InlineAsm::Constraint_A; 2717 default: 2718 break; 2719 } 2720 } 2721 2722 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); 2723 } 2724 2725 void RISCVTargetLowering::LowerAsmOperandForConstraint( 2726 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, 2727 SelectionDAG &DAG) const { 2728 // Currently only support length 1 constraints. 2729 if (Constraint.length() == 1) { 2730 switch (Constraint[0]) { 2731 case 'I': 2732 // Validate & create a 12-bit signed immediate operand. 2733 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2734 uint64_t CVal = C->getSExtValue(); 2735 if (isInt<12>(CVal)) 2736 Ops.push_back( 2737 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 2738 } 2739 return; 2740 case 'J': 2741 // Validate & create an integer zero operand. 
2742 if (auto *C = dyn_cast<ConstantSDNode>(Op)) 2743 if (C->getZExtValue() == 0) 2744 Ops.push_back( 2745 DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT())); 2746 return; 2747 case 'K': 2748 // Validate & create a 5-bit unsigned immediate operand. 2749 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2750 uint64_t CVal = C->getZExtValue(); 2751 if (isUInt<5>(CVal)) 2752 Ops.push_back( 2753 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 2754 } 2755 return; 2756 default: 2757 break; 2758 } 2759 } 2760 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 2761 } 2762 2763 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 2764 Instruction *Inst, 2765 AtomicOrdering Ord) const { 2766 if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent) 2767 return Builder.CreateFence(Ord); 2768 if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord)) 2769 return Builder.CreateFence(AtomicOrdering::Release); 2770 return nullptr; 2771 } 2772 2773 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 2774 Instruction *Inst, 2775 AtomicOrdering Ord) const { 2776 if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord)) 2777 return Builder.CreateFence(AtomicOrdering::Acquire); 2778 return nullptr; 2779 } 2780 2781 TargetLowering::AtomicExpansionKind 2782 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { 2783 // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating 2784 // point operations can't be used in an lr/sc sequence without breaking the 2785 // forward-progress guarantee. 2786 if (AI->isFloatingPointOperation()) 2787 return AtomicExpansionKind::CmpXChg; 2788 2789 unsigned Size = AI->getType()->getPrimitiveSizeInBits(); 2790 if (Size == 8 || Size == 16) 2791 return AtomicExpansionKind::MaskedIntrinsic; 2792 return AtomicExpansionKind::None; 2793 } 2794 2795 static Intrinsic::ID 2796 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) { 2797 if (XLen == 32) { 2798 switch (BinOp) { 2799 default: 2800 llvm_unreachable("Unexpected AtomicRMW BinOp"); 2801 case AtomicRMWInst::Xchg: 2802 return Intrinsic::riscv_masked_atomicrmw_xchg_i32; 2803 case AtomicRMWInst::Add: 2804 return Intrinsic::riscv_masked_atomicrmw_add_i32; 2805 case AtomicRMWInst::Sub: 2806 return Intrinsic::riscv_masked_atomicrmw_sub_i32; 2807 case AtomicRMWInst::Nand: 2808 return Intrinsic::riscv_masked_atomicrmw_nand_i32; 2809 case AtomicRMWInst::Max: 2810 return Intrinsic::riscv_masked_atomicrmw_max_i32; 2811 case AtomicRMWInst::Min: 2812 return Intrinsic::riscv_masked_atomicrmw_min_i32; 2813 case AtomicRMWInst::UMax: 2814 return Intrinsic::riscv_masked_atomicrmw_umax_i32; 2815 case AtomicRMWInst::UMin: 2816 return Intrinsic::riscv_masked_atomicrmw_umin_i32; 2817 } 2818 } 2819 2820 if (XLen == 64) { 2821 switch (BinOp) { 2822 default: 2823 llvm_unreachable("Unexpected AtomicRMW BinOp"); 2824 case AtomicRMWInst::Xchg: 2825 return Intrinsic::riscv_masked_atomicrmw_xchg_i64; 2826 case AtomicRMWInst::Add: 2827 return Intrinsic::riscv_masked_atomicrmw_add_i64; 2828 case AtomicRMWInst::Sub: 2829 return Intrinsic::riscv_masked_atomicrmw_sub_i64; 2830 case AtomicRMWInst::Nand: 2831 return Intrinsic::riscv_masked_atomicrmw_nand_i64; 2832 case AtomicRMWInst::Max: 2833 return Intrinsic::riscv_masked_atomicrmw_max_i64; 2834 case AtomicRMWInst::Min: 2835 return Intrinsic::riscv_masked_atomicrmw_min_i64; 2836 case AtomicRMWInst::UMax: 2837 return Intrinsic::riscv_masked_atomicrmw_umax_i64; 2838 
case AtomicRMWInst::UMin: 2839 return Intrinsic::riscv_masked_atomicrmw_umin_i64; 2840 } 2841 } 2842 2843 llvm_unreachable("Unexpected XLen\n"); 2844 } 2845 2846 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic( 2847 IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, 2848 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const { 2849 unsigned XLen = Subtarget.getXLen(); 2850 Value *Ordering = 2851 Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering())); 2852 Type *Tys[] = {AlignedAddr->getType()}; 2853 Function *LrwOpScwLoop = Intrinsic::getDeclaration( 2854 AI->getModule(), 2855 getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys); 2856 2857 if (XLen == 64) { 2858 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty()); 2859 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 2860 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty()); 2861 } 2862 2863 Value *Result; 2864 2865 // Must pass the shift amount needed to sign extend the loaded value prior 2866 // to performing a signed comparison for min/max. ShiftAmt is the number of 2867 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which 2868 // is the number of bits to left+right shift the value in order to 2869 // sign-extend. 2870 if (AI->getOperation() == AtomicRMWInst::Min || 2871 AI->getOperation() == AtomicRMWInst::Max) { 2872 const DataLayout &DL = AI->getModule()->getDataLayout(); 2873 unsigned ValWidth = 2874 DL.getTypeStoreSizeInBits(AI->getValOperand()->getType()); 2875 Value *SextShamt = 2876 Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt); 2877 Result = Builder.CreateCall(LrwOpScwLoop, 2878 {AlignedAddr, Incr, Mask, SextShamt, Ordering}); 2879 } else { 2880 Result = 2881 Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering}); 2882 } 2883 2884 if (XLen == 64) 2885 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 2886 return Result; 2887 } 2888 2889 TargetLowering::AtomicExpansionKind 2890 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR( 2891 AtomicCmpXchgInst *CI) const { 2892 unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits(); 2893 if (Size == 8 || Size == 16) 2894 return AtomicExpansionKind::MaskedIntrinsic; 2895 return AtomicExpansionKind::None; 2896 } 2897 2898 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic( 2899 IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, 2900 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const { 2901 unsigned XLen = Subtarget.getXLen(); 2902 Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord)); 2903 Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32; 2904 if (XLen == 64) { 2905 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty()); 2906 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty()); 2907 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 2908 CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64; 2909 } 2910 Type *Tys[] = {AlignedAddr->getType()}; 2911 Function *MaskedCmpXchg = 2912 Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys); 2913 Value *Result = Builder.CreateCall( 2914 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering}); 2915 if (XLen == 64) 2916 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 2917 return Result; 2918 } 2919 2920 unsigned RISCVTargetLowering::getExceptionPointerRegister( 2921 const Constant *PersonalityFn) const { 2922 return RISCV::X10; 2923 } 2924 2925 unsigned 
RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
2926
2927   return RISCV::X11;
2928 }
2929
2930 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
2931   // Return false to suppress unnecessary extensions when a libcall argument
2932   // or return value is of f32 type under the LP64 ABI.
2933   RISCVABI::ABI ABI = Subtarget.getTargetABI();
2934   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
2935     return false;
2936
2937   return true;
2938 }
2939
2940 #define GET_REGISTER_MATCHER
2941 #include "RISCVGenAsmMatcher.inc"
2942
2943 Register
2944 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
2945                                        const MachineFunction &MF) const {
2946   Register Reg = MatchRegisterAltName(RegName);
2947   if (Reg == RISCV::NoRegister)
2948     Reg = MatchRegisterName(RegName);
2949   if (Reg == RISCV::NoRegister)
2950     report_fatal_error(
2951         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
2952   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
2953   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
2954     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
2955                              StringRef(RegName) + "\"."));
2956   return Reg;
2957 }
2958