//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISCV target.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-isel"

void RISCVDAGToDAGISel::PostprocessISelDAG() {
  doPeepholeLoadStoreADDI();
}

static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
                         MVT XLenVT) {
  RISCVMatInt::InstSeq Seq;
  RISCVMatInt::generateInstSeq(Imm, XLenVT == MVT::i64, Seq);

  SDNode *Result = nullptr;
  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
  for (RISCVMatInt::Inst &Inst : Seq) {
    SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
    if (Inst.Opc == RISCV::LUI)
      Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
    else
      Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);

    // Only the first instruction has X0 as its source.
    SrcReg = SDValue(Result, 0);
  }

  return Result;
}

// Returns true if the Node is an ISD::AND with a constant argument. If so,
// set Mask to that constant value.
static bool isConstantMask(SDNode *Node, uint64_t &Mask) {
  if (Node->getOpcode() == ISD::AND &&
      Node->getOperand(1).getOpcode() == ISD::Constant) {
    Mask = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    return true;
  }
  return false;
}

void RISCVDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we have already selected.
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  // Instruction Selection not handled by the auto-generated tablegen selection
  // should be handled here.
  unsigned Opcode = Node->getOpcode();
  MVT XLenVT = Subtarget->getXLenVT();
  SDLoc DL(Node);
  EVT VT = Node->getValueType(0);

  switch (Opcode) {
  case ISD::ADD: {
    // Optimize (add r, imm) to (addi (addi r, imm0), imm1) if applicable. The
    // immediate must be in specific ranges and have a single use.
    if (auto *ConstOp = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
      if (!(ConstOp->hasOneUse()))
        break;
      // The imm must be in range [-4096,-2049] or [2048,4094].
      int64_t Imm = ConstOp->getSExtValue();
      if (!(-4096 <= Imm && Imm <= -2049) && !(2048 <= Imm && Imm <= 4094))
        break;
      // Break the imm into imm0 + imm1.
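      // Splitting as imm0 = Imm - Imm / 2 and imm1 = Imm / 2 keeps both halves
      // within the signed 12-bit ADDI range: for example, Imm = 4094 splits
      // into 2047 + 2047, and Imm = -4096 splits into -2048 + -2048.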
      const SDValue ImmOp0 = CurDAG->getTargetConstant(Imm - Imm / 2, DL, VT);
      const SDValue ImmOp1 = CurDAG->getTargetConstant(Imm / 2, DL, VT);
      auto *NodeAddi0 = CurDAG->getMachineNode(RISCV::ADDI, DL, VT,
                                               Node->getOperand(0), ImmOp0);
      auto *NodeAddi1 = CurDAG->getMachineNode(RISCV::ADDI, DL, VT,
                                               SDValue(NodeAddi0, 0), ImmOp1);
      ReplaceNode(Node, NodeAddi1);
      return;
    }
    break;
  }
  case ISD::Constant: {
    auto ConstNode = cast<ConstantSDNode>(Node);
    if (VT == XLenVT && ConstNode->isNullValue()) {
      SDValue New = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                           RISCV::X0, XLenVT);
      ReplaceNode(Node, New.getNode());
      return;
    }
    int64_t Imm = ConstNode->getSExtValue();
    if (XLenVT == MVT::i64) {
      ReplaceNode(Node, selectImm(CurDAG, SDLoc(Node), Imm, XLenVT));
      return;
    }
    break;
  }
  case ISD::FrameIndex: {
    SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
    ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
    return;
  }
  case ISD::SRL: {
    if (!Subtarget->is64Bit())
      break;
    SDValue Op0 = Node->getOperand(0);
    SDValue Op1 = Node->getOperand(1);
    uint64_t Mask;
    // Match (srl (and val, mask), imm) where the result would be a
    // zero-extended 32-bit integer. i.e. the mask is 0xffffffff or the result
    // is equivalent to this (SimplifyDemandedBits may have removed lower bits
    // from the mask that aren't necessary due to the right-shifting).
    if (Op1.getOpcode() == ISD::Constant &&
        isConstantMask(Op0.getNode(), Mask)) {
      uint64_t ShAmt = cast<ConstantSDNode>(Op1.getNode())->getZExtValue();

      if ((Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff) {
        SDValue ShAmtVal =
            CurDAG->getTargetConstant(ShAmt, SDLoc(Node), XLenVT);
        CurDAG->SelectNodeTo(Node, RISCV::SRLIW, XLenVT, Op0.getOperand(0),
                             ShAmtVal);
        return;
      }
    }
    break;
  }
  case RISCVISD::READ_CYCLE_WIDE:
    assert(!Subtarget->is64Bit() && "READ_CYCLE_WIDE is only used on riscv32");

    ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ReadCycleWide, DL, MVT::i32,
                                             MVT::i32, MVT::Other,
                                             Node->getOperand(0)));
    return;
  }

  // Select the default instruction.
  SelectCode(Node);
}

bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch (ConstraintID) {
  case InlineAsm::Constraint_m:
    // We just support simple memory operands that have a single address
    // operand and need no special handling.
    OutOps.push_back(Op);
    return false;
  case InlineAsm::Constraint_A:
    OutOps.push_back(Op);
    return false;
  default:
    break;
  }

  return true;
}

bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
  if (auto FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
    return true;
  }
  return false;
}

// Check that it is a SLOI (Shift Left Ones Immediate). We first check that
// it is the right node tree:
//
// (OR (SHL RS1, VC2), VC1)
//
// and then we check that VC1, the mask used to fill with ones, is compatible
// with VC2, the shamt:
//
// VC1 == maskTrailingOnes<uint64_t>(VC2)

bool RISCVDAGToDAGISel::SelectSLOI(SDValue N, SDValue &RS1, SDValue &Shamt) {
  MVT XLenVT = Subtarget->getXLenVT();
  if (N.getOpcode() == ISD::OR) {
    SDValue Or = N;
    if (Or.getOperand(0).getOpcode() == ISD::SHL) {
      SDValue Shl = Or.getOperand(0);
      if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
          isa<ConstantSDNode>(Or.getOperand(1))) {
        if (XLenVT == MVT::i64) {
          uint64_t VC1 = Or.getConstantOperandVal(1);
          uint64_t VC2 = Shl.getConstantOperandVal(1);
          if (VC1 == maskTrailingOnes<uint64_t>(VC2)) {
            RS1 = Shl.getOperand(0);
            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                              Shl.getOperand(1).getValueType());
            return true;
          }
        }
        if (XLenVT == MVT::i32) {
          uint32_t VC1 = Or.getConstantOperandVal(1);
          uint32_t VC2 = Shl.getConstantOperandVal(1);
          if (VC1 == maskTrailingOnes<uint32_t>(VC2)) {
            RS1 = Shl.getOperand(0);
            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                              Shl.getOperand(1).getValueType());
            return true;
          }
        }
      }
    }
  }
  return false;
}

// Check that it is a SROI (Shift Right Ones Immediate). We first check that
// it is the right node tree:
//
// (OR (SRL RS1, VC2), VC1)
//
// and then we check that VC1, the mask used to fill with ones, is compatible
// with VC2, the shamt:
//
// VC1 == maskLeadingOnes<uint64_t>(VC2)

bool RISCVDAGToDAGISel::SelectSROI(SDValue N, SDValue &RS1, SDValue &Shamt) {
  MVT XLenVT = Subtarget->getXLenVT();
  if (N.getOpcode() == ISD::OR) {
    SDValue Or = N;
    if (Or.getOperand(0).getOpcode() == ISD::SRL) {
      SDValue Srl = Or.getOperand(0);
      if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
          isa<ConstantSDNode>(Or.getOperand(1))) {
        if (XLenVT == MVT::i64) {
          uint64_t VC1 = Or.getConstantOperandVal(1);
          uint64_t VC2 = Srl.getConstantOperandVal(1);
          if (VC1 == maskLeadingOnes<uint64_t>(VC2)) {
            RS1 = Srl.getOperand(0);
            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                              Srl.getOperand(1).getValueType());
            return true;
          }
        }
        if (XLenVT == MVT::i32) {
          uint32_t VC1 = Or.getConstantOperandVal(1);
          uint32_t VC2 = Srl.getConstantOperandVal(1);
          if (VC1 == maskLeadingOnes<uint32_t>(VC2)) {
            RS1 = Srl.getOperand(0);
            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                              Srl.getOperand(1).getValueType());
            return true;
          }
        }
      }
    }
  }
  return false;
}

// Check that it is a RORI (Rotate Right Immediate). We first check that
// it is the right node tree:
//
// (ROTL RS1, VC)
//
// The compiler translates immediate right rotations, as produced by calls to
// the rotateright32/rotateright64 intrinsics, into rotations to the left.
// Since a rotation to the left can be easily emulated as a rotation to the
// right by negating the constant, there is no encoding for ROLI.
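// (For instance, on RV64 a left rotation by 7 is equivalent to a right
// rotation by 57.)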
// We then select the immediate left rotations as RORI by the complementary
// constant:
//
// Shamt == XLen - VC

bool RISCVDAGToDAGISel::SelectRORI(SDValue N, SDValue &RS1, SDValue &Shamt) {
  MVT XLenVT = Subtarget->getXLenVT();
  if (N.getOpcode() == ISD::ROTL) {
    if (isa<ConstantSDNode>(N.getOperand(1))) {
      if (XLenVT == MVT::i64) {
        uint64_t VC = N.getConstantOperandVal(1);
        Shamt = CurDAG->getTargetConstant((64 - VC), SDLoc(N),
                                          N.getOperand(1).getValueType());
        RS1 = N.getOperand(0);
        return true;
      }
      if (XLenVT == MVT::i32) {
        uint32_t VC = N.getConstantOperandVal(1);
        Shamt = CurDAG->getTargetConstant((32 - VC), SDLoc(N),
                                          N.getOperand(1).getValueType());
        RS1 = N.getOperand(0);
        return true;
      }
    }
  }
  return false;
}

// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
// on RV64).
// SLLIUW is the same as SLLI except for the fact that it clears the bits
// XLEN-1:32 of the input RS1 before shifting.
// We first check that it is the right node tree:
//
// (AND (SHL RS1, VC2), VC1)
//
// We check that VC2, the shamt, is less than 32; otherwise the pattern is
// exactly the same as SLLI and we give priority to that.
// Finally, we check that VC1, the mask used to clear the upper 32 bits
// of RS1, is correct:
//
// VC1 == (0xFFFFFFFF << VC2)

bool RISCVDAGToDAGISel::SelectSLLIUW(SDValue N, SDValue &RS1, SDValue &Shamt) {
  if (N.getOpcode() == ISD::AND && Subtarget->getXLenVT() == MVT::i64) {
    SDValue And = N;
    if (And.getOperand(0).getOpcode() == ISD::SHL) {
      SDValue Shl = And.getOperand(0);
      if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
          isa<ConstantSDNode>(And.getOperand(1))) {
        uint64_t VC1 = And.getConstantOperandVal(1);
        uint64_t VC2 = Shl.getConstantOperandVal(1);
        if (VC2 < 32 && VC1 == ((uint64_t)0xFFFFFFFF << VC2)) {
          RS1 = Shl.getOperand(0);
          Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                            Shl.getOperand(1).getValueType());
          return true;
        }
      }
    }
  }
  return false;
}

// Check that it is a SLOIW (Shift Left Ones Immediate i32 on RV64).
// We first check that it is the right node tree:
//
// (SIGN_EXTEND_INREG (OR (SHL RS1, VC2), VC1))
//
// and then we check that VC1, the mask used to fill with ones, is compatible
// with VC2, the shamt:
//
// VC1 == maskTrailingOnes<uint32_t>(VC2)

bool RISCVDAGToDAGISel::SelectSLOIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
  if (Subtarget->getXLenVT() == MVT::i64 &&
      N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
      cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
    if (N.getOperand(0).getOpcode() == ISD::OR) {
      SDValue Or = N.getOperand(0);
      if (Or.getOperand(0).getOpcode() == ISD::SHL) {
        SDValue Shl = Or.getOperand(0);
        if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
            isa<ConstantSDNode>(Or.getOperand(1))) {
          uint32_t VC1 = Or.getConstantOperandVal(1);
          uint32_t VC2 = Shl.getConstantOperandVal(1);
          if (VC1 == maskTrailingOnes<uint32_t>(VC2)) {
            RS1 = Shl.getOperand(0);
            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                              Shl.getOperand(1).getValueType());
            return true;
          }
        }
      }
    }
  }
  return false;
}

// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64).
// We first check that it is the right node tree:
//
// (OR (SRL RS1, VC2), VC1)
//
// and then we check that VC1, the mask used to fill with ones, is compatible
// with VC2, the shamt:
//
// VC1 == maskLeadingOnes<uint32_t>(VC2)

bool RISCVDAGToDAGISel::SelectSROIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
  if (N.getOpcode() == ISD::OR && Subtarget->getXLenVT() == MVT::i64) {
    SDValue Or = N;
    if (Or.getOperand(0).getOpcode() == ISD::SRL) {
      SDValue Srl = Or.getOperand(0);
      if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
          isa<ConstantSDNode>(Or.getOperand(1))) {
        uint32_t VC1 = Or.getConstantOperandVal(1);
        uint32_t VC2 = Srl.getConstantOperandVal(1);
        if (VC1 == maskLeadingOnes<uint32_t>(VC2)) {
          RS1 = Srl.getOperand(0);
          Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                            Srl.getOperand(1).getValueType());
          return true;
        }
      }
    }
  }
  return false;
}

// Check that it is a RORIW (i32 Right Rotate Immediate on RV64).
// We first check that it is the right node tree:
//
// (SIGN_EXTEND_INREG (OR (SHL (AssertSext RS1, i32), VC2),
//                        (SRL (AND (AssertSext RS2, i32), VC3), VC1)))
//
// Then we check that the constant operands respect these constraints:
//
// VC2 == 32 - VC1
// VC3 == maskLeadingOnes<uint32_t>(VC2)
//
// where VC1 is the Shamt we need, VC2 is the complement of Shamt with respect
// to 32, and VC3 is a 32-bit mask of (32 - VC1) leading ones.

bool RISCVDAGToDAGISel::SelectRORIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
  if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
      Subtarget->getXLenVT() == MVT::i64 &&
      cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
    if (N.getOperand(0).getOpcode() == ISD::OR) {
      SDValue Or = N.getOperand(0);
      if (Or.getOperand(0).getOpcode() == ISD::SHL &&
          Or.getOperand(1).getOpcode() == ISD::SRL) {
        SDValue Shl = Or.getOperand(0);
        SDValue Srl = Or.getOperand(1);
        if (Srl.getOperand(0).getOpcode() == ISD::AND) {
          SDValue And = Srl.getOperand(0);
          if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
              isa<ConstantSDNode>(Shl.getOperand(1)) &&
              isa<ConstantSDNode>(And.getOperand(1))) {
            uint32_t VC1 = Srl.getConstantOperandVal(1);
            uint32_t VC2 = Shl.getConstantOperandVal(1);
            uint32_t VC3 = And.getConstantOperandVal(1);
            if (VC2 == (32 - VC1) &&
                VC3 == maskLeadingOnes<uint32_t>(VC2)) {
              RS1 = Shl.getOperand(0);
              Shamt = CurDAG->getTargetConstant(VC1, SDLoc(N),
                                                Srl.getOperand(1).getValueType());
              return true;
            }
          }
        }
      }
    }
  }
  return false;
}

// Check that it is a FSRIW (i32 Funnel Shift Right Immediate on RV64).
// We first check that it is the right node tree:
//
// (SIGN_EXTEND_INREG (OR (SHL (AssertSext RS1, i32), VC2),
//                        (SRL (AND (AssertSext RS2, i32), VC3), VC1)))
//
// Then we check that the constant operands respect these constraints:
//
// VC2 == 32 - VC1
// VC3 == maskLeadingOnes<uint32_t>(VC2)
//
// where VC1 is the Shamt we need, VC2 is the complement of Shamt with respect
// to 32, and VC3 is a 32-bit mask of (32 - VC1) leading ones.
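//
// For example, with VC1 == 5 these constraints require VC2 == 27 and
// VC3 == maskLeadingOnes<uint32_t>(27), i.e. 0xFFFFFFE0.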

bool RISCVDAGToDAGISel::SelectFSRIW(SDValue N, SDValue &RS1, SDValue &RS2,
                                    SDValue &Shamt) {
  if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
      Subtarget->getXLenVT() == MVT::i64 &&
      cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
    if (N.getOperand(0).getOpcode() == ISD::OR) {
      SDValue Or = N.getOperand(0);
      if (Or.getOperand(0).getOpcode() == ISD::SHL &&
          Or.getOperand(1).getOpcode() == ISD::SRL) {
        SDValue Shl = Or.getOperand(0);
        SDValue Srl = Or.getOperand(1);
        if (Srl.getOperand(0).getOpcode() == ISD::AND) {
          SDValue And = Srl.getOperand(0);
          if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
              isa<ConstantSDNode>(Shl.getOperand(1)) &&
              isa<ConstantSDNode>(And.getOperand(1))) {
            uint32_t VC1 = Srl.getConstantOperandVal(1);
            uint32_t VC2 = Shl.getConstantOperandVal(1);
            uint32_t VC3 = And.getConstantOperandVal(1);
            if (VC2 == (32 - VC1) &&
                VC3 == maskLeadingOnes<uint32_t>(VC2)) {
              RS1 = Shl.getOperand(0);
              RS2 = And.getOperand(0);
              Shamt = CurDAG->getTargetConstant(VC1, SDLoc(N),
                                                Srl.getOperand(1).getValueType());
              return true;
            }
          }
        }
      }
    }
  }
  return false;
}

// Merge an ADDI into the offset of a load/store instruction where possible.
// (load (addi base, off1), off2) -> (load base, off1+off2)
// (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
// This is possible when off1+off2 fits a 12-bit immediate.
void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
  SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
  ++Position;

  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    int OffsetOpIdx;
    int BaseOpIdx;

    // Only attempt this optimisation for I-type loads and S-type stores.
    switch (N->getMachineOpcode()) {
    default:
      continue;
    case RISCV::LB:
    case RISCV::LH:
    case RISCV::LW:
    case RISCV::LBU:
    case RISCV::LHU:
    case RISCV::LWU:
    case RISCV::LD:
    case RISCV::FLW:
    case RISCV::FLD:
      BaseOpIdx = 0;
      OffsetOpIdx = 1;
      break;
    case RISCV::SB:
    case RISCV::SH:
    case RISCV::SW:
    case RISCV::SD:
    case RISCV::FSW:
    case RISCV::FSD:
      BaseOpIdx = 1;
      OffsetOpIdx = 2;
      break;
    }

    if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
      continue;

    SDValue Base = N->getOperand(BaseOpIdx);

    // If the base is an ADDI, we can merge it in to the load/store.
    if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
      continue;

    SDValue ImmOperand = Base.getOperand(1);
    uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);

    if (auto Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
      int64_t Offset1 = Const->getSExtValue();
      int64_t CombinedOffset = Offset1 + Offset2;
      if (!isInt<12>(CombinedOffset))
        continue;
      ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
                                             ImmOperand.getValueType());
    } else if (auto GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
      // If the off1 in (addi base, off1) is a global variable's address (its
      // low part, really), then we can rely on the alignment of that variable
      // to provide a margin of safety before off1 can overflow the 12 bits.
      // Check if off2 falls within that margin; if so off1+off2 can't overflow.
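      // For example, if the global is 8-byte aligned and off1 is zero, the low
      // part is a multiple of 8 (so at most 2040 when positive), and adding an
      // off2 of up to 7 still fits the signed 12-bit immediate.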
      const DataLayout &DL = CurDAG->getDataLayout();
      Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
      if (Offset2 != 0 && Alignment <= Offset2)
        continue;
      int64_t Offset1 = GA->getOffset();
      int64_t CombinedOffset = Offset1 + Offset2;
      ImmOperand = CurDAG->getTargetGlobalAddress(
          GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
          CombinedOffset, GA->getTargetFlags());
    } else if (auto CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
      // Ditto.
      Align Alignment = CP->getAlign();
      if (Offset2 != 0 && Alignment <= Offset2)
        continue;
      int64_t Offset1 = CP->getOffset();
      int64_t CombinedOffset = Offset1 + Offset2;
      ImmOperand = CurDAG->getTargetConstantPool(
          CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
          CombinedOffset, CP->getTargetFlags());
    } else {
      continue;
    }

    LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
    LLVM_DEBUG(Base->dump(CurDAG));
    LLVM_DEBUG(dbgs() << "\nN: ");
    LLVM_DEBUG(N->dump(CurDAG));
    LLVM_DEBUG(dbgs() << "\n");

    // Modify the offset operand of the load/store.
    if (BaseOpIdx == 0) // Load
      CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
                                 N->getOperand(2));
    else // Store
      CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
                                 ImmOperand, N->getOperand(3));

    // The add-immediate may now be dead, in which case remove it.
    if (Base.getNode()->use_empty())
      CurDAG->RemoveDeadNode(Base.getNode());
  }
}

// This pass converts a legalized DAG into a RISCV-specific DAG, ready
// for instruction scheduling.
FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
  return new RISCVDAGToDAGISel(TM);
}