//===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISCV implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVInstrInfo.h"
#include "RISCV.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#include "RISCVGenInstrInfo.inc"

RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
    : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
      STI(STI) {}

/// If \p MI is a load from a stack slot (a recognized load opcode whose
/// address operand is a frame index with a zero immediate offset), set
/// \p FrameIndex to that frame index and return the destination register.
/// Otherwise return 0.
unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::LB:
  case RISCV::LBU:
  case RISCV::LH:
  case RISCV::LHU:
  case RISCV::LW:
  case RISCV::FLW:
  case RISCV::LWU:
  case RISCV::LD:
  case RISCV::FLD:
    break;
  }

  // Loads use base + offset addressing: operand 1 is the base (frame index
  // here) and operand 2 must be an immediate offset of exactly 0.
  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}

/// If \p MI is a store to a stack slot (a recognized store opcode whose
/// address operand is a frame index with a zero immediate offset), set
/// \p FrameIndex to that frame index and return the source register.
/// Otherwise return 0.
unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::SB:
  case RISCV::SH:
  case RISCV::SW:
  case RISCV::FSW:
  case RISCV::SD:
  case RISCV::FSD:
    break;
  }

  // Operand 1 is the base address (frame index) and operand 2 the immediate
  // offset, which must be 0 for a plain stack-slot access.
  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}

/// Emit a register-to-register copy. GPR copies are materialized as
/// `ADDI DstReg, SrcReg, 0`; FPR copies use FSGNJ_S/FSGNJ_D with both source
/// operands set to SrcReg, which is the canonical floating-point move.
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc) const {
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  // FPR->FPR copies
  unsigned Opc;
  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg))
    Opc = RISCV::FSGNJ_S;
  else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg))
    Opc = RISCV::FSGNJ_D;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addReg(SrcReg, getKillRegState(KillSrc));
}

/// Store \p SrcReg to stack slot \p FI. The store opcode is selected from the
/// register class: SW/SD for GPRs (by XLEN), FSW/FSD for FPRs.
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  unsigned Opcode;

  if (RISCV::GPRRegClass.hasSubClassEq(RC))
    // GPR width distinguishes RV32 (SW) from RV64 (SD).
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::SW : RISCV::SD;
  else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FSW;
  else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FSD;
  else
    llvm_unreachable("Can't store this register to stack slot");

  BuildMI(MBB, I, DL, get(Opcode))
      .addReg(SrcReg, getKillRegState(IsKill))
      .addFrameIndex(FI)
      .addImm(0);
}

/// Load \p DstReg from stack slot \p FI. The load opcode is selected from the
/// register class: LW/LD for GPRs (by XLEN), FLW/FLD for FPRs.
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  unsigned Opcode;

  if (RISCV::GPRRegClass.hasSubClassEq(RC))
    // GPR width distinguishes RV32 (LW) from RV64 (LD).
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::LW : RISCV::LD;
  else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FLW;
  else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FLD;
  else
    llvm_unreachable("Can't load this register from stack slot");

  BuildMI(MBB, I, DL, get(Opcode), DstReg).addFrameIndex(FI).addImm(0);
}

/// Materialize the constant \p Val into \p DstReg using the instruction
/// sequence computed by RISCVMatInt (LUI/ADDI/SLLI chains). Intermediate
/// results go through a fresh virtual register; only the final instruction
/// writes DstReg. On RV32 only 32-bit values may be materialized.
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag) const {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  bool IsRV64 = MF->getSubtarget<RISCVSubtarget>().is64Bit();
  Register SrcReg = RISCV::X0;
  Register Result = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  unsigned Num = 0;

  if (!IsRV64 && !isInt<32>(Val))
    report_fatal_error("Should only materialize 32-bit constants for RV32");

  RISCVMatInt::InstSeq Seq;
  RISCVMatInt::generateInstSeq(Val, IsRV64, Seq);
  assert(Seq.size() > 0);

  for (RISCVMatInt::Inst &Inst : Seq) {
    // Write the final result to DstReg if it's the last instruction in the Seq.
    // Otherwise, write the result to the temp register.
    if (++Num == Seq.size())
      Result = DstReg;

    if (Inst.Opc == RISCV::LUI) {
      // LUI takes only an immediate; every other opcode in the sequence is
      // reg+imm with the previous result (initially X0) as the source.
      BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    } else {
      BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
          .addReg(SrcReg, RegState::Kill)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    }
    // Only the first instruction has X0 as its source.
    SrcReg = Result;
  }
}

// The contents of values added to Cond are not examined outside of
// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
// push BranchOpcode, Reg1, Reg2.
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  assert(LastInst.getDesc().isConditionalBranch() &&
         "Unknown conditional branch");
  // Operand layout of a RISCV conditional branch: rs1, rs2, target MBB.
  Target = LastInst.getOperand(2).getMBB();
  Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
  Cond.push_back(LastInst.getOperand(0));
  Cond.push_back(LastInst.getOperand(1));
}

/// Return the branch opcode testing the inverse condition of \p Opc
/// (BEQ<->BNE, BLT<->BGE, BLTU<->BGEU).
static unsigned getOppositeBranchOpcode(int Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Unrecognized conditional branch");
  case RISCV::BEQ:
    return RISCV::BNE;
  case RISCV::BNE:
    return RISCV::BEQ;
  case RISCV::BLT:
    return RISCV::BGE;
  case RISCV::BGE:
    return RISCV::BLT;
  case RISCV::BLTU:
    return RISCV::BGEU;
  case RISCV::BGEU:
    return RISCV::BLTU;
  }
}

/// Analyze the branching code at the end of \p MBB (TargetInstrInfo hook).
/// Returns false on success, filling in TBB/FBB/Cond; returns true when the
/// terminators can't be understood (indirect branch, >2 terminators, ...).
/// With \p AllowModify set, dead terminators after the first unconditional or
/// indirect branch are erased.
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after
  // FirstUncondOrIndirectBR.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = I->getOperand(0).getMBB();
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(*std::prev(I), TBB, Cond);
    FBB = I->getOperand(0).getMBB();
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}

/// Remove up to two branches from the end of \p MBB (an unconditional branch
/// and/or a preceding conditional branch), returning the number removed.
/// If \p BytesRemoved is non-null it is set to the total size in bytes of the
/// erased instructions.
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  if (BytesRemoved)
    *BytesRemoved = 0;
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();
  return 2;
}

// Inserts a branch into the end of the specific MachineBasicBlock, returning
// the number of instructions inserted.
unsigned RISCVInstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  if (BytesAdded)
    *BytesAdded = 0;

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISCV branch conditions have two components!");

  // Unconditional branch.
  if (Cond.empty()) {
    MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
    if (BytesAdded)
      *BytesAdded += getInstSizeInBytes(MI);
    return 1;
  }

  // Either a one or two-way conditional branch.
  // Cond is {BranchOpcode, Reg1, Reg2} as produced by parseCondBranch.
  unsigned Opc = Cond[0].getImm();
  MachineInstr &CondMI =
      *BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(CondMI);

  // One-way conditional branch.
  if (!FBB)
    return 1;

  // Two-way conditional branch.
  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(MI);
  return 2;
}

/// Insert a long unconditional branch to \p DestBB into the fresh block
/// \p MBB, materializing the target address with LUI + PseudoBRIND (8 bytes).
/// A scratch GPR is obtained with the register scavenger; PIC and offsets
/// outside the signed 32-bit range are rejected with a fatal error.
unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                              MachineBasicBlock &DestBB,
                                              const DebugLoc &DL,
                                              int64_t BrOffset,
                                              RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());

  if (TM.isPositionIndependent())
    report_fatal_error("Unable to insert indirect branch");

  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // FIXME: A virtual register must be used initially, as the register
  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
  // uses the same workaround).
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  auto II = MBB.end();

  MachineInstr &LuiMI = *BuildMI(MBB, II, DL, get(RISCV::LUI), ScratchReg)
                             .addMBB(&DestBB, RISCVII::MO_HI);
  BuildMI(MBB, II, DL, get(RISCV::PseudoBRIND))
      .addReg(ScratchReg, RegState::Kill)
      .addMBB(&DestBB, RISCVII::MO_LO);

  // Replace the virtual scratch register with a scavenged physical GPR now
  // that the block is no longer empty.
  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
                                                LuiMI.getIterator(), false, 0);
  MRI.replaceRegWith(ScratchReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);
  return 8;
}

/// Invert the condition produced by analyzeBranch by swapping the branch
/// opcode for its opposite. Returns false (success).
bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  Cond[0].setImm(getOppositeBranchOpcode(Cond[0].getImm()));
  return false;
}

MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}

/// Return true if \p BrOffset fits in the immediate of branch opcode
/// \p BranchOp: 13-bit signed for conditional branches, 21-bit signed for
/// JAL/PseudoBR (both are LSB-0 encodings, hence the extra bit).
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  // Ideally we could determine the supported branch offset from the
  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
  // PseudoBR.
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  }
}

/// Return the size of \p MI in bytes. Ordinary instructions use the size from
/// the MCInstrDesc, shrunk to 2 when the instruction is compressible on the
/// current subtarget; meta instructions are 0 bytes; pseudo call/load-address
/// and atomic pseudos use hard-coded expansion sizes; inline asm is estimated
/// from its string.
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();

  switch (Opcode) {
  default: {
    // The compressibility check needs the MF's subtarget, so guard against
    // instructions not yet inserted into a function.
    if (MI.getParent() && MI.getParent()->getParent()) {
      const auto MF = MI.getMF();
      const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
      const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
      const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
      const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
      if (isCompressibleInst(MI, &ST, MRI, STI))
        return 2;
    }
    return get(Opcode).getSize();
  }
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  // These values are determined based on RISCVExpandAtomicPseudoInsts,
  // RISCVExpandPseudoInsts and RISCVMCCodeEmitter, depending on where the
  // pseudos are expanded.
  case RISCV::PseudoCALLReg:
  case RISCV::PseudoCALL:
  case RISCV::PseudoJump:
  case RISCV::PseudoTAIL:
  case RISCV::PseudoLLA:
  case RISCV::PseudoLA:
  case RISCV::PseudoLA_TLS_IE:
  case RISCV::PseudoLA_TLS_GD:
    return 8;
  case RISCV::PseudoAtomicLoadNand32:
  case RISCV::PseudoAtomicLoadNand64:
    return 20;
  case RISCV::PseudoMaskedAtomicSwap32:
  case RISCV::PseudoMaskedAtomicLoadAdd32:
  case RISCV::PseudoMaskedAtomicLoadSub32:
    return 28;
  case RISCV::PseudoMaskedAtomicLoadNand32:
    return 32;
  case RISCV::PseudoMaskedAtomicLoadMax32:
  case RISCV::PseudoMaskedAtomicLoadMin32:
    return 44;
  case RISCV::PseudoMaskedAtomicLoadUMax32:
  case RISCV::PseudoMaskedAtomicLoadUMin32:
    return 36;
  case RISCV::PseudoCmpXchg32:
  case RISCV::PseudoCmpXchg64:
    return 16;
  case RISCV::PseudoMaskedCmpXchg32:
    return 32;
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *TM.getMCAsmInfo());
  }
  }
}

/// ADDI/ORI/XORI with X0 as the source register are register-materialized
/// constants and therefore as cheap as a move; everything else defers to the
/// generic MI flag.
bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch(Opcode) {
  default:
    break;
  case RISCV::ADDI:
  case RISCV::ORI:
  case RISCV::XORI:
    return (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0);
  }
  return MI.isAsCheapAsAMove();
}

/// Machine verifier hook: check that every RISCV immediate operand of \p MI
/// is within the range its operand type declares. On failure, set \p ErrInfo
/// and return false.
bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
                                       StringRef &ErrInfo) const {
  const MCInstrInfo *MCII = STI.getInstrInfo();
  MCInstrDesc const &Desc = MCII->get(MI.getOpcode());

  for (auto &OI : enumerate(Desc.operands())) {
    unsigned OpType = OI.value().OperandType;
    if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
        OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
      const MachineOperand &MO = MI.getOperand(OI.index());
      if (MO.isImm()) {
        int64_t Imm = MO.getImm();
        bool Ok;
        switch (OpType) {
        default:
          llvm_unreachable("Unexpected operand type");
        case RISCVOp::OPERAND_UIMM4:
          Ok = isUInt<4>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM5:
          Ok = isUInt<5>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM12:
          Ok = isUInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM12:
          Ok = isInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM13_LSB0:
          Ok = isShiftedInt<12, 1>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM20:
          Ok = isUInt<20>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM21_LSB0:
          Ok = isShiftedInt<20, 1>(Imm);
          break;
        case RISCVOp::OPERAND_UIMMLOG2XLEN:
          // Shift amounts are log2(XLEN) bits: 6 on RV64, 5 on RV32.
          if (STI.getTargetTriple().isArch64Bit())
            Ok = isUInt<6>(Imm);
          else
            Ok = isUInt<5>(Imm);
          break;
        }
        if (!Ok) {
          ErrInfo = "Invalid immediate";
          return false;
        }
      }
    }
  }

  return true;
}

// Return true if get the base operand, byte offset of an instruction and the
// memory width. Width is the size of memory that is being loaded/stored.
bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
    const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
    unsigned &Width, const TargetRegisterInfo *TRI) const {
  if (!LdSt.mayLoadOrStore())
    return false;

  // Here we assume the standard RISC-V ISA, which uses a base+offset
  // addressing mode. You'll need to relax these conditions to support custom
  // load/stores instructions.
  if (LdSt.getNumExplicitOperands() != 3)
    return false;
  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
    return false;

  if (!LdSt.hasOneMemOperand())
    return false;

  Width = (*LdSt.memoperands_begin())->getSize();
  BaseReg = &LdSt.getOperand(1);
  Offset = LdSt.getOperand(2).getImm();
  return true;
}

bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
  // base registers are identical, and the offset of a lower memory access +
  // the width doesn't overlap the offset of a higher memory access,
  // then the memory accesses are different.
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned int WidthA = 0, WidthB = 0;
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      int LowOffset = std::min(OffsetA, OffsetB);
      int HighOffset = std::max(OffsetA, OffsetB);
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}

/// Split a target-flag word into (direct flags, bitmask flags). RISCV only
/// uses direct flags, masked by MO_DIRECT_FLAG_MASK.
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

/// Mapping of RISCV operand target flags to their MIR serialization names.
ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_PLT, "riscv-plt"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
  return makeArrayRef(TargetFlags);
}

bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

  // Don't outline from functions with section markings; the program could
  // expect that all the code is in the named section.
  if (F.hasSection())
    return false;

  // It's safe to outline from MF.
  return true;
}

bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
  // More accurate safety checking is done in getOutliningCandidateInfo.
  return true;
}

// Enum values indicating how an outlined call should be constructed.
688 enum MachineOutlinerConstructionID { 689 MachineOutlinerDefault 690 }; 691 692 outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo( 693 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const { 694 695 // First we need to filter out candidates where the X5 register (IE t0) can't 696 // be used to setup the function call. 697 auto CannotInsertCall = [](outliner::Candidate &C) { 698 const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo(); 699 700 C.initLRU(*TRI); 701 LiveRegUnits LRU = C.LRU; 702 return !LRU.available(RISCV::X5); 703 }; 704 705 RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(), 706 RepeatedSequenceLocs.end(), 707 CannotInsertCall), 708 RepeatedSequenceLocs.end()); 709 710 // If the sequence doesn't have enough candidates left, then we're done. 711 if (RepeatedSequenceLocs.size() < 2) 712 return outliner::OutlinedFunction(); 713 714 unsigned SequenceSize = 0; 715 716 auto I = RepeatedSequenceLocs[0].front(); 717 auto E = std::next(RepeatedSequenceLocs[0].back()); 718 for (; I != E; ++I) 719 SequenceSize += getInstSizeInBytes(*I); 720 721 // call t0, function = 8 bytes. 722 unsigned CallOverhead = 8; 723 for (auto &C : RepeatedSequenceLocs) 724 C.setCallInfo(MachineOutlinerDefault, CallOverhead); 725 726 // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled. 
727 unsigned FrameOverhead = 4; 728 if (RepeatedSequenceLocs[0].getMF()->getSubtarget() 729 .getFeatureBits()[RISCV::FeatureStdExtC]) 730 FrameOverhead = 2; 731 732 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 733 FrameOverhead, MachineOutlinerDefault); 734 } 735 736 outliner::InstrType 737 RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI, 738 unsigned Flags) const { 739 MachineInstr &MI = *MBBI; 740 MachineBasicBlock *MBB = MI.getParent(); 741 const TargetRegisterInfo *TRI = 742 MBB->getParent()->getSubtarget().getRegisterInfo(); 743 744 // Positions generally can't safely be outlined. 745 if (MI.isPosition()) { 746 // We can manually strip out CFI instructions later. 747 if (MI.isCFIInstruction()) 748 return outliner::InstrType::Invisible; 749 750 return outliner::InstrType::Illegal; 751 } 752 753 // Don't trust the user to write safe inline assembly. 754 if (MI.isInlineAsm()) 755 return outliner::InstrType::Illegal; 756 757 // We can't outline branches to other basic blocks. 758 if (MI.isTerminator() && !MBB->succ_empty()) 759 return outliner::InstrType::Illegal; 760 761 // We need support for tail calls to outlined functions before return 762 // statements can be allowed. 763 if (MI.isReturn()) 764 return outliner::InstrType::Illegal; 765 766 // Don't allow modifying the X5 register which we use for return addresses for 767 // these outlined functions. 768 if (MI.modifiesRegister(RISCV::X5, TRI) || 769 MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5)) 770 return outliner::InstrType::Illegal; 771 772 // Make sure the operands don't reference something unsafe. 773 for (const auto &MO : MI.operands()) 774 if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI()) 775 return outliner::InstrType::Illegal; 776 777 // Don't allow instructions which won't be materialized to impact outlining 778 // analysis. 
779 if (MI.isMetaInstruction()) 780 return outliner::InstrType::Invisible; 781 782 return outliner::InstrType::Legal; 783 } 784 785 void RISCVInstrInfo::buildOutlinedFrame( 786 MachineBasicBlock &MBB, MachineFunction &MF, 787 const outliner::OutlinedFunction &OF) const { 788 789 // Strip out any CFI instructions 790 bool Changed = true; 791 while (Changed) { 792 Changed = false; 793 auto I = MBB.begin(); 794 auto E = MBB.end(); 795 for (; I != E; ++I) { 796 if (I->isCFIInstruction()) { 797 I->removeFromParent(); 798 Changed = true; 799 break; 800 } 801 } 802 } 803 804 MBB.addLiveIn(RISCV::X5); 805 806 // Add in a return instruction to the end of the outlined frame. 807 MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR)) 808 .addReg(RISCV::X0, RegState::Define) 809 .addReg(RISCV::X5) 810 .addImm(0)); 811 } 812 813 MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall( 814 Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, 815 MachineFunction &MF, const outliner::Candidate &C) const { 816 817 // Add in a call instruction to the outlined function at the given location. 818 It = MBB.insert(It, 819 BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5) 820 .addGlobalAddress(M.getNamedValue(MF.getName()), 0, 821 RISCVII::MO_CALL)); 822 return It; 823 } 824