//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));

static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");

RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/ 0, /*EHFlavor*/ 0,
                           /*PC*/ 0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasStdExtD())
      return CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return CSR_XLEN_F32_Interrupt_SaveList;
    return CSR_Interrupt_SaveList;
  }

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_SaveList;
  }
}

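// Build the set of registers that can never be allocated. Besides the fixed
// architectural registers (zero, sp, gp, tp), this covers user-reserved
// registers, the frame/base pointers when frame lowering needs them, and the
// non-allocatable vector and floating-point CSRs.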
BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  // Mark any registers requested to be reserved as such.
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    if (Subtarget.isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved.
  markSuperRegs(Reserved, RISCV::X0); // zero
  markSuperRegs(Reserved, RISCV::X2); // sp
  markSuperRegs(Reserved, RISCV::X3); // gp
  markSuperRegs(Reserved, RISCV::X4); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Additionally reserve the dummy register used to form the register pair
  // beginning with 'x0' for instructions that take register pairs.
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // The vector CSRs are not available for code generation; we handle them
  // manually.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);
  markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    if (Subtarget.isRVE())
      report_fatal_error("Graal reserved registers do not exist in RVE");
    markSuperRegs(Reserved, RISCV::X23);
    markSuperRegs(Reserved, RISCV::X27);
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

// Frame indexes representing locations of CSRs which are given a fixed
// location by save/restore libcalls or Zcmp Push/Pop.
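// For example, a function using Zcmp push/pop that saves ra, s0, and s1
// refers to them through the fixed frame indexes -1, -2, and -3 listed
// below; the frame lowering later resolves these indexes to concrete stack
// offsets.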
static const std::pair<unsigned, int> FixedCSRFIMap[] = {
    {/*ra*/ RISCV::X1, -1},
    {/*s0*/ RISCV::X8, -2},
    {/*s1*/ RISCV::X9, -3},
    {/*s2*/ RISCV::X18, -4},
    {/*s3*/ RISCV::X19, -5},
    {/*s4*/ RISCV::X20, -6},
    {/*s5*/ RISCV::X21, -7},
    {/*s6*/ RISCV::X22, -8},
    {/*s7*/ RISCV::X23, -9},
    {/*s8*/ RISCV::X24, -10},
    {/*s9*/ RISCV::X25, -11},
    {/*s10*/ RISCV::X26, -12},
    {/*s11*/ RISCV::X27, -13}
};

bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                             Register Reg,
                                             int &FrameIdx) const {
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
    return false;

  const auto *FII =
      llvm::find_if(FixedCSRFIMap, [&](auto P) { return P.first == Reg; });
  if (FII == std::end(FixedCSRFIMap))
    return false;

  FrameIdx = FII->second;
  return true;
}

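// Emit code to set DestReg = SrcReg + Offset, where Offset may have both a
// fixed component and a scalable component measured in multiples of vlenb.
// A minimal sketch of the scalable path for an offset of two vector
// registers, with an illustrative scratch register name:
//   PseudoReadVLENB scratch
//   SLLI            scratch, scratch, 1
//   ADD             DestReg, SrcReg, scratch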
void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Get vlenb and multiply it by the number of vector registers.
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    TII->getVLENFactoredAmount(MF, MBB, II, DL, ScratchReg, ScalableValue,
                               Flag);
    BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
        .addReg(SrcReg)
        .addReg(ScratchReg, RegState::Kill)
        .setMIFlag(Flag);
    SrcReg = DestReg;
    KillSrcReg = true;
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Try to split the offset across two ADDIs. We need to keep the intermediate
  // result aligned after each ADDI. We need to determine the maximum value we
  // can put in each ADDI. In the negative direction, we can use -2048, which
  // is always sufficiently aligned. In the positive direction, we need to find
  // the largest 12-bit immediate that is aligned. Exclude -4096 since it can
  // be created with LUI.
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}

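// A worked example of the two-ADDI split above: with RequiredAlign = 16,
// MaxPosAdjStep is 2032, so a fixed offset of 4064 becomes ADDI +2032
// followed by ADDI +2032. Both immediates fit in 12 bits and every
// intermediate value remains 16-byte aligned.
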
// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
// LMUL*VLENB bytes.
void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VS1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VS2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VS4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  if (STI.getRealMinVLen() == STI.getRealMaxVLen()) {
    const int64_t VLENB = STI.getRealMinVLen() / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }

  Register SrcReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    // Add an implicit use of the super register to convey that part of it is
    // being used; this keeps the machine verifier from complaining when part
    // of the subreg is undef. See the comment in
    // MachineVerifier::checkLiveness for more detail.
    BuildMI(MBB, II, DL, TII->get(Opcode))
        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()))
        .addReg(SrcReg, RegState::Implicit);
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

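// Note that the stride between the parts is LMUL * VLENB. When VLEN is known
// exactly (getRealMinVLen == getRealMaxVLen), that stride is a compile-time
// constant; e.g. VLEN = 128 gives VLENB = 16, so an LMUL-2 spill places its
// parts 32 bytes apart and movImm materializes 32 directly rather than
// reading the vlenb CSR.
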
// Split a VRELOADx_Mx pseudo into multiple whole register loads separated by
// LMUL*VLENB bytes.
void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VL1RE8_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VL2RE8_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VL4RE8_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  if (STI.getRealMinVLen() == STI.getRealMaxVLen()) {
    const int64_t VLENB = STI.getRealMinVLen() / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }

  Register DestReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    BuildMI(MBB, II, DL, TII->get(Opcode),
            TRI->getSubReg(DestReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()));
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

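// Rewrite the frame-index operand of the instruction at II into a concrete
// base register and offset, materializing the offset with extra instructions
// when it does not fit the consuming instruction's immediate field. Returns
// true if the original instruction was erased (a no-op ADDI, or a segment
// spill/reload pseudo that gets expanded), false if it was rewritten in
// place.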
bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (Offset.getScalable() && ST.getRealMinVLen() == ST.getRealMaxVLen()) {
    // For an exact VLEN value, scalable offsets become constant and thus
    // can be converted entirely into fixed offsets.
    int64_t FixedValue = Offset.getFixed();
    int64_t ScalableValue = Offset.getScalable();
    assert(ScalableValue % 8 == 0 &&
           "Scalable offset is not a multiple of a single vector size.");
    int64_t NumOfVReg = ScalableValue / 8;
    int64_t VLENB = ST.getRealMinVLen() / 8;
    Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
  }

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  if (!IsRVVSpill) {
    if (MI.getOpcode() == RISCV::ADDI && !isInt<12>(Offset.getFixed())) {
      // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the using add under the theory that doing so doesn't
      // save dynamic instruction count and some targets may fuse the canonical
      // 32-bit immediate sequence. We still need to clear the portion of the
      // offset encoded in the immediate.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else {
      // We can encode an add with a 12-bit signed immediate in the immediate
      // operand of our user instruction. As a result, the remaining offset
      // can, by construction, be materialized with at worst a LUI and an ADD.
      int64_t Val = Offset.getFixed();
      int64_t Lo12 = SignExtend64<12>(Val);
      if ((MI.getOpcode() == RISCV::PREFETCH_I ||
           MI.getOpcode() == RISCV::PREFETCH_R ||
           MI.getOpcode() == RISCV::PREFETCH_W) &&
          (Lo12 & 0b11111) != 0)
        MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
      else {
        MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
        Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
                                  Offset.getScalable());
      }
    }
  }

  if (Offset.getScalable() || Offset.getFixed()) {
    Register DestReg;
    if (MI.getOpcode() == RISCV::ADDI)
      DestReg = MI.getOperand(0).getReg();
    else
      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
              MachineInstr::NoFlags, std::nullopt);
    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/ false,
                                                 /*IsImp*/ false,
                                                 /*IsKill*/ true);
  } else {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/ false,
                                                 /*IsImp*/ false,
                                                 /*IsKill*/ false);
  }

  // If, after materializing the adjustment, we have a pointless ADDI, remove
  // it.
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    return true;
  }

  // Handle spill/fill of synthetic register classes for segment operations to
  // ensure correctness in the edge case one gets spilled. There are many
  // possible optimizations here, but given the extreme rarity of such spills,
  // we prefer simplicity of implementation for now.
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerVSPILL(II);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerVRELOAD(II);
    return true;
  }

  return false;
}

bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

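// The hooks below support the LocalStackSlotAllocation pass, which rewrites
// out-of-range frame accesses to go through a scratch base register: a
// single ADDI anchors the base near a cluster of large-offset slots, and
// each access then uses a small 12-bit displacement from that base.
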
// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by the LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, the machine instructions that include a FrameIndex operand
  // are loads, stores, and ADDI.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned CalleeSavedSize = 0;
  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);

  // Estimate the stack size used to store callee-saved registers (excluding
  // reserved registers).
  BitVector ReservedRegs = getReservedRegs(MF);
  for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R; ++R) {
    if (!ReservedRegs.test(Reg))
      CalleeSavedSize += getSpillSize(*getMinimalPhysRegClass(Reg));
  }

  int64_t MaxFPOffset = Offset - CalleeSavedSize;
  if (TFI->hasFP(MF) && !shouldRealignStack(MF))
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);

  // Assume 128 bytes of spill slots to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick
  // a real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}

// Determine whether a given base register plus offset immediate is
// encodable to resolve a frame index.
bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                           Register BaseReg,
                                           int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
  return isInt<12>(Offset);
}

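// A sketch of one rewritten access produced by the two hooks below (register
// names and offset illustrative):
//   %base = ADDI %stack.0, 0    ; materializeFrameBaseRegister
//   SW %val, 1928(%base)        ; after resolveFrameIndex
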
// Insert defining instruction(s) for a pointer to FrameIdx at the beginning
// of MBB. Returns the materialized base register.
Register
RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                int FrameIdx,
                                                int64_t Offset) const {
  MachineBasicBlock::iterator MBBI = MBB->begin();
  DebugLoc DL;
  if (MBBI != MBB->end())
    DL = MBBI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  Register BaseReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}

// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex operands are always represented as a register followed by an
  // immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Get the offset from the referenced frame index in the instruction, if there
// is one.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() &&
         "The Idx'th operand of MI is not a FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_RegMask;
  }
}

const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use
  // <vscale x 8 x i8> to represent one vector register. The DWARF offset is
  // VLENB * scalable_offset / 8.
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");

  // Add the fixed-sized offset using the existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 1 : 0;
}

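// Many RVC instructions are two-address (e.g. C.ADD expands to
// ADD rd, rd, rs2) or restrict their register operands to x8-x15 (GPRC), so
// steering the allocator toward assignments that satisfy those constraints
// shrinks an instruction's encoding from four bytes to two.
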
// Add two address hints to improve chances of being able to use a compressed
// instruction.
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg))) {
      assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!");
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // This is all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands, \p NeedGPRC will be set to true.
  auto isCompressible = [](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI:
      NeedGPRC = true;
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    }
  };

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. Immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. Reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }
  }

  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}