//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));

static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");

RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavour*/0,
                           /*PC*/0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasStdExtD())
      return CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return CSR_XLEN_F32_Interrupt_SaveList;
    return CSR_Interrupt_SaveList;
  }

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  // Mark any registers requested to be reserved as such
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    if (Subtarget.isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved
  markSuperRegs(Reserved, RISCV::X0); // zero
  markSuperRegs(Reserved, RISCV::X2); // sp
  markSuperRegs(Reserved, RISCV::X3); // gp
  markSuperRegs(Reserved, RISCV::X4); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Additionally reserve the dummy register used to form the register pair
  // beginning with 'x0' for instructions that take register pairs.
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // Vector configuration and status registers. Code generation handles these
  // manually, so keep them out of the allocatable set.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);
  markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    if (Subtarget.isRVE())
      report_fatal_error("Graal reserved registers do not exist in RVE");
    markSuperRegs(Reserved, RISCV::X23);
    markSuperRegs(Reserved, RISCV::X27);
  }

  // Shadow stack pointer.
  markSuperRegs(Reserved, RISCV::SSP);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

// Frame indexes representing locations of CSRs that are given a fixed location
// by save/restore libcalls or Zcmp push/pop.
static const std::pair<unsigned, int> FixedCSRFIMap[] = {
  {/*ra*/  RISCV::X1,   -1},
  {/*s0*/  RISCV::X8,   -2},
  {/*s1*/  RISCV::X9,   -3},
  {/*s2*/  RISCV::X18,  -4},
  {/*s3*/  RISCV::X19,  -5},
  {/*s4*/  RISCV::X20,  -6},
  {/*s5*/  RISCV::X21,  -7},
  {/*s6*/  RISCV::X22,  -8},
  {/*s7*/  RISCV::X23,  -9},
  {/*s8*/  RISCV::X24,  -10},
  {/*s9*/  RISCV::X25,  -11},
  {/*s10*/ RISCV::X26,  -12},
  {/*s11*/ RISCV::X27,  -13}
};
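// (The negative values are fixed-object frame indices: slot -1 holds ra,
// slot -2 holds s0, and so on through s11 within the save/restore area.)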

bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                             Register Reg,
                                             int &FrameIdx) const {
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
    return false;

  const auto *FII =
      llvm::find_if(FixedCSRFIMap, [&](auto P) { return P.first == Reg; });
  if (FII == std::end(FixedCSRFIMap))
    return false;

  FrameIdx = FII->second;
  return true;
}

void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Get vlenb and multiply it by the number of vector registers.
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    TII->getVLENFactoredAmount(MF, MBB, II, DL, ScratchReg, ScalableValue, Flag);
    BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
      .addReg(SrcReg).addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
    SrcReg = DestReg;
    KillSrcReg = true;
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Try to split the offset across two ADDIs, keeping the intermediate result
  // aligned after each one. Determine the maximum value we can put in each
  // ADDI: in the negative direction, -2048 is always sufficiently aligned; in
  // the positive direction, we need the largest 12-bit immediate that keeps
  // the alignment. Exclude -4096 since it can be created with a single LUI.
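  // For example (assuming Align == 16): Val == 4000 gives MaxPosAdjStep ==
  // 2032, so the first ADDI adds 2032 and the second adds the remaining 1968.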
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}

// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
// LMUL*VLENB bytes.
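// For example (illustrative only), PseudoVSPILL2_M2 with base register a0
// expands to roughly:
//   vs2r.v  vN,   (a0)
//   add     a0, a0, t0      ; t0 = 2 * vlenb
//   vs2r.v  vN+2, (a0)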
void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VS1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VS2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VS4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  if (STI.getRealMinVLen() == STI.getRealMaxVLen()) {
    const int64_t VLENB = STI.getRealMinVLen() / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }

  Register SrcReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    // Add an implicit use of the super register to record that we are using
    // part of it. This keeps the machine verifier from complaining when part
    // of the subreg is undef; see the comment in
    // MachineVerifier::checkLiveness for more detail.
    BuildMI(MBB, II, DL, TII->get(Opcode))
        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()))
        .addReg(SrcReg, RegState::Implicit);
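    // Advance Base by LMUL*VLENB (held in VL) so the next store addresses the
    // following register in the group.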
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

// Split a VRELOADx_Mx pseudo into multiple whole register loads separated by
// LMUL*VLENB bytes.
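// For example (illustrative only), PseudoVRELOAD2_M2 with base register a0
// expands to roughly:
//   vl2re8.v  vN,   (a0)
//   add       a0, a0, t0    ; t0 = 2 * vlenb
//   vl2re8.v  vN+2, (a0)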
void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VL1RE8_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VL2RE8_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VL4RE8_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  if (STI.getRealMinVLen() == STI.getRealMaxVLen()) {
    const int64_t VLENB = STI.getRealMinVLen() / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }

  Register DestReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    BuildMI(MBB, II, DL, TII->get(Opcode),
            TRI->getSubReg(DestReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()));
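    // Advance the base pointer between loads; the incoming Base is killed on
    // its first increment only if it was already marked kill, while the
    // NewBase values from later iterations are always killed.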
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
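  // (RVV spill pseudos carry no immediate offset operand, so there is nothing
  // to fold for them.)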
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (Offset.getScalable() &&
      ST.getRealMinVLen() == ST.getRealMaxVLen()) {
    // For an exact VLEN value, scalable offsets become constant and thus
    // can be converted entirely into fixed offsets.
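    // For example (assuming VLEN is known to be 128): a scalable offset of 16
    // is two vector registers (16/8), which folds to 2 * 16 == 32 fixed bytes.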
    int64_t FixedValue = Offset.getFixed();
    int64_t ScalableValue = Offset.getScalable();
    assert(ScalableValue % 8 == 0 &&
           "Scalable offset is not a multiple of a single vector size.");
    int64_t NumOfVReg = ScalableValue / 8;
    int64_t VLENB = ST.getRealMinVLen() / 8;
    Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
  }

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  if (!IsRVVSpill) {
    if (MI.getOpcode() == RISCV::ADDI && !isInt<12>(Offset.getFixed())) {
      // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the consuming ADDI, on the theory that doing so
      // doesn't save dynamic instruction count and some targets may fuse the
      // canonical 32-bit immediate sequence. We still need to clear the
      // portion of the offset encoded in the immediate.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else {
      // We can encode an add with a 12-bit signed immediate in the immediate
      // operand of our user instruction. As a result, the remaining offset
      // can, by construction, be materialized with at worst a LUI and an ADD.
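      // For example (hypothetical value): Val == 4104 splits into Lo12 == 8,
      // leaving 4096 for adjustReg to materialize with a single LUI.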
      int64_t Val = Offset.getFixed();
      int64_t Lo12 = SignExtend64<12>(Val);
      if ((MI.getOpcode() == RISCV::PREFETCH_I ||
           MI.getOpcode() == RISCV::PREFETCH_R ||
           MI.getOpcode() == RISCV::PREFETCH_W) &&
          (Lo12 & 0b11111) != 0)
        MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
      else {
        MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
        Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
                                  Offset.getScalable());
      }
    }
  }

  if (Offset.getScalable() || Offset.getFixed()) {
    Register DestReg;
    if (MI.getOpcode() == RISCV::ADDI)
      DestReg = MI.getOperand(0).getReg();
    else
      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
              MachineInstr::NoFlags, std::nullopt);
    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/true);
  } else {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/false);
  }

  // If, after materializing the adjustment, we are left with a pointless ADDI,
  // remove it.
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    return true;
  }

  // Handle spill/fill of synthetic register classes for segment operations to
  // ensure correctness in the edge case where one gets spilled. There are many
  // possible optimizations here, but given the extreme rarity of such spills,
  // we prefer simplicity of implementation for now.
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerVSPILL(II);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerVRELOAD(II);
    return true;
  }

  return false;
}

bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by the LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, the machine instructions that include a FrameIndex operand
  // are loads, stores, and ADDI instructions.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned CalleeSavedSize = 0;
  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);

  // Estimate the stack size used to store callee-saved registers (excluding
  // reserved registers).
  BitVector ReservedRegs = getReservedRegs(MF);
  for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R; ++R) {
    if (!ReservedRegs.test(Reg))
      CalleeSavedSize += getSpillSize(*getMinimalPhysRegClass(Reg));
  }

  int64_t MaxFPOffset = Offset - CalleeSavedSize;
  if (TFI->hasFP(MF) && !shouldRealignStack(MF))
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);

  // Assume 128 bytes of spill slots to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick
  // a real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}

// Determine whether a given base register plus offset immediate is
// encodable to resolve a frame index.
bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                           Register BaseReg,
                                           int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
  return isInt<12>(Offset);
}

// Insert defining instruction(s) for a pointer to FrameIdx before
// insertion point I.
// Returns the materialized base register.
Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                         int FrameIdx,
                                                         int64_t Offset) const {
  MachineBasicBlock::iterator MBBI = MBB->begin();
  DebugLoc DL;
  if (MBBI != MBB->end())
    DL = MBBI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  Register BaseReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}

// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex operands are always represented as a
  // register followed by an immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Get the offset from the referenced frame index in the instruction,
// if there is one.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_RegMask;
  }
}

const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
  // to represent one vector register. The dwarf offset is
  // VLENB * scalable_offset / 8.
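  // For example (illustrative): fixed offset 8 plus scalable offset 16 (two
  // vector registers) emits DW_OP_plus_uconst 8, then DW_OP_constu 2,
  // DW_OP_bregx vlenb 0, DW_OP_mul, DW_OP_plus.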
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 1 : 0;
}

// Add two address hints to improve chances of being able to use a compressed
// instruction.
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg))) {
      assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!");
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // These are all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands, \p NeedGPRC will be set to true.
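  // For example: AND can only compress to c.and when all of its register
  // operands are in GPRC (x8-x15), whereas ADD can compress to c.add with any
  // GPRs, so AND sets NeedGPRC and ADD does not.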
  auto isCompressible = [](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI:
      NeedGPRC = true;
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    }
  };

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. The immediate range was already checked in isCompressible.
  // For registers, it checks whether the register is a GPRC register. Reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }
  }

  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}