106c3fb27SDimitry Andric //===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
906c3fb27SDimitry Andric // This file contains the RISC-V implementation of the TargetRegisterInfo class.
100b57cec5SDimitry Andric //
110b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
120b57cec5SDimitry Andric
130b57cec5SDimitry Andric #include "RISCVRegisterInfo.h"
140b57cec5SDimitry Andric #include "RISCV.h"
155ffd83dbSDimitry Andric #include "RISCVMachineFunctionInfo.h"
160b57cec5SDimitry Andric #include "RISCVSubtarget.h"
175f757f3fSDimitry Andric #include "llvm/ADT/SmallSet.h"
1881ad6265SDimitry Andric #include "llvm/BinaryFormat/Dwarf.h"
190b57cec5SDimitry Andric #include "llvm/CodeGen/MachineFrameInfo.h"
200b57cec5SDimitry Andric #include "llvm/CodeGen/MachineFunction.h"
210b57cec5SDimitry Andric #include "llvm/CodeGen/MachineInstrBuilder.h"
220b57cec5SDimitry Andric #include "llvm/CodeGen/RegisterScavenging.h"
230b57cec5SDimitry Andric #include "llvm/CodeGen/TargetFrameLowering.h"
240b57cec5SDimitry Andric #include "llvm/CodeGen/TargetInstrInfo.h"
254824e7fdSDimitry Andric #include "llvm/IR/DebugInfoMetadata.h"
260b57cec5SDimitry Andric #include "llvm/Support/ErrorHandling.h"
270b57cec5SDimitry Andric
280b57cec5SDimitry Andric #define GET_REGINFO_TARGET_DESC
290b57cec5SDimitry Andric #include "RISCVGenRegisterInfo.inc"
300b57cec5SDimitry Andric
310b57cec5SDimitry Andric using namespace llvm;
320b57cec5SDimitry Andric
// Debug/benchmarking escape hatches; both default to off and are hidden
// from the normal --help output.
static cl::opt<bool> DisableCostPerUse("riscv-disable-cost-per-use",
                                       cl::init(false), cl::Hidden);
static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));

// Code in this file computes registers arithmetically (e.g. X0 + N, F0_D + N);
// these asserts guarantee the TableGen-assigned enum values are consecutive so
// that arithmetic is valid.
static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
548bcb0991SDimitry Andric
// Construct the RISC-V register info. X1 (ra) is the return-address register;
// Dwarf/EH flavour and PC are 0; HwMode selects the subtarget-dependent
// register sizes generated by TableGen.
RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/ 0, /*EHFlavor*/ 0,
                           /*PC*/ 0, HwMode) {}
580b57cec5SDimitry Andric
590b57cec5SDimitry Andric const MCPhysReg *
getCalleeSavedRegs(const MachineFunction * MF) const600b57cec5SDimitry Andric RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
610b57cec5SDimitry Andric auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
62e8d8bef9SDimitry Andric if (MF->getFunction().getCallingConv() == CallingConv::GHC)
63e8d8bef9SDimitry Andric return CSR_NoRegs_SaveList;
640b57cec5SDimitry Andric if (MF->getFunction().hasFnAttribute("interrupt")) {
650b57cec5SDimitry Andric if (Subtarget.hasStdExtD())
660b57cec5SDimitry Andric return CSR_XLEN_F64_Interrupt_SaveList;
670b57cec5SDimitry Andric if (Subtarget.hasStdExtF())
68*0fca6ea1SDimitry Andric return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
697a6dacacSDimitry Andric : CSR_XLEN_F32_Interrupt_SaveList;
70*0fca6ea1SDimitry Andric return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
717a6dacacSDimitry Andric : CSR_Interrupt_SaveList;
720b57cec5SDimitry Andric }
730b57cec5SDimitry Andric
74*0fca6ea1SDimitry Andric bool HasVectorCSR =
75*0fca6ea1SDimitry Andric MF->getFunction().getCallingConv() == CallingConv::RISCV_VectorCall &&
76*0fca6ea1SDimitry Andric Subtarget.hasVInstructions();
77*0fca6ea1SDimitry Andric
780b57cec5SDimitry Andric switch (Subtarget.getTargetABI()) {
790b57cec5SDimitry Andric default:
800b57cec5SDimitry Andric llvm_unreachable("Unrecognized ABI");
817a6dacacSDimitry Andric case RISCVABI::ABI_ILP32E:
827a6dacacSDimitry Andric case RISCVABI::ABI_LP64E:
837a6dacacSDimitry Andric return CSR_ILP32E_LP64E_SaveList;
840b57cec5SDimitry Andric case RISCVABI::ABI_ILP32:
850b57cec5SDimitry Andric case RISCVABI::ABI_LP64:
86*0fca6ea1SDimitry Andric if (HasVectorCSR)
87*0fca6ea1SDimitry Andric return CSR_ILP32_LP64_V_SaveList;
880b57cec5SDimitry Andric return CSR_ILP32_LP64_SaveList;
890b57cec5SDimitry Andric case RISCVABI::ABI_ILP32F:
900b57cec5SDimitry Andric case RISCVABI::ABI_LP64F:
91*0fca6ea1SDimitry Andric if (HasVectorCSR)
92*0fca6ea1SDimitry Andric return CSR_ILP32F_LP64F_V_SaveList;
930b57cec5SDimitry Andric return CSR_ILP32F_LP64F_SaveList;
940b57cec5SDimitry Andric case RISCVABI::ABI_ILP32D:
950b57cec5SDimitry Andric case RISCVABI::ABI_LP64D:
96*0fca6ea1SDimitry Andric if (HasVectorCSR)
97*0fca6ea1SDimitry Andric return CSR_ILP32D_LP64D_V_SaveList;
980b57cec5SDimitry Andric return CSR_ILP32D_LP64D_SaveList;
990b57cec5SDimitry Andric }
1000b57cec5SDimitry Andric }
1010b57cec5SDimitry Andric
// Compute the set of registers the register allocator must never allocate:
// user-reserved GPRs, ABI-fixed registers (sp/gp/tp and, conditionally,
// fp/bp), pseudo/system registers modeled for codegen, and — on RVE — the
// nonexistent upper half of the GPR file.
BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    // Mark any GPRs requested to be reserved as such
    if (Subtarget.isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);

    // Mark all the registers defined as constant in TableGen as reserved.
    if (isConstantPhysReg(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved
  markSuperRegs(Reserved, RISCV::X2); // sp
  markSuperRegs(Reserved, RISCV::X3); // gp
  markSuperRegs(Reserved, RISCV::X4); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Additionally reserve dummy register used to form the register pair
  // beginning with 'x0' for instructions that take register pairs.
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // There are only 16 GPRs for RVE, so x16-x31 do not exist.
  if (Subtarget.hasStdExtE())
    for (MCPhysReg Reg = RISCV::X16; Reg <= RISCV::X31; Reg++)
      markSuperRegs(Reserved, Reg);

  // V registers for code generation. We handle them manually.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  // SiFive VCIX state registers.
  markSuperRegs(Reserved, RISCV::VCIX_STATE);

  // The GRAAL calling convention pins x23/x27; those GPRs are not present
  // on RVE, so the combination is rejected outright.
  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    if (Subtarget.hasStdExtE())
      report_fatal_error("Graal reserved registers do not exist in RVE");
    markSuperRegs(Reserved, RISCV::X23);
    markSuperRegs(Reserved, RISCV::X27);
  }

  // Shadow stack pointer.
  markSuperRegs(Reserved, RISCV::SSP);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}
1630b57cec5SDimitry Andric
isAsmClobberable(const MachineFunction & MF,MCRegister PhysReg) const164480093f4SDimitry Andric bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
1655ffd83dbSDimitry Andric MCRegister PhysReg) const {
166480093f4SDimitry Andric return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
167480093f4SDimitry Andric }
168480093f4SDimitry Andric
// Return the register mask used for calls that preserve no registers.
const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}
1720b57cec5SDimitry Andric
// Emit instructions computing DestReg = SrcReg + Offset, where Offset may
// carry both a fixed and a scalable (vector-length dependent) component.
// RequiredAlign constrains intermediate values when a large fixed offset has
// to be split across two ADDIs. Flag is attached to every emitted
// instruction (e.g. FrameSetup/FrameDestroy).
void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  // Nothing to do for a self-copy with a zero offset.
  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  bool KillSrcReg = false;

  // Handle the scalable component first: DestReg = SrcReg +/- VLENB * N.
  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Get vlenb and multiply vlen with the number of vector registers.
    // A scratch register is only needed when DestReg aliases SrcReg.
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
    assert(ScalableValue % 8 == 0 &&
           "Reserve the stack by the multiple of one vector size.");
    assert(isInt<32>(ScalableValue / 8) &&
           "Expect the number of vector registers within 32-bits.");
    uint32_t NumOfVReg = ScalableValue / 8;
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), ScratchReg)
        .setMIFlag(Flag);

    // With Zba, a register count of 2/4/8 folds the multiply and the add
    // into a single shNadd; otherwise multiply then ADD/SUB.
    if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
        (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
      unsigned Opc = NumOfVReg == 2 ? RISCV::SH1ADD :
        (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);
      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
          .addReg(ScratchReg, RegState::Kill).addReg(SrcReg)
          .setMIFlag(Flag);
    } else {
      TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
      BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
          .addReg(SrcReg).addReg(ScratchReg, RegState::Kill)
          .setMIFlag(Flag);
    }
    // The remaining fixed adjustment (if any) builds on top of DestReg.
    SrcReg = DestReg;
    KillSrcReg = true;
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  // A 12-bit signed immediate fits a single ADDI.
  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Try to split the offset across two ADDIs. We need to keep the intermediate
  // result aligned after each ADDI. We need to determine the maximum value we
  // can put in each ADDI. In the negative direction, we can use -2048 which is
  // always sufficiently aligned. In the positive direction, we need to find the
  // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
  // created with LUI.
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Use shNadd if doing so lets us materialize a 12 bit immediate with a single
  // instruction. This saves 1 instruction over the full lui/addi+add fallback
  // path. We avoid anything which can be done with a single lui as it might
  // be compressible. Note that the sh1add case is fully covered by the 2x addi
  // case just above and is thus omitted.
  if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
    unsigned Opc = 0;
    if (isShiftedInt<12, 3>(Val)) {
      Opc = RISCV::SH3ADD;
      Val = Val >> 3;
    } else if (isShiftedInt<12, 2>(Val)) {
      Opc = RISCV::SH2ADD;
      Val = Val >> 2;
    }
    if (Opc) {
      Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
          .addReg(ScratchReg, RegState::Kill)
          .addReg(SrcReg, getKillRegState(KillSrcReg))
          .setMIFlag(Flag);
      return;
    }
  }

  // Fallback: materialize |Val| into a scratch register, then ADD/SUB.
  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}
302bdd1243dSDimitry Andric
// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
// LMUL*VLENB bytes.
void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  // NF = number of register groups, LMUL = registers per group.
  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  // Pick the whole-register store opcode and subregister index matching LMUL.
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VS1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VS2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VS4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  // The loop below indexes subregisters as SubRegIdx + I; these asserts
  // guarantee the subreg indices are consecutive.
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  // VL holds the byte stride between groups, i.e. LMUL * VLENB.
  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
  if (auto VLEN = STI.getRealVLen()) {
    const int64_t VLENB = *VLEN / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    // Read VLENB at runtime and scale by LMUL with a shift.
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }

  Register SrcReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    // Adding implicit-use of super register to describe we are using part of
    // super register, that prevents machine verifier complaining when part of
    // subreg is undef, see comment in MachineVerifier::checkLiveness for more
    // detail.
    BuildMI(MBB, II, DL, TII->get(Opcode))
        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()))
        .addReg(SrcReg, RegState::Implicit);
    // Advance the base pointer by one group stride for all but the last store.
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  // The pseudo has been fully expanded; remove it.
  II->eraseFromParent();
}
379bdd1243dSDimitry Andric
// Split a VRELOADx_Mx pseudo into multiple whole register loads separated by
// LMUL*VLENB bytes. Mirrors lowerVSPILL, using whole-register loads instead
// of stores.
void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  // NF = number of register groups, LMUL = registers per group.
  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  // Pick the whole-register load opcode and subregister index matching LMUL.
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VL1RE8_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VL2RE8_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VL4RE8_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  // The loop below indexes subregisters as SubRegIdx + I; these asserts
  // guarantee the subreg indices are consecutive.
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  // VL holds the byte stride between groups, i.e. LMUL * VLENB.
  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
  if (auto VLEN = STI.getRealVLen()) {
    const int64_t VLENB = *VLEN / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    // Read VLENB at runtime and scale by LMUL with a shift.
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }

  Register DestReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    BuildMI(MBB, II, DL, TII->get(Opcode),
            TRI->getSubReg(DestReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()));
    // Advance the base pointer by one group stride for all but the last load.
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  // The pseudo has been fully expanded; remove it.
  II->eraseFromParent();
}
451bdd1243dSDimitry Andric
eliminateFrameIndex(MachineBasicBlock::iterator II,int SPAdj,unsigned FIOperandNum,RegScavenger * RS) const452bdd1243dSDimitry Andric bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
4530b57cec5SDimitry Andric int SPAdj, unsigned FIOperandNum,
4540b57cec5SDimitry Andric RegScavenger *RS) const {
4550b57cec5SDimitry Andric assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");
4560b57cec5SDimitry Andric
4570b57cec5SDimitry Andric MachineInstr &MI = *II;
4580b57cec5SDimitry Andric MachineFunction &MF = *MI.getParent()->getParent();
4590b57cec5SDimitry Andric MachineRegisterInfo &MRI = MF.getRegInfo();
460bdd1243dSDimitry Andric const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
4610b57cec5SDimitry Andric DebugLoc DL = MI.getDebugLoc();
4620b57cec5SDimitry Andric
4630b57cec5SDimitry Andric int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
4645ffd83dbSDimitry Andric Register FrameReg;
465fe6060f1SDimitry Andric StackOffset Offset =
466fe6060f1SDimitry Andric getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
46781ad6265SDimitry Andric bool IsRVVSpill = RISCV::isRVVSpill(MI);
468fe6060f1SDimitry Andric if (!IsRVVSpill)
469fe6060f1SDimitry Andric Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
4700b57cec5SDimitry Andric
471bdd1243dSDimitry Andric if (Offset.getScalable() &&
472bdd1243dSDimitry Andric ST.getRealMinVLen() == ST.getRealMaxVLen()) {
473bdd1243dSDimitry Andric // For an exact VLEN value, scalable offsets become constant and thus
474bdd1243dSDimitry Andric // can be converted entirely into fixed offsets.
475bdd1243dSDimitry Andric int64_t FixedValue = Offset.getFixed();
476bdd1243dSDimitry Andric int64_t ScalableValue = Offset.getScalable();
477bdd1243dSDimitry Andric assert(ScalableValue % 8 == 0 &&
478bdd1243dSDimitry Andric "Scalable offset is not a multiple of a single vector size.");
479bdd1243dSDimitry Andric int64_t NumOfVReg = ScalableValue / 8;
480bdd1243dSDimitry Andric int64_t VLENB = ST.getRealMinVLen() / 8;
481bdd1243dSDimitry Andric Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
482bdd1243dSDimitry Andric }
483bdd1243dSDimitry Andric
484fe6060f1SDimitry Andric if (!isInt<32>(Offset.getFixed())) {
4850b57cec5SDimitry Andric report_fatal_error(
4860b57cec5SDimitry Andric "Frame offsets outside of the signed 32-bit range not supported");
4870b57cec5SDimitry Andric }
4880b57cec5SDimitry Andric
489bdd1243dSDimitry Andric if (!IsRVVSpill) {
490*0fca6ea1SDimitry Andric int64_t Val = Offset.getFixed();
491*0fca6ea1SDimitry Andric int64_t Lo12 = SignExtend64<12>(Val);
492*0fca6ea1SDimitry Andric unsigned Opc = MI.getOpcode();
493*0fca6ea1SDimitry Andric if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
494bdd1243dSDimitry Andric // We chose to emit the canonical immediate sequence rather than folding
495bdd1243dSDimitry Andric // the offset into the using add under the theory that doing so doesn't
496bdd1243dSDimitry Andric // save dynamic instruction count and some target may fuse the canonical
497bdd1243dSDimitry Andric // 32 bit immediate sequence. We still need to clear the portion of the
498bdd1243dSDimitry Andric // offset encoded in the immediate.
499bdd1243dSDimitry Andric MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
500*0fca6ea1SDimitry Andric } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
501*0fca6ea1SDimitry Andric Opc == RISCV::PREFETCH_W) &&
502*0fca6ea1SDimitry Andric (Lo12 & 0b11111) != 0) {
503*0fca6ea1SDimitry Andric // Prefetch instructions require the offset to be 32 byte aligned.
504*0fca6ea1SDimitry Andric MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
505*0fca6ea1SDimitry Andric } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
506*0fca6ea1SDimitry Andric Opc == RISCV::PseudoRV32ZdinxSD) &&
507*0fca6ea1SDimitry Andric Lo12 >= 2044) {
508*0fca6ea1SDimitry Andric // This instruction will be split into 2 instructions. The second
509*0fca6ea1SDimitry Andric // instruction will add 4 to the immediate. If that would overflow 12
510*0fca6ea1SDimitry Andric // bits, we can't fold the offset.
511*0fca6ea1SDimitry Andric MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
512fe6060f1SDimitry Andric } else {
513bdd1243dSDimitry Andric // We can encode an add with 12 bit signed immediate in the immediate
514bdd1243dSDimitry Andric // operand of our user instruction. As a result, the remaining
515bdd1243dSDimitry Andric // offset can by construction, at worst, a LUI and a ADD.
516bdd1243dSDimitry Andric MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
517bdd1243dSDimitry Andric Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
518bdd1243dSDimitry Andric Offset.getScalable());
519bdd1243dSDimitry Andric }
520bdd1243dSDimitry Andric }
521fe6060f1SDimitry Andric
522bdd1243dSDimitry Andric if (Offset.getScalable() || Offset.getFixed()) {
523bdd1243dSDimitry Andric Register DestReg;
524bdd1243dSDimitry Andric if (MI.getOpcode() == RISCV::ADDI)
525bdd1243dSDimitry Andric DestReg = MI.getOperand(0).getReg();
526bdd1243dSDimitry Andric else
527bdd1243dSDimitry Andric DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
528bdd1243dSDimitry Andric adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
529bdd1243dSDimitry Andric MachineInstr::NoFlags, std::nullopt);
530bdd1243dSDimitry Andric MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
531bdd1243dSDimitry Andric /*IsImp*/false,
532bdd1243dSDimitry Andric /*IsKill*/true);
533bdd1243dSDimitry Andric } else {
534bdd1243dSDimitry Andric MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
535bdd1243dSDimitry Andric /*IsImp*/false,
536bdd1243dSDimitry Andric /*IsKill*/false);
537bdd1243dSDimitry Andric }
538bdd1243dSDimitry Andric
539bdd1243dSDimitry Andric // If after materializing the adjustment, we have a pointless ADDI, remove it
540bdd1243dSDimitry Andric if (MI.getOpcode() == RISCV::ADDI &&
541bdd1243dSDimitry Andric MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
542bdd1243dSDimitry Andric MI.getOperand(2).getImm() == 0) {
543fe6060f1SDimitry Andric MI.eraseFromParent();
544bdd1243dSDimitry Andric return true;
545fe6060f1SDimitry Andric }
546fe6060f1SDimitry Andric
547bdd1243dSDimitry Andric // Handle spill/fill of synthetic register classes for segment operations to
548bdd1243dSDimitry Andric // ensure correctness in the edge case one gets spilled. There are many
549bdd1243dSDimitry Andric // possible optimizations here, but given the extreme rarity of such spills,
550bdd1243dSDimitry Andric // we prefer simplicity of implementation for now.
551bdd1243dSDimitry Andric switch (MI.getOpcode()) {
552bdd1243dSDimitry Andric case RISCV::PseudoVSPILL2_M1:
553bdd1243dSDimitry Andric case RISCV::PseudoVSPILL2_M2:
554bdd1243dSDimitry Andric case RISCV::PseudoVSPILL2_M4:
555bdd1243dSDimitry Andric case RISCV::PseudoVSPILL3_M1:
556bdd1243dSDimitry Andric case RISCV::PseudoVSPILL3_M2:
557bdd1243dSDimitry Andric case RISCV::PseudoVSPILL4_M1:
558bdd1243dSDimitry Andric case RISCV::PseudoVSPILL4_M2:
559bdd1243dSDimitry Andric case RISCV::PseudoVSPILL5_M1:
560bdd1243dSDimitry Andric case RISCV::PseudoVSPILL6_M1:
561bdd1243dSDimitry Andric case RISCV::PseudoVSPILL7_M1:
562bdd1243dSDimitry Andric case RISCV::PseudoVSPILL8_M1:
563bdd1243dSDimitry Andric lowerVSPILL(II);
564bdd1243dSDimitry Andric return true;
565bdd1243dSDimitry Andric case RISCV::PseudoVRELOAD2_M1:
566bdd1243dSDimitry Andric case RISCV::PseudoVRELOAD2_M2:
567bdd1243dSDimitry Andric case RISCV::PseudoVRELOAD2_M4:
568bdd1243dSDimitry Andric case RISCV::PseudoVRELOAD3_M1:
569bdd1243dSDimitry Andric case RISCV::PseudoVRELOAD3_M2:
570bdd1243dSDimitry Andric case RISCV::PseudoVRELOAD4_M1:
571bdd1243dSDimitry Andric case RISCV::PseudoVRELOAD4_M2:
572bdd1243dSDimitry Andric case RISCV::PseudoVRELOAD5_M1:
573bdd1243dSDimitry Andric case RISCV::PseudoVRELOAD6_M1:
574bdd1243dSDimitry Andric case RISCV::PseudoVRELOAD7_M1:
575bdd1243dSDimitry Andric case RISCV::PseudoVRELOAD8_M1:
576bdd1243dSDimitry Andric lowerVRELOAD(II);
577bdd1243dSDimitry Andric return true;
578fe6060f1SDimitry Andric }
579fe6060f1SDimitry Andric
580bdd1243dSDimitry Andric return false;
581fe6060f1SDimitry Andric }
582bdd1243dSDimitry Andric
// Always allow virtual base registers on RISC-V: the LocalStackSlotAllocation
// pass may materialize base registers for frame-index references that are out
// of range of a 12-bit immediate (see needsFrameBaseReg below).
bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}
587bdd1243dSDimitry Andric
588bdd1243dSDimitry Andric // Returns true if the instruction's frame index reference would be better
589bdd1243dSDimitry Andric // served by a base register other than FP or SP.
590bdd1243dSDimitry Andric // Used by LocalStackSlotAllocation pass to determine which frame index
591bdd1243dSDimitry Andric // references it should create new base registers for.
needsFrameBaseReg(MachineInstr * MI,int64_t Offset) const592bdd1243dSDimitry Andric bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
593bdd1243dSDimitry Andric int64_t Offset) const {
594bdd1243dSDimitry Andric unsigned FIOperandNum = 0;
595bdd1243dSDimitry Andric for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
596bdd1243dSDimitry Andric assert(FIOperandNum < MI->getNumOperands() &&
597bdd1243dSDimitry Andric "Instr doesn't have FrameIndex operand");
598bdd1243dSDimitry Andric
599bdd1243dSDimitry Andric // For RISC-V, The machine instructions that include a FrameIndex operand
600bdd1243dSDimitry Andric // are load/store, ADDI instructions.
601bdd1243dSDimitry Andric unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
602bdd1243dSDimitry Andric if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
603bdd1243dSDimitry Andric return false;
604bdd1243dSDimitry Andric // We only generate virtual base registers for loads and stores, so
605bdd1243dSDimitry Andric // return false for everything else.
606bdd1243dSDimitry Andric if (!MI->mayLoad() && !MI->mayStore())
607bdd1243dSDimitry Andric return false;
608bdd1243dSDimitry Andric
609bdd1243dSDimitry Andric const MachineFunction &MF = *MI->getMF();
610bdd1243dSDimitry Andric const MachineFrameInfo &MFI = MF.getFrameInfo();
611bdd1243dSDimitry Andric const RISCVFrameLowering *TFI = getFrameLowering(MF);
612bdd1243dSDimitry Andric const MachineRegisterInfo &MRI = MF.getRegInfo();
613bdd1243dSDimitry Andric
614*0fca6ea1SDimitry Andric if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {
615*0fca6ea1SDimitry Andric auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
616bdd1243dSDimitry Andric // Estimate the stack size used to store callee saved registers(
617bdd1243dSDimitry Andric // excludes reserved registers).
618*0fca6ea1SDimitry Andric unsigned CalleeSavedSize = 0;
619*0fca6ea1SDimitry Andric for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R;
620*0fca6ea1SDimitry Andric ++R) {
621*0fca6ea1SDimitry Andric if (Subtarget.isRegisterReservedByUser(Reg))
622*0fca6ea1SDimitry Andric continue;
623*0fca6ea1SDimitry Andric
624*0fca6ea1SDimitry Andric if (RISCV::GPRRegClass.contains(Reg))
625*0fca6ea1SDimitry Andric CalleeSavedSize += getSpillSize(RISCV::GPRRegClass);
626*0fca6ea1SDimitry Andric else if (RISCV::FPR64RegClass.contains(Reg))
627*0fca6ea1SDimitry Andric CalleeSavedSize += getSpillSize(RISCV::FPR64RegClass);
628*0fca6ea1SDimitry Andric else if (RISCV::FPR32RegClass.contains(Reg))
629*0fca6ea1SDimitry Andric CalleeSavedSize += getSpillSize(RISCV::FPR32RegClass);
630*0fca6ea1SDimitry Andric // Ignore vector registers.
631bdd1243dSDimitry Andric }
632bdd1243dSDimitry Andric
633bdd1243dSDimitry Andric int64_t MaxFPOffset = Offset - CalleeSavedSize;
634bdd1243dSDimitry Andric return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);
635*0fca6ea1SDimitry Andric }
636bdd1243dSDimitry Andric
637bdd1243dSDimitry Andric // Assume 128 bytes spill slots size to estimate the maximum possible
638bdd1243dSDimitry Andric // offset relative to the stack pointer.
639bdd1243dSDimitry Andric // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
640bdd1243dSDimitry Andric // real one for RISC-V.
641bdd1243dSDimitry Andric int64_t MaxSPOffset = Offset + 128;
642bdd1243dSDimitry Andric MaxSPOffset += MFI.getLocalFrameSize();
643bdd1243dSDimitry Andric return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
644bdd1243dSDimitry Andric }
645bdd1243dSDimitry Andric
646bdd1243dSDimitry Andric // Determine whether a given base register plus offset immediate is
647bdd1243dSDimitry Andric // encodable to resolve a frame index.
isFrameOffsetLegal(const MachineInstr * MI,Register BaseReg,int64_t Offset) const648bdd1243dSDimitry Andric bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
649bdd1243dSDimitry Andric Register BaseReg,
650bdd1243dSDimitry Andric int64_t Offset) const {
651bdd1243dSDimitry Andric unsigned FIOperandNum = 0;
652bdd1243dSDimitry Andric while (!MI->getOperand(FIOperandNum).isFI()) {
653bdd1243dSDimitry Andric FIOperandNum++;
654bdd1243dSDimitry Andric assert(FIOperandNum < MI->getNumOperands() &&
655bdd1243dSDimitry Andric "Instr does not have a FrameIndex operand!");
656bdd1243dSDimitry Andric }
657bdd1243dSDimitry Andric
658bdd1243dSDimitry Andric Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
659bdd1243dSDimitry Andric return isInt<12>(Offset);
660bdd1243dSDimitry Andric }
661bdd1243dSDimitry Andric
662bdd1243dSDimitry Andric // Insert defining instruction(s) for a pointer to FrameIdx before
663bdd1243dSDimitry Andric // insertion point I.
664bdd1243dSDimitry Andric // Return materialized frame pointer.
materializeFrameBaseRegister(MachineBasicBlock * MBB,int FrameIdx,int64_t Offset) const665bdd1243dSDimitry Andric Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
666bdd1243dSDimitry Andric int FrameIdx,
667bdd1243dSDimitry Andric int64_t Offset) const {
668bdd1243dSDimitry Andric MachineBasicBlock::iterator MBBI = MBB->begin();
669bdd1243dSDimitry Andric DebugLoc DL;
670bdd1243dSDimitry Andric if (MBBI != MBB->end())
671bdd1243dSDimitry Andric DL = MBBI->getDebugLoc();
672bdd1243dSDimitry Andric MachineFunction *MF = MBB->getParent();
673bdd1243dSDimitry Andric MachineRegisterInfo &MFI = MF->getRegInfo();
674bdd1243dSDimitry Andric const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
675bdd1243dSDimitry Andric
676bdd1243dSDimitry Andric Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
677bdd1243dSDimitry Andric BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
678bdd1243dSDimitry Andric .addFrameIndex(FrameIdx)
679bdd1243dSDimitry Andric .addImm(Offset);
680bdd1243dSDimitry Andric return BaseReg;
681bdd1243dSDimitry Andric }
682bdd1243dSDimitry Andric
683bdd1243dSDimitry Andric // Resolve a frame index operand of an instruction to reference the
684bdd1243dSDimitry Andric // indicated base register plus offset instead.
resolveFrameIndex(MachineInstr & MI,Register BaseReg,int64_t Offset) const685bdd1243dSDimitry Andric void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
686bdd1243dSDimitry Andric int64_t Offset) const {
687bdd1243dSDimitry Andric unsigned FIOperandNum = 0;
688bdd1243dSDimitry Andric while (!MI.getOperand(FIOperandNum).isFI()) {
689bdd1243dSDimitry Andric FIOperandNum++;
690bdd1243dSDimitry Andric assert(FIOperandNum < MI.getNumOperands() &&
691bdd1243dSDimitry Andric "Instr does not have a FrameIndex operand!");
692bdd1243dSDimitry Andric }
693bdd1243dSDimitry Andric
694bdd1243dSDimitry Andric Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
695bdd1243dSDimitry Andric // FrameIndex Operands are always represented as a
696bdd1243dSDimitry Andric // register followed by an immediate.
697bdd1243dSDimitry Andric MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
698bdd1243dSDimitry Andric MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
699bdd1243dSDimitry Andric }
700bdd1243dSDimitry Andric
701bdd1243dSDimitry Andric // Get the offset from the referenced frame index in the instruction,
702bdd1243dSDimitry Andric // if there is one.
getFrameIndexInstrOffset(const MachineInstr * MI,int Idx) const703bdd1243dSDimitry Andric int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
704bdd1243dSDimitry Andric int Idx) const {
705bdd1243dSDimitry Andric assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
706bdd1243dSDimitry Andric RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
707bdd1243dSDimitry Andric "The MI must be I or S format.");
708bdd1243dSDimitry Andric assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
709bdd1243dSDimitry Andric "FrameIndex operand");
710bdd1243dSDimitry Andric return MI->getOperand(Idx + 1).getImm();
7110b57cec5SDimitry Andric }
7120b57cec5SDimitry Andric
getFrameRegister(const MachineFunction & MF) const7130b57cec5SDimitry Andric Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
7140b57cec5SDimitry Andric const TargetFrameLowering *TFI = getFrameLowering(MF);
7150b57cec5SDimitry Andric return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
7160b57cec5SDimitry Andric }
7170b57cec5SDimitry Andric
7180b57cec5SDimitry Andric const uint32_t *
getCallPreservedMask(const MachineFunction & MF,CallingConv::ID CC) const7190b57cec5SDimitry Andric RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF,
720e8d8bef9SDimitry Andric CallingConv::ID CC) const {
7210b57cec5SDimitry Andric auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
7220b57cec5SDimitry Andric
723e8d8bef9SDimitry Andric if (CC == CallingConv::GHC)
724e8d8bef9SDimitry Andric return CSR_NoRegs_RegMask;
7250b57cec5SDimitry Andric switch (Subtarget.getTargetABI()) {
7260b57cec5SDimitry Andric default:
7270b57cec5SDimitry Andric llvm_unreachable("Unrecognized ABI");
7287a6dacacSDimitry Andric case RISCVABI::ABI_ILP32E:
7297a6dacacSDimitry Andric case RISCVABI::ABI_LP64E:
7307a6dacacSDimitry Andric return CSR_ILP32E_LP64E_RegMask;
7310b57cec5SDimitry Andric case RISCVABI::ABI_ILP32:
7320b57cec5SDimitry Andric case RISCVABI::ABI_LP64:
733*0fca6ea1SDimitry Andric if (CC == CallingConv::RISCV_VectorCall)
734*0fca6ea1SDimitry Andric return CSR_ILP32_LP64_V_RegMask;
7350b57cec5SDimitry Andric return CSR_ILP32_LP64_RegMask;
7360b57cec5SDimitry Andric case RISCVABI::ABI_ILP32F:
7370b57cec5SDimitry Andric case RISCVABI::ABI_LP64F:
738*0fca6ea1SDimitry Andric if (CC == CallingConv::RISCV_VectorCall)
739*0fca6ea1SDimitry Andric return CSR_ILP32F_LP64F_V_RegMask;
7400b57cec5SDimitry Andric return CSR_ILP32F_LP64F_RegMask;
7410b57cec5SDimitry Andric case RISCVABI::ABI_ILP32D:
7420b57cec5SDimitry Andric case RISCVABI::ABI_LP64D:
743*0fca6ea1SDimitry Andric if (CC == CallingConv::RISCV_VectorCall)
744*0fca6ea1SDimitry Andric return CSR_ILP32D_LP64D_V_RegMask;
7450b57cec5SDimitry Andric return CSR_ILP32D_LP64D_RegMask;
7460b57cec5SDimitry Andric }
7470b57cec5SDimitry Andric }
748fe6060f1SDimitry Andric
749fe6060f1SDimitry Andric const TargetRegisterClass *
getLargestLegalSuperClass(const TargetRegisterClass * RC,const MachineFunction &) const750fe6060f1SDimitry Andric RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
751fe6060f1SDimitry Andric const MachineFunction &) const {
752fe6060f1SDimitry Andric if (RC == &RISCV::VMV0RegClass)
753fe6060f1SDimitry Andric return &RISCV::VRRegClass;
7545f757f3fSDimitry Andric if (RC == &RISCV::VRNoV0RegClass)
7555f757f3fSDimitry Andric return &RISCV::VRRegClass;
7565f757f3fSDimitry Andric if (RC == &RISCV::VRM2NoV0RegClass)
7575f757f3fSDimitry Andric return &RISCV::VRM2RegClass;
7585f757f3fSDimitry Andric if (RC == &RISCV::VRM4NoV0RegClass)
7595f757f3fSDimitry Andric return &RISCV::VRM4RegClass;
7605f757f3fSDimitry Andric if (RC == &RISCV::VRM8NoV0RegClass)
7615f757f3fSDimitry Andric return &RISCV::VRM8RegClass;
762fe6060f1SDimitry Andric return RC;
763fe6060f1SDimitry Andric }
7644824e7fdSDimitry Andric
getOffsetOpcodes(const StackOffset & Offset,SmallVectorImpl<uint64_t> & Ops) const7654824e7fdSDimitry Andric void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
7664824e7fdSDimitry Andric SmallVectorImpl<uint64_t> &Ops) const {
7674824e7fdSDimitry Andric // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
7684824e7fdSDimitry Andric // to represent one vector register. The dwarf offset is
7694824e7fdSDimitry Andric // VLENB * scalable_offset / 8.
7704824e7fdSDimitry Andric assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");
7714824e7fdSDimitry Andric
7724824e7fdSDimitry Andric // Add fixed-sized offset using existing DIExpression interface.
7734824e7fdSDimitry Andric DIExpression::appendOffset(Ops, Offset.getFixed());
7744824e7fdSDimitry Andric
7754824e7fdSDimitry Andric unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
7764824e7fdSDimitry Andric int64_t VLENBSized = Offset.getScalable() / 8;
7774824e7fdSDimitry Andric if (VLENBSized > 0) {
7784824e7fdSDimitry Andric Ops.push_back(dwarf::DW_OP_constu);
7794824e7fdSDimitry Andric Ops.push_back(VLENBSized);
7804824e7fdSDimitry Andric Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
7814824e7fdSDimitry Andric Ops.push_back(dwarf::DW_OP_mul);
7824824e7fdSDimitry Andric Ops.push_back(dwarf::DW_OP_plus);
7834824e7fdSDimitry Andric } else if (VLENBSized < 0) {
7844824e7fdSDimitry Andric Ops.push_back(dwarf::DW_OP_constu);
7854824e7fdSDimitry Andric Ops.push_back(-VLENBSized);
7864824e7fdSDimitry Andric Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
7874824e7fdSDimitry Andric Ops.push_back(dwarf::DW_OP_mul);
7884824e7fdSDimitry Andric Ops.push_back(dwarf::DW_OP_minus);
7894824e7fdSDimitry Andric }
7904824e7fdSDimitry Andric }
79104eeddc0SDimitry Andric
79204eeddc0SDimitry Andric unsigned
getRegisterCostTableIndex(const MachineFunction & MF) const79304eeddc0SDimitry Andric RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
794*0fca6ea1SDimitry Andric return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() &&
795*0fca6ea1SDimitry Andric !DisableCostPerUse
796*0fca6ea1SDimitry Andric ? 1
797*0fca6ea1SDimitry Andric : 0;
798bdd1243dSDimitry Andric }
799bdd1243dSDimitry Andric
// Add two address hints to improve chances of being able to use a compressed
// instruction.
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  // Collect the target-independent (copy-related) hints first; compression
  // hints are appended after them below.
  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  // Record MO's physical register (its current assignment from VRM if
  // virtual) as a hint for VirtReg, provided it is usable: GPRC when the
  // instruction requires it, no subregister indices on either operand, not
  // reserved, and not already present in Hints.
  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    // TODO: Support GPRPair subregisters? Need to be careful with even/odd
    // registers. If the virtual register is an odd register of a pair and the
    // physical register is even (or vice versa), we should not add the hint.
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
        !MO.getSubReg() && !VRRegMO.getSubReg()) {
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // This is all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands \p NeedGPRC will be set to true.
  auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI: {
      NeedGPRC = true;
      if (!MI.getOperand(2).isImm())
        return false;
      int64_t Imm = MI.getOperand(2).getImm();
      // c.andi takes a 6-bit signed immediate.
      if (isInt<6>(Imm))
        return true;
      // c.zext.b
      return Subtarget.hasStdExtZcb() && Imm == 255;
    }
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      // Only compressible with a 6-bit signed immediate.
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::MUL:
    case RISCV::SEXT_B:
    case RISCV::SEXT_H:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      // c.mul, c.sext.b, c.sext.h, c.zext.h
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb();
    case RISCV::ADD_UW:
      // c.zext.w
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
             MI.getOperand(2).getReg() == RISCV::X0;
    case RISCV::XORI:
      // c.not
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
             MI.getOperand(2).getImm() == -1;
    }
  };

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. Immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  // Walk every non-debug use/def of VirtReg. For each compressible
  // instruction, hint the register of the tied/other operand so the
  // allocation can produce the compressed two-address form.
  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        // VirtReg is the destination: hint operand 1, and for commutable
        // instructions operand 2 as well (GPRC constraints permitting).
        if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
            MI.getOpcode() == RISCV::ADD_UW ||
            isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 && (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
                                isCompressibleOpnd(MI.getOperand(2)))) {
        // VirtReg is the first source: hint the destination register.
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        // VirtReg is the second source of a commutable instruction: hint the
        // destination register.
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }
  }

  // Append the collected two-address hints in allocation order, after the
  // copy hints the base implementation already produced.
  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}
929