//===-- RISCVInstrInfo.cpp - RISC-V Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVInstrInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));

static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
    "riscv-force-machine-combiner-strategy", cl::Hidden,
    cl::desc("Force machine combiner to use a specific strategy for machine "
             "trace metrics evaluation."),
    cl::init(MachineTraceStrategy::TS_NumStrategies),
    cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
                          "Local strategy."),
               clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
                          "MinInstrCount strategy.")));

namespace llvm::RISCVVPseudosTable {

using namespace RISCV;

#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace llvm::RISCVVPseudosTable

RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
    : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
      STI(STI) {}

MCInst RISCVInstrInfo::getNop() const {
  if (STI.hasStdExtCOrZca())
    return MCInstBuilder(RISCV::C_NOP);
  return MCInstBuilder(RISCV::ADDI)
      .addReg(RISCV::X0)
      .addReg(RISCV::X0)
      .addImm(0);
}
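
// Note: "addi x0, x0, 0" is the canonical RISC-V nop encoding; c.nop is its
// 16-bit compressed form, available whenever the C or Zca extension is
// implemented.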

unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  unsigned Dummy;
  return isLoadFromStackSlot(MI, FrameIndex, Dummy);
}

unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex,
                                             unsigned &MemBytes) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::LB:
  case RISCV::LBU:
    MemBytes = 1;
    break;
  case RISCV::LH:
  case RISCV::LHU:
  case RISCV::FLH:
    MemBytes = 2;
    break;
  case RISCV::LW:
  case RISCV::FLW:
  case RISCV::LWU:
    MemBytes = 4;
    break;
  case RISCV::LD:
  case RISCV::FLD:
    MemBytes = 8;
    break;
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}

unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  unsigned Dummy;
  return isStoreToStackSlot(MI, FrameIndex, Dummy);
}

unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex,
                                            unsigned &MemBytes) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::SB:
    MemBytes = 1;
    break;
  case RISCV::SH:
  case RISCV::FSH:
    MemBytes = 2;
    break;
  case RISCV::SW:
  case RISCV::FSW:
    MemBytes = 4;
    break;
  case RISCV::SD:
  case RISCV::FSD:
    MemBytes = 8;
    break;
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}

static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}
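
// For example, copying the 4-register tuple v4-v7 to v6-v9 low-to-high would
// overwrite v6/v7 before they are read; forwardCopyWillClobberTuple detects
// this overlap so copyPhysReg can copy the sub-registers in reverse instead.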

static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
                                   const MachineBasicBlock &MBB,
                                   MachineBasicBlock::const_iterator MBBI,
                                   MachineBasicBlock::const_iterator &DefMBBI,
                                   RISCVII::VLMUL LMul) {
  if (PreferWholeRegisterMove)
    return false;

  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");
  Register SrcReg = MBBI->getOperand(1).getReg();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
  while (MBBI != MBB.begin()) {
    --MBBI;
    if (MBBI->isMetaInstruction())
      continue;

    if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
        MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
        MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
      // There is a vsetvli between the COPY and the source-defining
      // instruction:
      // vy = def_vop ...  (producing instruction)
      // ...
      // vsetvli
      // ...
      // vx = COPY vy
      if (!FoundDef) {
        if (!FirstVSetVLI) {
          FirstVSetVLI = true;
          unsigned FirstVType = MBBI->getOperand(2).getImm();
          RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
          FirstSEW = RISCVVType::getSEW(FirstVType);
          // The first encountered vsetvli must have the same LMUL as the
          // register class of the COPY.
          if (FirstLMul != LMul)
            return false;
        }
        // Only permit `vsetvli x0, x0, vtype` between the COPY and the
        // source-defining instruction.
        if (MBBI->getOperand(0).getReg() != RISCV::X0)
          return false;
        if (MBBI->getOperand(1).isImm())
          return false;
        if (MBBI->getOperand(1).getReg() != RISCV::X0)
          return false;
        continue;
      }

      // MBBI is the first vsetvli before the producing instruction.
      unsigned VType = MBBI->getOperand(2).getImm();
      // If there was a vsetvli between the COPY and the producing
      // instruction, the SEW must match.
      if (FirstVSetVLI) {
        if (RISCVVType::getSEW(VType) != FirstSEW)
          return false;
      }

      // If the vsetvli is tail undisturbed, keep the whole register move.
      if (!RISCVVType::isTailAgnostic(VType))
        return false;

      // The checking is conservative. We only have register classes for
      // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
      // for fractional LMUL operations. However, we cannot use the vsetvli
      // LMUL for widening operations: the result of a widening operation is
      // 2 x LMUL.
      return LMul == RISCVVType::getVLMUL(VType);
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
      return false;
    } else if (MBBI->getNumDefs()) {
      // Check all the instructions which will change VL.
      // For example, vleff has an implicit def of VL.
      if (MBBI->modifiesRegister(RISCV::VL))
        return false;

      // Only convert whole register copies to vmv.v.v when the defining
      // value appears in the explicit operands.
      for (const MachineOperand &MO : MBBI->explicit_operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
          // We only permit the source of the COPY to have the same LMUL as
          // the defined operand. There are cases where we must keep the
          // whole register copy because the LMUL is different.
          // For example,
          // $x0 = PseudoVSETIVLI 4, 73   // vsetivli zero, 4, e16,m2,ta,mu
          // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
          // # The COPY may be created by the vlmul_trunc intrinsic.
          // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
          //
          // After widening, the valid value will be 4 x e32 elements. If we
          // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
          // FIXME: The COPY of a subregister of a Zvlsseg register will not
          // be convertible to vmv.v.[v|i] under this constraint.
          if (MO.getReg() != SrcReg)
            return false;

          // For widening reduction instructions with an LMUL_1 input vector,
          // checking only the LMUL is insufficient because the reduction
          // result is always LMUL_1.
          // For example,
          // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
          // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
          // $v26 = COPY killed renamable $v8
          // After widening, the valid value will be 1 x e16 elements. If we
          // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
          uint64_t TSFlags = MBBI->getDesc().TSFlags;
          if (RISCVII::isRVVWideningReduction(TSFlags))
            return false;

          // If the producing instruction does not depend on vsetvli, do not
          // convert the COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
          if (!RISCVII::hasSEWOp(TSFlags) || !RISCVII::hasVLOp(TSFlags))
            return false;

          // Found the definition.
          FoundDef = true;
          DefMBBI = MBBI;
          break;
        }
      }
    }
  }

  return false;
}

void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc) const {
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  if (RISCV::GPRPF64RegClass.contains(DstReg, SrcReg)) {
    // Emit an ADDI for both parts of GPRPF64.
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_32))
        .addReg(TRI->getSubReg(SrcReg, RISCV::sub_32), getKillRegState(KillSrc))
        .addImm(0);
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_32_hi))
        .addReg(TRI->getSubReg(SrcReg, RISCV::sub_32_hi),
                getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  // Handle copy from CSR.
  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
        .addReg(RISCV::X0);
    return;
  }

  // FPR->FPR copies and VR->VR copies.
  unsigned Opc;
  bool IsScalableVector = true;
  unsigned NF = 1;
  RISCVII::VLMUL LMul = RISCVII::LMUL_1;
  unsigned SubRegIdx = RISCV::sub_vrm1_0;
  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
    } else {
      assert(STI.hasStdExtF() && STI.hasStdExtZfhmin() &&
             "Unexpected extensions");
      // The Zfhmin subset doesn't have FSGNJ_H, so replace FSGNJ_H with
      // FSGNJ_S on the matching FPR32 super-registers.
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
    }
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::FSGNJ_S;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::FSGNJ_D;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV4R_V;
    LMul = RISCVII::LMUL_4;
  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV8R_V;
    LMul = RISCVII::LMUL_8;
  } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 2;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 2;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    NF = 2;
    LMul = RISCVII::LMUL_4;
  } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 3;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 3;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 4;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 4;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 5;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 6;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 7;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 8;
    LMul = RISCVII::LMUL_1;
  } else {
    llvm_unreachable("Impossible reg-to-reg copy");
  }

  if (IsScalableVector) {
    bool UseVMV_V_V = false;
    bool UseVMV_V_I = false;
    MachineBasicBlock::const_iterator DefMBBI;
    if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
      UseVMV_V_V = true;
      // We only need to handle LMUL = 1/2/4/8 here because we only define
      // vector register classes for LMUL = 1/2/4/8.
      unsigned VIOpc;
      switch (LMul) {
      default:
        llvm_unreachable("Impossible LMUL for vector register copy.");
      case RISCVII::LMUL_1:
        Opc = RISCV::PseudoVMV_V_V_M1;
        VIOpc = RISCV::PseudoVMV_V_I_M1;
        break;
      case RISCVII::LMUL_2:
        Opc = RISCV::PseudoVMV_V_V_M2;
        VIOpc = RISCV::PseudoVMV_V_I_M2;
        break;
      case RISCVII::LMUL_4:
        Opc = RISCV::PseudoVMV_V_V_M4;
        VIOpc = RISCV::PseudoVMV_V_I_M4;
        break;
      case RISCVII::LMUL_8:
        Opc = RISCV::PseudoVMV_V_V_M8;
        VIOpc = RISCV::PseudoVMV_V_I_M8;
        break;
      }

      if (DefMBBI->getOpcode() == VIOpc) {
        UseVMV_V_I = true;
        Opc = VIOpc;
      }
    }

    if (NF == 1) {
      auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
      if (UseVMV_V_V)
        MIB.addReg(DstReg, RegState::Undef);
      if (UseVMV_V_I)
        MIB = MIB.add(DefMBBI->getOperand(2));
      else
        MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
      if (UseVMV_V_V) {
        const MCInstrDesc &Desc = DefMBBI->getDesc();
        MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc)));  // AVL
        MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
        MIB.addImm(0);                                            // tu, mu
        MIB.addReg(RISCV::VL, RegState::Implicit);
        MIB.addReg(RISCV::VTYPE, RegState::Implicit);
      }
    } else {
      int I = 0, End = NF, Incr = 1;
      unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
      unsigned DstEncoding = TRI->getEncodingValue(DstReg);
      unsigned LMulVal;
      bool Fractional;
      std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
      assert(!Fractional && "It is impossible to have a fractional LMUL here.");
      if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
        I = NF - 1;
        End = -1;
        Incr = -1;
      }

      for (; I != End; I += Incr) {
        auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
                           TRI->getSubReg(DstReg, SubRegIdx + I));
        if (UseVMV_V_V)
          MIB.addReg(TRI->getSubReg(DstReg, SubRegIdx + I),
                     RegState::Undef);
        if (UseVMV_V_I)
          MIB = MIB.add(DefMBBI->getOperand(2));
        else
          MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
                           getKillRegState(KillSrc));
        if (UseVMV_V_V) {
          const MCInstrDesc &Desc = DefMBBI->getDesc();
          MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc)));  // AVL
          MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
          MIB.addImm(0);                                            // tu, mu
          MIB.addReg(RISCV::VL, RegState::Implicit);
          MIB.addReg(RISCV::VTYPE, RegState::Implicit);
        }
      }
    }
  } else {
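    // fsgnj rd, rs, rs (sign-injecting a value with its own sign) is the
    // canonical floating-point move, which is why SrcReg is added twice.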
    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(SrcReg, getKillRegState(KillSrc));
  }
}

void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();

  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::SW : RISCV::SD;
    IsScalableVector = false;
  } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  else
    llvm_unreachable("Can't store this register to stack slot");

  if (IsScalableVector) {
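    // The spill size of a scalable vector depends on VLEN at run time, so
    // the memory operand uses UnknownSize and the slot is retagged as a
    // ScalableVector stack object.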
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
        MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));

    MFI.setStackID(FI, TargetStackID::ScalableVector);
    BuildMI(MBB, I, DL, get(Opcode))
        .addReg(SrcReg, getKillRegState(IsKill))
        .addFrameIndex(FI)
        .addMemOperand(MMO);
  } else {
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

    BuildMI(MBB, I, DL, get(Opcode))
        .addReg(SrcReg, getKillRegState(IsKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO);
  }
}

void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI,
                                          Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();

  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::LW : RISCV::LD;
    IsScalableVector = false;
  } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
  else
    llvm_unreachable("Can't load this register from stack slot");

  if (IsScalableVector) {
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
        MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));

    MFI.setStackID(FI, TargetStackID::ScalableVector);
    BuildMI(MBB, I, DL, get(Opcode), DstReg)
        .addFrameIndex(FI)
        .addMemOperand(MMO);
  } else {
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

    BuildMI(MBB, I, DL, get(Opcode), DstReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO);
  }
}

MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // The below optimizations narrow the load, so they are only valid for
  // little-endian targets.
  // TODO: Support big endian by adding an offset into the frame object?
  if (MF.getDataLayout().isBigEndian())
    return nullptr;

  // Fold a load from the stack followed by sext.w into lw.
  // TODO: Fold with sext.b, sext.h, zext.b, zext.h, zext.w?
  if (Ops.size() != 1 || Ops[0] != 1)
    return nullptr;

  unsigned LoadOpc;
  switch (MI.getOpcode()) {
  default:
    if (RISCV::isSEXT_W(MI)) {
      LoadOpc = RISCV::LW;
      break;
    }
    if (RISCV::isZEXT_W(MI)) {
      LoadOpc = RISCV::LWU;
      break;
    }
    if (RISCV::isZEXT_B(MI)) {
      LoadOpc = RISCV::LBU;
      break;
    }
    return nullptr;
  case RISCV::SEXT_H:
    LoadOpc = RISCV::LH;
    break;
  case RISCV::SEXT_B:
    LoadOpc = RISCV::LB;
    break;
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    LoadOpc = RISCV::LHU;
    break;
  }

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FrameIndex),
      MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
      MFI.getObjectAlign(FrameIndex));

  Register DstReg = MI.getOperand(0).getReg();
  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
                 DstReg)
      .addFrameIndex(FrameIndex)
      .addImm(0)
      .addMemOperand(MMO);
}

void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag) const {
  Register SrcReg = RISCV::X0;

  if (!STI.is64Bit() && !isInt<32>(Val))
    report_fatal_error("Should only materialize 32-bit constants for RV32");

  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
  assert(!Seq.empty());

  for (const RISCVMatInt::Inst &Inst : Seq) {
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
          .addImm(Inst.getImm())
          .setMIFlag(Flag);
      break;
    case RISCVMatInt::RegX0:
      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
          .addReg(SrcReg, RegState::Kill)
          .addReg(RISCV::X0)
          .setMIFlag(Flag);
      break;
    case RISCVMatInt::RegReg:
      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
          .addReg(SrcReg, RegState::Kill)
          .addReg(SrcReg, RegState::Kill)
          .setMIFlag(Flag);
      break;
    case RISCVMatInt::RegImm:
      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
          .addReg(SrcReg, RegState::Kill)
          .addImm(Inst.getImm())
          .setMIFlag(Flag);
      break;
    }

    // Only the first instruction has X0 as its source.
    SrcReg = DstReg;
  }
}
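
// Illustrative sketch (values assumed for the example): materializing
// 0x12345678 with movImm on RV32 typically expands to
//   lui  rd, 0x12345      ; rd = 0x12345000
//   addi rd, rd, 0x678    ; rd = 0x12345678
// RISCVMatInt::generateInstSeq selects the exact sequence based on the value
// and the enabled target features.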

static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc) {
  switch (Opc) {
  default:
    return RISCVCC::COND_INVALID;
  case RISCV::BEQ:
    return RISCVCC::COND_EQ;
  case RISCV::BNE:
    return RISCVCC::COND_NE;
  case RISCV::BLT:
    return RISCVCC::COND_LT;
  case RISCV::BGE:
    return RISCVCC::COND_GE;
  case RISCV::BLTU:
    return RISCVCC::COND_LTU;
  case RISCV::BGEU:
    return RISCVCC::COND_GEU;
  }
}

// The contents of values added to Cond are not examined outside of
// RISCVInstrInfo, giving us flexibility in what to push to it. For RISC-V, we
// push the condition code and the two compare operands: CC, Reg1, Reg2.
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  assert(LastInst.getDesc().isConditionalBranch() &&
         "Unknown conditional branch");
  Target = LastInst.getOperand(2).getMBB();
  unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
  Cond.push_back(MachineOperand::CreateImm(CC));
  Cond.push_back(LastInst.getOperand(0));
  Cond.push_back(LastInst.getOperand(1));
}

const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC) const {
  switch (CC) {
  default:
    llvm_unreachable("Unknown condition code!");
  case RISCVCC::COND_EQ:
    return get(RISCV::BEQ);
  case RISCVCC::COND_NE:
    return get(RISCV::BNE);
  case RISCVCC::COND_LT:
    return get(RISCV::BLT);
  case RISCVCC::COND_GE:
    return get(RISCV::BGE);
  case RISCVCC::COND_LTU:
    return get(RISCV::BLTU);
  case RISCVCC::COND_GEU:
    return get(RISCV::BGEU);
  }
}

RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unrecognized conditional branch");
  case RISCVCC::COND_EQ:
    return RISCVCC::COND_NE;
  case RISCVCC::COND_NE:
    return RISCVCC::COND_EQ;
  case RISCVCC::COND_LT:
    return RISCVCC::COND_GE;
  case RISCVCC::COND_GE:
    return RISCVCC::COND_LT;
  case RISCVCC::COND_LTU:
    return RISCVCC::COND_GEU;
  case RISCVCC::COND_GEU:
    return RISCVCC::COND_LTU;
  }
}

bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after
  // FirstUncondOrIndirectBr.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = getBranchDestBlock(*I);
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(*std::prev(I), TBB, Cond);
    FBB = getBranchDestBlock(*I);
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}

unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  if (BytesRemoved)
    *BytesRemoved = 0;
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();
  return 2;
}

// Inserts a branch into the end of the specified MachineBasicBlock, returning
// the number of instructions inserted.
unsigned RISCVInstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  if (BytesAdded)
    *BytesAdded = 0;

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISC-V branch conditions have three components!");

  // Unconditional branch.
  if (Cond.empty()) {
    MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
    if (BytesAdded)
      *BytesAdded += getInstSizeInBytes(MI);
    return 1;
  }

  // Either a one-way or two-way conditional branch.
  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
  MachineInstr &CondMI =
      *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(CondMI);

  // One-way conditional branch.
  if (!FBB)
    return 1;

  // Two-way conditional branch.
  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(MI);
  return 2;
}

void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                          MachineBasicBlock &DestBB,
                                          MachineBasicBlock &RestoreBB,
                                          const DebugLoc &DL, int64_t BrOffset,
                                          RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();

  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // FIXME: A virtual register must be used initially, as the register
  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
  // uses the same workaround).
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  auto II = MBB.end();
  // We may also update the jump target to RestoreBB later.
  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
                          .addReg(ScratchReg, RegState::Define | RegState::Dead)
                          .addMBB(&DestBB, RISCVII::MO_CALL);

  RS->enterBasicBlockEnd(MBB);
  Register TmpGPR =
      RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(),
                                    /*RestoreAfter=*/false, /*SpAdj=*/0,
                                    /*AllowSpill=*/false);
  if (TmpGPR != RISCV::NoRegister)
    RS->setRegUsed(TmpGPR);
  else {
    // The case when there is no scavenged register needs special handling.

    // Pick s11 because it doesn't make a difference.
    TmpGPR = RISCV::X27;

    int FrameIndex = RVFI->getBranchRelaxationScratchFrameIndex();
    if (FrameIndex == -1)
      report_fatal_error("underestimated function size");

    storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
                        &RISCV::GPRRegClass, TRI, Register());
    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
                             /*SpAdj=*/0, /*FIOperandNum=*/1);

    MI.getOperand(1).setMBB(&RestoreBB);

    loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
                         &RISCV::GPRRegClass, TRI, Register());
    TRI->eliminateFrameIndex(RestoreBB.back(),
                             /*SpAdj=*/0, /*FIOperandNum=*/1);
  }

  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
}

bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
  Cond[0].setImm(getOppositeBranchCondition(CC));
  return false;
}

MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}

bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();
  // Ideally we could determine the supported branch offset from the
  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
  // PseudoBR.
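  // For reference: B-type conditional branches encode a 13-bit signed,
  // 2-byte-aligned offset (+/-4 KiB) and JAL a 21-bit signed offset
  // (+/-1 MiB), which is what the isIntN(13) / isIntN(21) checks below test.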
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
    return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
  }
}

// If the operation has a predicated pseudo instruction, return the pseudo
// instruction opcode. Otherwise, return RISCV::INSTRUCTION_LIST_END.
// TODO: Support more operations.
unsigned getPredicatedOpcode(unsigned Opcode) {
  switch (Opcode) {
  case RISCV::ADD:   return RISCV::PseudoCCADD;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;
  case RISCV::AND:   return RISCV::PseudoCCAND;
  case RISCV::OR:    return RISCV::PseudoCCOR;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;
  }

  return RISCV::INSTRUCTION_LIST_END;
}
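
// A sketch of the intent (expansion details assumed): on subtargets with the
// short-forward-branch optimization, a predicated pseudo such as PseudoCCADD
// is later expanded back into a conditional branch around the ALU op,
// roughly:
//   mv   dst, falsev
//   bCC' lhs, rhs, 1f      ; branch on the inverted condition
//   add  dst, op1, op2
// 1:
// so the select and the folded operation travel as one machine instruction
// until late expansion.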

/// Identify instructions that can be folded into a CCMOV instruction, and
/// return the defining instruction.
static MachineInstr *canFoldAsPredicatedOp(Register Reg,
                                           const MachineRegisterInfo &MRI,
                                           const TargetInstrInfo *TII) {
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  MachineInstr *MI = MRI.getVRegDef(Reg);
  if (!MI)
    return nullptr;
  // Check if MI can be predicated and folded into the CCMOV.
  if (getPredicatedOpcode(MI->getOpcode()) == RISCV::INSTRUCTION_LIST_END)
    return nullptr;
  // Check if MI has any other defs or physreg uses.
  for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) {
    // Reject frame index operands; PEI can't handle the predicated pseudos.
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return nullptr;
    if (!MO.isReg())
      continue;
    // MI can't have any tied operands; that would conflict with predication.
    if (MO.isTied())
      return nullptr;
    if (MO.isDef())
      return nullptr;
    // Allow constant physregs.
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
      return nullptr;
  }
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
    return nullptr;
  return MI;
}

bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  // CCMOV operands:
  // 0: Def.
  // 1: LHS of compare.
  // 2: RHS of compare.
  // 3: Condition code.
  // 4: False use.
  // 5: True use.
  TrueOp = 5;
  FalseOp = 4;
  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  // We can only fold when we support the short forward branch optimization.
  Optimizable = STI.hasShortForwardBranchOpt();
  return false;
}

MachineInstr *
RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                               bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())
    return nullptr;

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI =
      canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this);
  bool Invert = !DefMI;
  if (!DefMI)
    DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this);
  if (!DefMI)
    return nullptr;

  // Find the new register class to use.
  MachineOperand FalseReg = MI.getOperand(Invert ? 5 : 4);
  Register DestReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;

  unsigned PredOpc = getPredicatedOpcode(DefMI->getOpcode());
  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

  // Create a new predicated version of DefMI.
  MachineInstrBuilder NewMI =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);

  // Copy the condition portion.
  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));

  // Add the condition code, inverting it if necessary.
  auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
  if (Invert)
    CC = RISCVCC::getOppositeBranchCondition(CC);
  NewMI.addImm(CC);

  // Copy the false register.
  NewMI.add(FalseReg);

  // Copy all the DefMI operands.
  const MCInstrDesc &DefDesc = DefMI->getDesc();
  for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e; ++i)
    NewMI.add(DefMI->getOperand(i));

  // Update the SeenMIs set: register the newly created MI and erase the
  // removed DefMI.
  SeenMIs.insert(NewMI);
  SeenMIs.erase(DefMI);

  // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
  // DefMI would be invalid when transferred inside the loop.  Checking for a
  // loop is expensive, but at least remove kill flags if they are in different
  // BBs.
  if (DefMI->getParent() != MI.getParent())
    NewMI->clearKillInfo();

  // The caller will erase MI, but not DefMI.
  DefMI->eraseFromParent();
  return NewMI;
}

unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  if (MI.isMetaInstruction())
    return 0;

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *TM.getMCAsmInfo());
  }

  if (!MI.memoperands_empty()) {
    MachineMemOperand *MMO = *(MI.memoperands_begin());
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &ST = MF.getSubtarget<RISCVSubtarget>();
    if (ST.hasStdExtZihintntl() && MMO->isNonTemporal()) {
      if (ST.hasStdExtCOrZca() && ST.enableRVCHintInstrs()) {
        if (isCompressibleInst(MI, STI))
          return 4; // c.ntl.all + c.load/c.store
        return 6;   // c.ntl.all + load/store
      }
      return 8; // ntl.all + load/store
    }
  }

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))
      return 2;
  }
  return get(Opcode).getSize();
}

unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  case RISCV::ADDI:
  case RISCV::ORI:
  case RISCV::XORI:
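    // These are as cheap as a move when they either materialize a constant
    // from x0 (e.g. addi rd, x0, imm) or copy rs1 unchanged (imm == 0).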
1325     return (MI.getOperand(1).isReg() &&
1326             MI.getOperand(1).getReg() == RISCV::X0) ||
1327            (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
1328   }
1329   return MI.isAsCheapAsAMove();
1330 }
1331 
1332 std::optional<DestSourcePair>
1333 RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
1334   if (MI.isMoveReg())
1335     return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1336   switch (MI.getOpcode()) {
1337   default:
1338     break;
1339   case RISCV::ADDI:
1340     // Operand 1 can be a frameindex but callers expect registers
1341     if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
1342         MI.getOperand(2).getImm() == 0)
1343       return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1344     break;
1345   case RISCV::FSGNJ_D:
1346   case RISCV::FSGNJ_S:
1347   case RISCV::FSGNJ_H:
1348   case RISCV::FSGNJ_D_INX:
1349   case RISCV::FSGNJ_D_IN32X:
1350   case RISCV::FSGNJ_S_INX:
1351   case RISCV::FSGNJ_H_INX:
1352     // The canonical floating-point move is fsgnj rd, rs, rs.
1353     if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1354         MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
1355       return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1356     break;
1357   }
1358   return std::nullopt;
1359 }
1360 
1361 MachineTraceStrategy RISCVInstrInfo::getMachineCombinerTraceStrategy() const {
1362   if (ForceMachineCombinerStrategy.getNumOccurrences() == 0) {
1363     // The option is unused. Choose Local strategy only for in-order cores. When
1364     // scheduling model is unspecified, use MinInstrCount strategy as more
1365     // generic one.
1366     const auto &SchedModel = STI.getSchedModel();
1367     return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
1368                ? MachineTraceStrategy::TS_MinInstrCount
1369                : MachineTraceStrategy::TS_Local;
1370   }
1371   // The strategy was forced by the option.
1372   return ForceMachineCombinerStrategy;
1373 }
1374 
1375 void RISCVInstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
1376                                            MachineInstr &OldMI2,
1377                                            MachineInstr &NewMI1,
1378                                            MachineInstr &NewMI2) const {
1379   uint32_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags();
1380   NewMI1.setFlags(IntersectedFlags);
1381   NewMI2.setFlags(IntersectedFlags);
1382 }
1383 
1384 void RISCVInstrInfo::finalizeInsInstrs(
1385     MachineInstr &Root, MachineCombinerPattern &P,
1386     SmallVectorImpl<MachineInstr *> &InsInstrs) const {
1387   int16_t FrmOpIdx =
1388       RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
1389   if (FrmOpIdx < 0) {
1390     assert(all_of(InsInstrs,
1391                   [](MachineInstr *MI) {
1392                     return RISCV::getNamedOperandIdx(MI->getOpcode(),
1393                                                      RISCV::OpName::frm) < 0;
1394                   }) &&
1395            "New instructions require FRM whereas the old one does not have it");
1396     return;
1397   }
1398 
1399   const MachineOperand &FRM = Root.getOperand(FrmOpIdx);
1400   MachineFunction &MF = *Root.getMF();
1401 
1402   for (auto *NewMI : InsInstrs) {
1403     assert(static_cast<unsigned>(RISCV::getNamedOperandIdx(
1404                NewMI->getOpcode(), RISCV::OpName::frm)) ==
1405                NewMI->getNumOperands() &&
1406            "Instruction has unexpected number of operands");
1407     MachineInstrBuilder MIB(MF, NewMI);
1408     MIB.add(FRM);
1409     if (FRM.getImm() == RISCVFPRndMode::DYN)
1410       MIB.addUse(RISCV::FRM, RegState::Implicit);
1411   }
1412 }
1413 
1414 static bool isFADD(unsigned Opc) {
1415   switch (Opc) {
1416   default:
1417     return false;
1418   case RISCV::FADD_H:
1419   case RISCV::FADD_S:
1420   case RISCV::FADD_D:
1421     return true;
1422   }
1423 }
1424 
1425 static bool isFSUB(unsigned Opc) {
1426   switch (Opc) {
1427   default:
1428     return false;
1429   case RISCV::FSUB_H:
1430   case RISCV::FSUB_S:
1431   case RISCV::FSUB_D:
1432     return true;
1433   }
1434 }
1435 
1436 static bool isFMUL(unsigned Opc) {
1437   switch (Opc) {
1438   default:
1439     return false;
1440   case RISCV::FMUL_H:
1441   case RISCV::FMUL_S:
1442   case RISCV::FMUL_D:
1443     return true;
1444   }
1445 }
1446 
1447 bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
1448                                             bool &Commuted) const {
1449   if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
1450     return false;
1451 
1452   const MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
1453   unsigned OperandIdx = Commuted ? 2 : 1;
1454   const MachineInstr &Sibling =
1455       *MRI.getVRegDef(Inst.getOperand(OperandIdx).getReg());
1456 
1457   int16_t InstFrmOpIdx =
1458       RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
1459   int16_t SiblingFrmOpIdx =
1460       RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);
1461 
1462   return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
1463          RISCV::hasEqualFRM(Inst, Sibling);
1464 }
1465 
1466 bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
1467                                                  bool Invert) const {
1468   unsigned Opc = Inst.getOpcode();
1469   if (Invert) {
1470     auto InverseOpcode = getInverseOpcode(Opc);
1471     if (!InverseOpcode)
1472       return false;
1473     Opc = *InverseOpcode;
1474   }
1475 
1476   if (isFADD(Opc) || isFMUL(Opc))
1477     return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
1478            Inst.getFlag(MachineInstr::MIFlag::FmNsz);
1479 
1480   switch (Opc) {
1481   default:
1482     return false;
1483   case RISCV::ADD:
1484   case RISCV::ADDW:
1485   case RISCV::AND:
1486   case RISCV::OR:
1487   case RISCV::XOR:
1488   // From RISC-V ISA spec, if both the high and low bits of the same product
1489   // are required, then the recommended code sequence is:
1490   //
1491   // MULH[[S]U] rdh, rs1, rs2
1492   // MUL        rdl, rs1, rs2
1493   // (source register specifiers must be in the same order and rdh cannot be
1494   //  the same as rs1 or rs2)
1495   //
1496   // Microarchitectures can then fuse these into a single multiply operation
1497   // instead of performing two separate multiplies.
1498   // MachineCombiner may reassociate MUL operands and lose the fusion
1499   // opportunity.
1500   case RISCV::MUL:
1501   case RISCV::MULW:
1502   case RISCV::MIN:
1503   case RISCV::MINU:
1504   case RISCV::MAX:
1505   case RISCV::MAXU:
1506   case RISCV::FMIN_H:
1507   case RISCV::FMIN_S:
1508   case RISCV::FMIN_D:
1509   case RISCV::FMAX_H:
1510   case RISCV::FMAX_S:
1511   case RISCV::FMAX_D:
1512     return true;
1513   }
1514 
1515   return false;
1516 }
1517 
1518 std::optional<unsigned>
1519 RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
1520   switch (Opcode) {
1521   default:
1522     return std::nullopt;
1523   case RISCV::FADD_H:
1524     return RISCV::FSUB_H;
1525   case RISCV::FADD_S:
1526     return RISCV::FSUB_S;
1527   case RISCV::FADD_D:
1528     return RISCV::FSUB_D;
1529   case RISCV::FSUB_H:
1530     return RISCV::FADD_H;
1531   case RISCV::FSUB_S:
1532     return RISCV::FADD_S;
1533   case RISCV::FSUB_D:
1534     return RISCV::FADD_D;
1535   case RISCV::ADD:
1536     return RISCV::SUB;
1537   case RISCV::SUB:
1538     return RISCV::ADD;
1539   case RISCV::ADDW:
1540     return RISCV::SUBW;
1541   case RISCV::SUBW:
1542     return RISCV::ADDW;
1543   }
1544 }
1545 
1546 static bool canCombineFPFusedMultiply(const MachineInstr &Root,
1547                                       const MachineOperand &MO,
1548                                       bool DoRegPressureReduce) {
1549   if (!MO.isReg() || !MO.getReg().isVirtual())
1550     return false;
1551   const MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
1552   MachineInstr *MI = MRI.getVRegDef(MO.getReg());
1553   if (!MI || !isFMUL(MI->getOpcode()))
1554     return false;
1555 
1556   if (!Root.getFlag(MachineInstr::MIFlag::FmContract) ||
1557       !MI->getFlag(MachineInstr::MIFlag::FmContract))
1558     return false;
1559 
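       // An illustrative qualifying shape (virtual register names made up):
       //   %mul = FMUL_D %a, %b, frm    (contract)
       //   %res = FADD_D %mul, %c, frm  (contract)
       // Both carry the contract flag (checked above) and agree on frm (checked
       // below), so fusing into FMADD_D cannot change the rounding behaviour.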
1560   // Try combining even if the fmul has more than one use, as it eliminates the
1561   // dependency between the fadd (or fsub) and the fmul. However, it can extend
1562   // the live ranges of the fmul operands, so reject the transformation in
1563   // register-pressure reduction mode.
1564   if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1565     return false;
1566 
1567   // Do not combine instructions from different basic blocks.
1568   if (Root.getParent() != MI->getParent())
1569     return false;
1570   return RISCV::hasEqualFRM(Root, *MI);
1571 }
1572 
1573 static bool
1574 getFPFusedMultiplyPatterns(MachineInstr &Root,
1575                            SmallVectorImpl<MachineCombinerPattern> &Patterns,
1576                            bool DoRegPressureReduce) {
1577   unsigned Opc = Root.getOpcode();
1578   bool IsFAdd = isFADD(Opc);
1579   if (!IsFAdd && !isFSUB(Opc))
1580     return false;
1581   bool Added = false;
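       // The chosen pattern records which fadd/fsub operand is the fmul:
       //   fadd (fmul a, b), c -> FMADD_AX    fadd c, (fmul a, b) -> FMADD_XA
       //   fsub (fmul a, b), c -> FMSUB       fsub c, (fmul a, b) -> FNMSUB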
1582   if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
1583                                 DoRegPressureReduce)) {
1584     Patterns.push_back(IsFAdd ? MachineCombinerPattern::FMADD_AX
1585                               : MachineCombinerPattern::FMSUB);
1586     Added = true;
1587   }
1588   if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
1589                                 DoRegPressureReduce)) {
1590     Patterns.push_back(IsFAdd ? MachineCombinerPattern::FMADD_XA
1591                               : MachineCombinerPattern::FNMSUB);
1592     Added = true;
1593   }
1594   return Added;
1595 }
1596 
1597 static bool getFPPatterns(MachineInstr &Root,
1598                           SmallVectorImpl<MachineCombinerPattern> &Patterns,
1599                           bool DoRegPressureReduce) {
1600   return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
1601 }
1602 
1603 bool RISCVInstrInfo::getMachineCombinerPatterns(
1604     MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
1605     bool DoRegPressureReduce) const {
1606 
1607   if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
1608     return true;
1609 
1610   return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
1611                                                      DoRegPressureReduce);
1612 }
1613 
1614 static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc,
1615                                          MachineCombinerPattern Pattern) {
1616   switch (RootOpc) {
1617   default:
1618     llvm_unreachable("Unexpected opcode");
1619   case RISCV::FADD_H:
1620     return RISCV::FMADD_H;
1621   case RISCV::FADD_S:
1622     return RISCV::FMADD_S;
1623   case RISCV::FADD_D:
1624     return RISCV::FMADD_D;
1625   case RISCV::FSUB_H:
1626     return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_H
1627                                                     : RISCV::FNMSUB_H;
1628   case RISCV::FSUB_S:
1629     return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_S
1630                                                     : RISCV::FNMSUB_S;
1631   case RISCV::FSUB_D:
1632     return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_D
1633                                                     : RISCV::FNMSUB_D;
1634   }
1635 }
1636 
1637 static unsigned getAddendOperandIdx(MachineCombinerPattern Pattern) {
1638   switch (Pattern) {
1639   default:
1640     llvm_unreachable("Unexpected pattern");
1641   case MachineCombinerPattern::FMADD_AX:
1642   case MachineCombinerPattern::FMSUB:
1643     return 2;
1644   case MachineCombinerPattern::FMADD_XA:
1645   case MachineCombinerPattern::FNMSUB:
1646     return 1;
1647   }
1648 }
1649 
1650 static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
1651                                    MachineCombinerPattern Pattern,
1652                                    SmallVectorImpl<MachineInstr *> &InsInstrs,
1653                                    SmallVectorImpl<MachineInstr *> &DelInstrs) {
1654   MachineFunction *MF = Root.getMF();
1655   MachineRegisterInfo &MRI = MF->getRegInfo();
1656   const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1657 
1658   MachineOperand &Mul1 = Prev.getOperand(1);
1659   MachineOperand &Mul2 = Prev.getOperand(2);
1660   MachineOperand &Dst = Root.getOperand(0);
1661   MachineOperand &Addend = Root.getOperand(getAddendOperandIdx(Pattern));
1662 
1663   Register DstReg = Dst.getReg();
1664   unsigned FusedOpc = getFPFusedMultiplyOpcode(Root.getOpcode(), Pattern);
1665   uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
1666   DebugLoc MergedLoc =
1667       DILocation::getMergedLocation(Root.getDebugLoc(), Prev.getDebugLoc());
1668 
1669   bool Mul1IsKill = Mul1.isKill();
1670   bool Mul2IsKill = Mul2.isKill();
1671   bool AddendIsKill = Addend.isKill();
1672 
1673   // We need to clear kill flags since we may be extending the live range past
1674   // a kill. If the mul had kill flags, we can preserve those since we know
1675   // where the previous range stopped.
1676   MRI.clearKillFlags(Mul1.getReg());
1677   MRI.clearKillFlags(Mul2.getReg());
1678 
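       // Schematically (FADD_S case, names made up), the pair
       //   %m = FMUL_S %x, %y
       //   %d = FADD_S %m, %z
       // is rebuilt as %d = FMADD_S %x, %y, %z; the FMUL is only deleted below
       // when %m has no other non-debug users.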
1679   MachineInstrBuilder MIB =
1680       BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
1681           .addReg(Mul1.getReg(), getKillRegState(Mul1IsKill))
1682           .addReg(Mul2.getReg(), getKillRegState(Mul2IsKill))
1683           .addReg(Addend.getReg(), getKillRegState(AddendIsKill))
1684           .setMIFlags(IntersectedFlags);
1685 
1686   InsInstrs.push_back(MIB);
1687   if (MRI.hasOneNonDBGUse(Prev.getOperand(0).getReg()))
1688     DelInstrs.push_back(&Prev);
1689   DelInstrs.push_back(&Root);
1690 }
1691 
1692 void RISCVInstrInfo::genAlternativeCodeSequence(
1693     MachineInstr &Root, MachineCombinerPattern Pattern,
1694     SmallVectorImpl<MachineInstr *> &InsInstrs,
1695     SmallVectorImpl<MachineInstr *> &DelInstrs,
1696     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
1697   MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
1698   switch (Pattern) {
1699   default:
1700     TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
1701                                                 DelInstrs, InstrIdxForVirtReg);
1702     return;
1703   case MachineCombinerPattern::FMADD_AX:
1704   case MachineCombinerPattern::FMSUB: {
1705     MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(1).getReg());
1706     combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
1707     return;
1708   }
1709   case MachineCombinerPattern::FMADD_XA:
1710   case MachineCombinerPattern::FNMSUB: {
1711     MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(2).getReg());
1712     combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
1713     return;
1714   }
1715   }
1716 }
1717 
1718 bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
1719                                        StringRef &ErrInfo) const {
1720   MCInstrDesc const &Desc = MI.getDesc();
1721 
1722   for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
1723     unsigned OpType = Operand.OperandType;
1724     if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
1725         OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
1726       const MachineOperand &MO = MI.getOperand(Index);
1727       if (MO.isImm()) {
1728         int64_t Imm = MO.getImm();
1729         bool Ok;
1730         switch (OpType) {
1731         default:
1732           llvm_unreachable("Unexpected operand type");
1733 
1734           // clang-format off
1735 #define CASE_OPERAND_UIMM(NUM)                                                 \
1736   case RISCVOp::OPERAND_UIMM##NUM:                                             \
1737     Ok = isUInt<NUM>(Imm);                                                     \
1738     break;
1739         CASE_OPERAND_UIMM(1)
1740         CASE_OPERAND_UIMM(2)
1741         CASE_OPERAND_UIMM(3)
1742         CASE_OPERAND_UIMM(4)
1743         CASE_OPERAND_UIMM(5)
1744         CASE_OPERAND_UIMM(6)
1745         CASE_OPERAND_UIMM(7)
1746         CASE_OPERAND_UIMM(8)
1747         CASE_OPERAND_UIMM(12)
1748         CASE_OPERAND_UIMM(20)
1749           // clang-format on
1750         case RISCVOp::OPERAND_UIMM2_LSB0:
1751           Ok = isShiftedUInt<1, 1>(Imm);
1752           break;
1753         case RISCVOp::OPERAND_UIMM7_LSB00:
1754           Ok = isShiftedUInt<5, 2>(Imm);
1755           break;
1756         case RISCVOp::OPERAND_UIMM8_LSB00:
1757           Ok = isShiftedUInt<6, 2>(Imm);
1758           break;
1759         case RISCVOp::OPERAND_UIMM8_LSB000:
1760           Ok = isShiftedUInt<5, 3>(Imm);
1761           break;
1762         case RISCVOp::OPERAND_UIMM8_GE32:
1763           Ok = isUInt<8>(Imm) && Imm >= 32;
1764           break;
1765         case RISCVOp::OPERAND_UIMM9_LSB000:
1766           Ok = isShiftedUInt<6, 3>(Imm);
1767           break;
1768         case RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO:
1769           Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
1770           break;
1771         case RISCVOp::OPERAND_UIMM10_LSB00_NONZERO:
1772           Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);
1773           break;
1774         case RISCVOp::OPERAND_ZERO:
1775           Ok = Imm == 0;
1776           break;
1777         case RISCVOp::OPERAND_SIMM5:
1778           Ok = isInt<5>(Imm);
1779           break;
1780         case RISCVOp::OPERAND_SIMM5_PLUS1:
1781           Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
1782           break;
1783         case RISCVOp::OPERAND_SIMM6:
1784           Ok = isInt<6>(Imm);
1785           break;
1786         case RISCVOp::OPERAND_SIMM6_NONZERO:
1787           Ok = Imm != 0 && isInt<6>(Imm);
1788           break;
1789         case RISCVOp::OPERAND_VTYPEI10:
1790           Ok = isUInt<10>(Imm);
1791           break;
1792         case RISCVOp::OPERAND_VTYPEI11:
1793           Ok = isUInt<11>(Imm);
1794           break;
1795         case RISCVOp::OPERAND_SIMM12:
1796           Ok = isInt<12>(Imm);
1797           break;
1798         case RISCVOp::OPERAND_SIMM12_LSB00000:
1799           Ok = isShiftedInt<7, 5>(Imm);
1800           break;
1801         case RISCVOp::OPERAND_UIMMLOG2XLEN:
1802           Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
1803           break;
1804         case RISCVOp::OPERAND_UIMMLOG2XLEN_NONZERO:
1805           Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
1806           Ok = Ok && Imm != 0;
1807           break;
1808         case RISCVOp::OPERAND_CLUI_IMM:
1809           Ok = (isUInt<5>(Imm) && Imm != 0) ||
1810                (Imm >= 0xfffe0 && Imm <= 0xfffff);
1811           break;
1812         case RISCVOp::OPERAND_RVKRNUM:
1813           Ok = Imm >= 0 && Imm <= 10;
1814           break;
1815         case RISCVOp::OPERAND_RVKRNUM_0_7:
1816           Ok = Imm >= 0 && Imm <= 7;
1817           break;
1818         case RISCVOp::OPERAND_RVKRNUM_1_10:
1819           Ok = Imm >= 1 && Imm <= 10;
1820           break;
1821         case RISCVOp::OPERAND_RVKRNUM_2_14:
1822           Ok = Imm >= 2 && Imm <= 14;
1823           break;
1824         }
1825         if (!Ok) {
1826           ErrInfo = "Invalid immediate";
1827           return false;
1828         }
1829       }
1830     }
1831   }
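       // A worked example of the shifted-immediate checks above:
       // OPERAND_UIMM7_LSB00 uses isShiftedUInt<5, 2>, a 5-bit field shifted left
       // by two, so it accepts exactly the multiples of 4 in [0, 124].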
1832 
1833   const uint64_t TSFlags = Desc.TSFlags;
1834   if (RISCVII::hasVLOp(TSFlags)) {
1835     const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
1836     if (!Op.isImm() && !Op.isReg()) {
1837       ErrInfo = "Invalid operand type for VL operand";
1838       return false;
1839     }
1840     if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
1841       const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1842       auto *RC = MRI.getRegClass(Op.getReg());
1843       if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
1844         ErrInfo = "Invalid register class for VL operand";
1845         return false;
1846       }
1847     }
1848     if (!RISCVII::hasSEWOp(TSFlags)) {
1849       ErrInfo = "VL operand w/o SEW operand?";
1850       return false;
1851     }
1852   }
1853   if (RISCVII::hasSEWOp(TSFlags)) {
1854     unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
1855     if (!MI.getOperand(OpIdx).isImm()) {
1856       ErrInfo = "SEW value expected to be an immediate";
1857       return false;
1858     }
1859     uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
1860     if (Log2SEW > 31) {
1861       ErrInfo = "Unexpected SEW value";
1862       return false;
1863     }
1864     unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
1865     if (!RISCVVType::isValidSEW(SEW)) {
1866       ErrInfo = "Unexpected SEW value";
1867       return false;
1868     }
1869   }
1870   if (RISCVII::hasVecPolicyOp(TSFlags)) {
1871     unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
1872     if (!MI.getOperand(OpIdx).isImm()) {
1873       ErrInfo = "Policy operand expected to be an immediate";
1874       return false;
1875     }
1876     uint64_t Policy = MI.getOperand(OpIdx).getImm();
1877     if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
1878       ErrInfo = "Invalid Policy Value";
1879       return false;
1880     }
1881     if (!RISCVII::hasVLOp(TSFlags)) {
1882       ErrInfo = "policy operand w/o VL operand?";
1883       return false;
1884     }
1885 
1886     // VecPolicy operands can only exist on instructions with passthru/merge
1887     // arguments. Note that not all instructions with a passthru have vec policy
1888     // operands; some instructions have implicit policies.
1889     unsigned UseOpIdx;
1890     if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
1891       ErrInfo = "policy operand w/o tied operand?";
1892       return false;
1893     }
1894   }
1895 
1896   return true;
1897 }
1898 
1899 // Return true on success, setting the base operand and byte offset of the
1900 // instruction plus the memory width (the number of bytes loaded/stored).
1901 bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
1902     const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
1903     unsigned &Width, const TargetRegisterInfo *TRI) const {
1904   if (!LdSt.mayLoadOrStore())
1905     return false;
1906 
1907   // Here we assume the standard RISC-V ISA, which uses a base+offset
1908   // addressing mode. You'll need to relax these conditions to support custom
1909   // load/store instructions.
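       // For a plain scalar access such as `lw a0, 8(a1)` the three explicit
       // operands are destination, base and immediate, yielding BaseReg = a1,
       // Offset = 8 and Width = 4 (taken from the memory operand below).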
1910   if (LdSt.getNumExplicitOperands() != 3)
1911     return false;
1912   if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
1913     return false;
1914 
1915   if (!LdSt.hasOneMemOperand())
1916     return false;
1917 
1918   Width = (*LdSt.memoperands_begin())->getSize();
1919   BaseReg = &LdSt.getOperand(1);
1920   Offset = LdSt.getOperand(2).getImm();
1921   return true;
1922 }
1923 
1924 bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
1925     const MachineInstr &MIa, const MachineInstr &MIb) const {
1926   assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
1927   assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
1928 
1929   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1930       MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1931     return false;
1932 
1933   // Retrieve the base register, offset from the base register and width. Width
1934   // is the size of memory that is being loaded/stored (e.g. 1, 2, 4).  If
1935   // base registers are identical, and the offset of a lower memory access +
1936   // the width doesn't overlap the offset of a higher memory access,
1937   // then the memory accesses are different.
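       // Worked example: two 4-byte accesses off the same base at offsets 0 and 4
       // satisfy LowOffset + LowWidth = 0 + 4 <= 4 = HighOffset and are disjoint,
       // whereas offsets 0 and 2 would overlap and conservatively return false.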
1938   const TargetRegisterInfo *TRI = STI.getRegisterInfo();
1939   const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
1940   int64_t OffsetA = 0, OffsetB = 0;
1941   unsigned int WidthA = 0, WidthB = 0;
1942   if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
1943       getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
1944     if (BaseOpA->isIdenticalTo(*BaseOpB)) {
1945       int LowOffset = std::min(OffsetA, OffsetB);
1946       int HighOffset = std::max(OffsetA, OffsetB);
1947       int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1948       if (LowOffset + LowWidth <= HighOffset)
1949         return true;
1950     }
1951   }
1952   return false;
1953 }
1954 
1955 std::pair<unsigned, unsigned>
1956 RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
1957   const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
1958   return std::make_pair(TF & Mask, TF & ~Mask);
1959 }
1960 
1961 ArrayRef<std::pair<unsigned, const char *>>
1962 RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
1963   using namespace RISCVII;
1964   static const std::pair<unsigned, const char *> TargetFlags[] = {
1965       {MO_CALL, "riscv-call"},
1966       {MO_PLT, "riscv-plt"},
1967       {MO_LO, "riscv-lo"},
1968       {MO_HI, "riscv-hi"},
1969       {MO_PCREL_LO, "riscv-pcrel-lo"},
1970       {MO_PCREL_HI, "riscv-pcrel-hi"},
1971       {MO_GOT_HI, "riscv-got-hi"},
1972       {MO_TPREL_LO, "riscv-tprel-lo"},
1973       {MO_TPREL_HI, "riscv-tprel-hi"},
1974       {MO_TPREL_ADD, "riscv-tprel-add"},
1975       {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
1976       {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
1977   return ArrayRef(TargetFlags);
1978 }
1979 bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
1980     MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
1981   const Function &F = MF.getFunction();
1982 
1983   // Can F be deduplicated by the linker? If it can, don't outline from it.
1984   if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
1985     return false;
1986 
1987   // Don't outline from functions with section markings; the program could
1988   // expect that all the code is in the named section.
1989   if (F.hasSection())
1990     return false;
1991 
1992   // It's safe to outline from MF.
1993   return true;
1994 }
1995 
1996 bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
1997                                             unsigned &Flags) const {
1998   // More accurate safety checking is done in getOutliningCandidateInfo.
1999   return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
2000 }
2001 
2002 // Enum values indicating how an outlined call should be constructed.
2003 enum MachineOutlinerConstructionID {
2004   MachineOutlinerDefault
2005 };
2006 
2007 bool RISCVInstrInfo::shouldOutlineFromFunctionByDefault(
2008     MachineFunction &MF) const {
2009   return MF.getFunction().hasMinSize();
2010 }
2011 
2012 std::optional<outliner::OutlinedFunction>
2013 RISCVInstrInfo::getOutliningCandidateInfo(
2014     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
2015 
2016   // First we need to filter out candidates where the X5 register (i.e. t0)
2017   // can't be used to set up the function call.
2018   auto CannotInsertCall = [](outliner::Candidate &C) {
2019     const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
2020     return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
2021   };
2022 
2023   llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
2024 
2025   // If the sequence doesn't have enough candidates left, then we're done.
2026   if (RepeatedSequenceLocs.size() < 2)
2027     return std::nullopt;
2028 
2029   unsigned SequenceSize = 0;
2030 
2031   auto I = RepeatedSequenceLocs[0].front();
2032   auto E = std::next(RepeatedSequenceLocs[0].back());
2033   for (; I != E; ++I)
2034     SequenceSize += getInstSizeInBytes(*I);
2035 
2036   // call t0, function = 8 bytes.
2037   unsigned CallOverhead = 8;
2038   for (auto &C : RepeatedSequenceLocs)
2039     C.setCallInfo(MachineOutlinerDefault, CallOverhead);
2040 
2041   // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
2042   unsigned FrameOverhead = 4;
2043   if (RepeatedSequenceLocs[0]
2044           .getMF()
2045           ->getSubtarget<RISCVSubtarget>()
2046           .hasStdExtCOrZca())
2047     FrameOverhead = 2;
2048 
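       // With these numbers the generic outliner weighs, roughly, the bytes saved
       // (N copies of SequenceSize) against one outlined body plus its frame
       // (SequenceSize + FrameOverhead) and a call per occurrence
       // (N * CallOverhead). This is a sketch of the cost model, not the exact
       // benefit formula.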
2049   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
2050                                     FrameOverhead, MachineOutlinerDefault);
2051 }
2052 
2053 outliner::InstrType
2054 RISCVInstrInfo::getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI,
2055                                      unsigned Flags) const {
2056   MachineInstr &MI = *MBBI;
2057   MachineBasicBlock *MBB = MI.getParent();
2058   const TargetRegisterInfo *TRI =
2059       MBB->getParent()->getSubtarget().getRegisterInfo();
2060   const auto &F = MI.getMF()->getFunction();
2061 
2062   // We can manually strip out CFI instructions later.
2063   if (MI.isCFIInstruction())
2064     // If current function has exception handling code, we can't outline &
2065     // strip these CFI instructions since it may break .eh_frame section
2066     // needed in unwinding.
2067     return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
2068                                      : outliner::InstrType::Invisible;
2069 
2070   // We need support for tail calls to outlined functions before return
2071   // statements can be allowed.
2072   if (MI.isReturn())
2073     return outliner::InstrType::Illegal;
2074 
2075   // Don't allow modifying the X5 register which we use for return addresses for
2076   // these outlined functions.
2077   if (MI.modifiesRegister(RISCV::X5, TRI) ||
2078       MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
2079     return outliner::InstrType::Illegal;
2080 
2081   // Make sure the operands don't reference something unsafe.
2082   for (const auto &MO : MI.operands()) {
2083 
2084     // pcrel-hi and pcrel-lo can't be put in separate sections; filter that out
2085     // wherever possible.
2086     if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
2087         (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
2088          F.hasSection()))
2089       return outliner::InstrType::Illegal;
2090   }
2091 
2092   return outliner::InstrType::Legal;
2093 }
2094 
2095 void RISCVInstrInfo::buildOutlinedFrame(
2096     MachineBasicBlock &MBB, MachineFunction &MF,
2097     const outliner::OutlinedFunction &OF) const {
2098 
2099   // Strip out any CFI instructions
2100   bool Changed = true;
2101   while (Changed) {
2102     Changed = false;
2103     auto I = MBB.begin();
2104     auto E = MBB.end();
2105     for (; I != E; ++I) {
2106       if (I->isCFIInstruction()) {
2107         I->removeFromParent();
2108         Changed = true;
2109         break;
2110       }
2111     }
2112   }
2113 
2114   MBB.addLiveIn(RISCV::X5);
2115 
2116   // Add in a return instruction to the end of the outlined frame.
2117   MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
2118       .addReg(RISCV::X0, RegState::Define)
2119       .addReg(RISCV::X5)
2120       .addImm(0));
2121 }
2122 
2123 MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
2124     Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
2125     MachineFunction &MF, outliner::Candidate &C) const {
2126 
2127   // Add in a call instruction to the outlined function at the given location.
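       // PseudoCALLReg with X5 as the destination later expands to an auipc+jalr
       // pair using t0 as the scratch/return-address register, pairing with the
       // `jr t0` return emitted in buildOutlinedFrame above.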
2128   It = MBB.insert(It,
2129                   BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
2130                       .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
2131                                         RISCVII::MO_CALL));
2132   return It;
2133 }
2134 
2135 // MIR printer helper function to annotate Operands with a comment.
2136 std::string RISCVInstrInfo::createMIROperandComment(
2137     const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2138     const TargetRegisterInfo *TRI) const {
2139   // Print a generic comment for this operand if there is one.
2140   std::string GenericComment =
2141       TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
2142   if (!GenericComment.empty())
2143     return GenericComment;
2144 
2145   // If not, we must have an immediate operand.
2146   if (!Op.isImm())
2147     return std::string();
2148 
2149   std::string Comment;
2150   raw_string_ostream OS(Comment);
2151 
2152   uint64_t TSFlags = MI.getDesc().TSFlags;
2153 
2154   // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
2155   // operand of vector codegen pseudos.
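       // e.g. a vtype immediate renders as something like "e32, m1, ta, ma", a
       // SEW operand as "e32", and a policy immediate as "ta, ma" (illustrative
       // outputs of the printing below).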
2156   if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
2157        MI.getOpcode() == RISCV::PseudoVSETVLI ||
2158        MI.getOpcode() == RISCV::PseudoVSETIVLI ||
2159        MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
2160       OpIdx == 2) {
2161     unsigned Imm = MI.getOperand(OpIdx).getImm();
2162     RISCVVType::printVType(Imm, OS);
2163   } else if (RISCVII::hasSEWOp(TSFlags) &&
2164              OpIdx == RISCVII::getSEWOpNum(MI.getDesc())) {
2165     unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
2166     unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
2167     assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
2168     OS << "e" << SEW;
2169   } else if (RISCVII::hasVecPolicyOp(TSFlags) &&
2170              OpIdx == RISCVII::getVecPolicyOpNum(MI.getDesc())) {
2171     unsigned Policy = MI.getOperand(OpIdx).getImm();
2172     assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
2173            "Invalid Policy Value");
2174     OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
2175        << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
2176   }
2177 
2178   OS.flush();
2179   return Comment;
2180 }
2181 
2182 // clang-format off
2183 #define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL)                                \
2184   RISCV::PseudoV##OP##_##TYPE##_##LMUL
2185 
2186 #define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)                                    \
2187   CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1):                                       \
2188   case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2):                                  \
2189   case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4):                                  \
2190   case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
2191 
2192 #define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)                                   \
2193   CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2):                                      \
2194   case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)
2195 
2196 #define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)                                   \
2197   CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4):                                      \
2198   case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)
2199 
2200 #define CASE_VFMA_OPCODE_LMULS(OP, TYPE)                                       \
2201   CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8):                                      \
2202   case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
2203 
2204 #define CASE_VFMA_SPLATS(OP)                                                   \
2205   CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16):                                        \
2206   case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32):                                   \
2207   case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
2208 // clang-format on
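     // For reference, CASE_VFMA_SPLATS(FMADD) expands to the case-label chain
     // covering RISCV::PseudoVFMADD_VF16_MF4 through PseudoVFMADD_VF64_M8, i.e.
     // every splat (vector-scalar) FMA pseudo at its supported LMULs.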
2209 
2210 bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2211                                            unsigned &SrcOpIdx1,
2212                                            unsigned &SrcOpIdx2) const {
2213   const MCInstrDesc &Desc = MI.getDesc();
2214   if (!Desc.isCommutable())
2215     return false;
2216 
2217   switch (MI.getOpcode()) {
2218   case RISCV::TH_MVEQZ:
2219   case RISCV::TH_MVNEZ:
2220     // We can't commute operands if operand 2 (i.e., rs1 in
2221     // mveqz/mvnez rd,rs1,rs2) is the zero-register (as it is
2222     // not valid as the in/out-operand 1).
2223     if (MI.getOperand(2).getReg() == RISCV::X0)
2224       return false;
2225     // Operands 1 and 2 are commutable, if we switch the opcode.
2226     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
2227   case RISCV::TH_MULA:
2228   case RISCV::TH_MULAW:
2229   case RISCV::TH_MULAH:
2230   case RISCV::TH_MULS:
2231   case RISCV::TH_MULSW:
2232   case RISCV::TH_MULSH:
2233     // Operands 2 and 3 are commutable.
2234     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
2235   case RISCV::PseudoCCMOVGPR:
2236     // Operands 4 and 5 are commutable.
2237     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
2238   case CASE_VFMA_SPLATS(FMADD):
2239   case CASE_VFMA_SPLATS(FMSUB):
2240   case CASE_VFMA_SPLATS(FMACC):
2241   case CASE_VFMA_SPLATS(FMSAC):
2242   case CASE_VFMA_SPLATS(FNMADD):
2243   case CASE_VFMA_SPLATS(FNMSUB):
2244   case CASE_VFMA_SPLATS(FNMACC):
2245   case CASE_VFMA_SPLATS(FNMSAC):
2246   case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
2247   case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
2248   case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
2249   case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
2250   case CASE_VFMA_OPCODE_LMULS(MADD, VX):
2251   case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
2252   case CASE_VFMA_OPCODE_LMULS(MACC, VX):
2253   case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
2254   case CASE_VFMA_OPCODE_LMULS(MACC, VV):
2255   case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
2256     // If the tail policy is undisturbed we can't commute.
2257     assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
2258     if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
2259       return false;
2260 
2261     // For these instructions we can only swap operand 1 and operand 3 by
2262     // changing the opcode.
2263     unsigned CommutableOpIdx1 = 1;
2264     unsigned CommutableOpIdx2 = 3;
2265     if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
2266                               CommutableOpIdx2))
2267       return false;
2268     return true;
2269   }
2270   case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
2271   case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
2272   case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
2273   case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
2274   case CASE_VFMA_OPCODE_LMULS(MADD, VV):
2275   case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
2276     // If the tail policy is undisturbed we can't commute.
2277     assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
2278     if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
2279       return false;
2280 
2281     // For these instructions we have more freedom. We can commute with the
2282     // other multiplicand or with the addend/subtrahend/minuend.
2283 
2284     // Any fixed operand must be from source 1, 2 or 3.
2285     if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
2286       return false;
2287     if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
2288       return false;
2289 
2290     // If both ops are fixed, one must be the tied source.
2291     if (SrcOpIdx1 != CommuteAnyOperandIndex &&
2292         SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
2293       return false;
2294 
2295     // Look for two different register operands assumed to be commutable
2296     // regardless of the FMA opcode. The FMA opcode is adjusted later if
2297     // needed.
2298     if (SrcOpIdx1 == CommuteAnyOperandIndex ||
2299         SrcOpIdx2 == CommuteAnyOperandIndex) {
2300       // At least one of operands to be commuted is not specified and
2301       // this method is free to choose appropriate commutable operands.
2302       unsigned CommutableOpIdx1 = SrcOpIdx1;
2303       if (SrcOpIdx1 == SrcOpIdx2) {
2304         // Both of operands are not fixed. Set one of commutable
2305         // operands to the tied source.
2306         CommutableOpIdx1 = 1;
2307       } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
2308         // Only one of the operands is not fixed.
2309         CommutableOpIdx1 = SrcOpIdx2;
2310       }
2311 
2312       // CommutableOpIdx1 is well defined now. Let's choose another commutable
2313       // operand and assign its index to CommutableOpIdx2.
2314       unsigned CommutableOpIdx2;
2315       if (CommutableOpIdx1 != 1) {
2316         // If we haven't already used the tied source, we must use it now.
2317         CommutableOpIdx2 = 1;
2318       } else {
2319         Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
2320 
2321         // The commuted operands should have different registers.
2322         // Otherwise, the commute transformation does not change anything and
2323         // is useless. We use this as a hint to make our decision.
2324         if (Op1Reg != MI.getOperand(2).getReg())
2325           CommutableOpIdx2 = 2;
2326         else
2327           CommutableOpIdx2 = 3;
2328       }
2329 
2330       // Assign the found pair of commutable indices to SrcOpIdx1 and
2331       // SrcOpIdx2 to return those values.
2332       if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
2333                                 CommutableOpIdx2))
2334         return false;
2335     }
2336 
2337     return true;
2338   }
2339   }
2340 
2341   return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2342 }
2343 
2344 #define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)               \
2345   case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
2346     Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
2347     break;
2348 
2349 #define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)                   \
2350   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                       \
2351   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                       \
2352   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                       \
2353   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
2354 
2355 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)                  \
2356   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                      \
2357   CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
2358 
2359 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)                  \
2360   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                      \
2361   CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
2362 
2363 #define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                      \
2364   CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                      \
2365   CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
2366 
2367 #define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
2368   CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16)                        \
2369   CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32)                        \
2370   CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
2371 
2372 MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
2373                                                      bool NewMI,
2374                                                      unsigned OpIdx1,
2375                                                      unsigned OpIdx2) const {
2376   auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
2377     if (NewMI)
2378       return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
2379     return MI;
2380   };
2381 
2382   switch (MI.getOpcode()) {
2383   case RISCV::TH_MVEQZ:
2384   case RISCV::TH_MVNEZ: {
2385     auto &WorkingMI = cloneIfNew(MI);
2386     WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
2387                                                             : RISCV::TH_MVEQZ));
2388     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, OpIdx1,
2389                                                    OpIdx2);
2390   }
2391   case RISCV::PseudoCCMOVGPR: {
2392     // CCMOV can be commuted by inverting the condition.
2393     auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
2394     CC = RISCVCC::getOppositeBranchCondition(CC);
2395     auto &WorkingMI = cloneIfNew(MI);
2396     WorkingMI.getOperand(3).setImm(CC);
2397     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI*/ false,
2398                                                    OpIdx1, OpIdx2);
2399   }
2400   case CASE_VFMA_SPLATS(FMACC):
2401   case CASE_VFMA_SPLATS(FMADD):
2402   case CASE_VFMA_SPLATS(FMSAC):
2403   case CASE_VFMA_SPLATS(FMSUB):
2404   case CASE_VFMA_SPLATS(FNMACC):
2405   case CASE_VFMA_SPLATS(FNMADD):
2406   case CASE_VFMA_SPLATS(FNMSAC):
2407   case CASE_VFMA_SPLATS(FNMSUB):
2408   case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
2409   case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
2410   case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
2411   case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
2412   case CASE_VFMA_OPCODE_LMULS(MADD, VX):
2413   case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
2414   case CASE_VFMA_OPCODE_LMULS(MACC, VX):
2415   case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
2416   case CASE_VFMA_OPCODE_LMULS(MACC, VV):
2417   case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
2418     // It only makes sense to toggle these between clobbering the
2419     // addend/subtrahend/minuend and clobbering one of the multiplicands.
2420     assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
2421     assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
2422     unsigned Opc;
2423     switch (MI.getOpcode()) {
2424       default:
2425         llvm_unreachable("Unexpected opcode");
2426       CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
2427       CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
2428       CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSAC, FMSUB)
2429       CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSUB, FMSAC)
2430       CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMACC, FNMADD)
2431       CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
2432       CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
2433       CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
2434       CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMACC, FMADD, VV)
2435       CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSAC, FMSUB, VV)
2436       CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMACC, FNMADD, VV)
2437       CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSAC, FNMSUB, VV)
2438       CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
2439       CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
2440       CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
2441       CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
2442       CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
2443       CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
2444     }
2445 
2446     auto &WorkingMI = cloneIfNew(MI);
2447     WorkingMI.setDesc(get(Opc));
2448     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2449                                                    OpIdx1, OpIdx2);
2450   }
2451   case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
2452   case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
2453   case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
2454   case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
2455   case CASE_VFMA_OPCODE_LMULS(MADD, VV):
2456   case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
2457     assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
2458     // If one of the operands is the addend, we need to change the opcode.
2459     // Otherwise we're just swapping two of the multiplicands.
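         // e.g. swapping operand 1 (the multiplicand tied to the destination)
         // with operand 3 (the addend) of a PseudoVFMADD_VV_* yields the
         // corresponding PseudoVFMACC_VV_*, since which source is overwritten
         // changes.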
2460     if (OpIdx1 == 3 || OpIdx2 == 3) {
2461       unsigned Opc;
2462       switch (MI.getOpcode()) {
2463         default:
2464           llvm_unreachable("Unexpected opcode");
2465         CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMADD, FMACC, VV)
2466         CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSUB, FMSAC, VV)
2467         CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMADD, FNMACC, VV)
2468         CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSUB, FNMSAC, VV)
2469         CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
2470         CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
2471       }
2472 
2473       auto &WorkingMI = cloneIfNew(MI);
2474       WorkingMI.setDesc(get(Opc));
2475       return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2476                                                      OpIdx1, OpIdx2);
2477     }
2478     // Let the default code handle it.
2479     break;
2480   }
2481   }
2482 
2483   return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2484 }
2485 
2486 #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
2487 #undef CASE_VFMA_CHANGE_OPCODE_LMULS
2488 #undef CASE_VFMA_CHANGE_OPCODE_COMMON
2489 #undef CASE_VFMA_SPLATS
2490 #undef CASE_VFMA_OPCODE_LMULS
2491 #undef CASE_VFMA_OPCODE_COMMON
2492 
2493 // clang-format off
2494 #define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
2495   RISCV::PseudoV##OP##_##LMUL##_TIED
2496 
2497 #define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)                                       \
2498   CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                          \
2499   case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
2500   case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
2501   case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
2502   case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
2503 
2504 #define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
2505   CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
2506   case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
2507 // clang-format on
2508 
2509 #define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
2510   case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
2511     NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
2512     break;
2513 
2514 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)                                 \
2515   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
2516   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
2517   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
2518   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
2519   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
2520 
2521 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
2522   CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
2523   CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
2524 
2525 MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
2526                                                     LiveVariables *LV,
2527                                                     LiveIntervals *LIS) const {
2528   MachineInstrBuilder MIB;
2529   switch (MI.getOpcode()) {
2530   default:
2531     return nullptr;
2532   case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
2533   case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV): {
2534     assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
2535            MI.getNumExplicitOperands() == 7 &&
2536            "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
2537     // If the tail policy is undisturbed we can't convert.
2538     if ((MI.getOperand(RISCVII::getVecPolicyOpNum(MI.getDesc())).getImm() &
2539          1) == 0)
2540       return nullptr;
2541     // clang-format off
2542     unsigned NewOpc;
2543     switch (MI.getOpcode()) {
2544     default:
2545       llvm_unreachable("Unexpected opcode");
2546     CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWADD_WV)
2547     CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWSUB_WV)
2548     }
2549     // clang-format on
2550 
2551     MachineBasicBlock &MBB = *MI.getParent();
2552     MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
2553               .add(MI.getOperand(0))
2554               .addReg(MI.getOperand(0).getReg(), RegState::Undef)
2555               .add(MI.getOperand(1))
2556               .add(MI.getOperand(2))
2557               .add(MI.getOperand(3))
2558               .add(MI.getOperand(4))
2559               .add(MI.getOperand(5))
2560               .add(MI.getOperand(6));
2561     break;
2562   }
2563   case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
2564   case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
2565   case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
2566   case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
2567     // If the tail policy is undisturbed we can't convert.
2568     assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
2569            MI.getNumExplicitOperands() == 6);
2570     if ((MI.getOperand(5).getImm() & 1) == 0)
2571       return nullptr;
2572 
2573     // clang-format off
2574     unsigned NewOpc;
2575     switch (MI.getOpcode()) {
2576     default:
2577       llvm_unreachable("Unexpected opcode");
2578     CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
2579     CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
2580     CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
2581     CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
2582     }
2583     // clang-format on
2584 
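         // Sketch of the rewrite (M1 WADD_WV case): the tied
         //   %vd = PseudoVWADD_WV_M1_TIED %wide, %narrow, vl, sew, policy
         // becomes the untied form with an undef passthru inserted:
         //   %vd = PseudoVWADD_WV_M1 undef, %wide, %narrow, vl, sew, policy
         // which is only legal because the tail policy was checked above to be
         // agnostic.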
2585     MachineBasicBlock &MBB = *MI.getParent();
2586     MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
2587               .add(MI.getOperand(0))
2588               .addReg(MI.getOperand(0).getReg(), RegState::Undef)
2589               .add(MI.getOperand(1))
2590               .add(MI.getOperand(2))
2591               .add(MI.getOperand(3))
2592               .add(MI.getOperand(4))
2593               .add(MI.getOperand(5));
2594   }
2595   }
2596   MIB.copyImplicitOps(MI);
2597 
2598   if (LV) {
2599     unsigned NumOps = MI.getNumOperands();
2600     for (unsigned I = 1; I < NumOps; ++I) {
2601       MachineOperand &Op = MI.getOperand(I);
2602       if (Op.isReg() && Op.isKill())
2603         LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
2604     }
2605   }
2606 
2607   if (LIS) {
2608     SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);
2609 
2610     if (MI.getOperand(0).isEarlyClobber()) {
2611       // Use operand 1 was tied to the early-clobber def operand 0, so its live
2612       // interval could have ended at an early-clobber slot. Now that they are no
2613       // longer tied, we need to update it to the normal register slot.
2614       LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
2615       LiveRange::Segment *S = LI.getSegmentContaining(Idx);
2616       if (S->end == Idx.getRegSlot(true))
2617         S->end = Idx.getRegSlot();
2618     }
2619   }
2620 
2621   return MIB;
2622 }
2623 
2624 #undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
2625 #undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
2626 #undef CASE_WIDEOP_OPCODE_LMULS
2627 #undef CASE_WIDEOP_OPCODE_COMMON
2628 
2629 void RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
2630                                            MachineBasicBlock &MBB,
2631                                            MachineBasicBlock::iterator II,
2632                                            const DebugLoc &DL, Register DestReg,
2633                                            int64_t Amount,
2634                                            MachineInstr::MIFlag Flag) const {
2635   assert(Amount > 0 && "There is no need to get a VLEN-scaled value.");
2636   assert(Amount % 8 == 0 &&
2637          "Reserve the stack in multiples of one vector register's size.");
2638 
2639   MachineRegisterInfo &MRI = MF.getRegInfo();
2640   int64_t NumOfVReg = Amount / 8;
2641 
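       // Worked example: Amount = 24 gives NumOfVReg = 3. With Zba the sequence
       // below becomes
       //   csrr   dst, vlenb      (PseudoReadVLENB)
       //   sh1add dst, dst, dst   (dst = 3 * VLENB)
       // and without Zba the power-of-two NumOfVReg - 1 branch emits slli+add.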
2642   BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), DestReg).setMIFlag(Flag);
2643   assert(isInt<32>(NumOfVReg) &&
2644          "Expect the number of vector registers to fit in 32 bits.");
2645   if (llvm::has_single_bit<uint32_t>(NumOfVReg)) {
2646     uint32_t ShiftAmount = Log2_32(NumOfVReg);
2647     if (ShiftAmount == 0)
2648       return;
2649     BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
2650         .addReg(DestReg, RegState::Kill)
2651         .addImm(ShiftAmount)
2652         .setMIFlag(Flag);
2653   } else if (STI.hasStdExtZba() &&
2654              ((NumOfVReg % 3 == 0 && isPowerOf2_64(NumOfVReg / 3)) ||
2655               (NumOfVReg % 5 == 0 && isPowerOf2_64(NumOfVReg / 5)) ||
2656               (NumOfVReg % 9 == 0 && isPowerOf2_64(NumOfVReg / 9)))) {
2657     // We can use Zba SHXADD+SLLI instructions for multiply in some cases.
2658     unsigned Opc;
2659     uint32_t ShiftAmount;
2660     if (NumOfVReg % 9 == 0) {
2661       Opc = RISCV::SH3ADD;
2662       ShiftAmount = Log2_64(NumOfVReg / 9);
2663     } else if (NumOfVReg % 5 == 0) {
2664       Opc = RISCV::SH2ADD;
2665       ShiftAmount = Log2_64(NumOfVReg / 5);
2666     } else if (NumOfVReg % 3 == 0) {
2667       Opc = RISCV::SH1ADD;
2668       ShiftAmount = Log2_64(NumOfVReg / 3);
2669     } else {
2670       llvm_unreachable("Unexpected number of vregs");
2671     }
2672     if (ShiftAmount)
2673       BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
2674           .addReg(DestReg, RegState::Kill)
2675           .addImm(ShiftAmount)
2676           .setMIFlag(Flag);
2677     BuildMI(MBB, II, DL, get(Opc), DestReg)
2678         .addReg(DestReg, RegState::Kill)
2679         .addReg(DestReg)
2680         .setMIFlag(Flag);
2681   } else if (llvm::has_single_bit<uint32_t>(NumOfVReg - 1)) {
2682     Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2683     uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
2684     BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
2685         .addReg(DestReg)
2686         .addImm(ShiftAmount)
2687         .setMIFlag(Flag);
2688     BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
2689         .addReg(ScaledRegister, RegState::Kill)
2690         .addReg(DestReg, RegState::Kill)
2691         .setMIFlag(Flag);
2692   } else if (llvm::has_single_bit<uint32_t>(NumOfVReg + 1)) {
2693     Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2694     uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
2695     BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
2696         .addReg(DestReg)
2697         .addImm(ShiftAmount)
2698         .setMIFlag(Flag);
2699     BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
2700         .addReg(ScaledRegister, RegState::Kill)
2701         .addReg(DestReg, RegState::Kill)
2702         .setMIFlag(Flag);
2703   } else {
2704     Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2705     movImm(MBB, II, DL, N, NumOfVReg, Flag);
2706     if (!STI.hasStdExtM() && !STI.hasStdExtZmmul())
2707       MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2708           MF.getFunction(),
2709           "M- or Zmmul-extension must be enabled to calculate the vscaled size/"
2710           "offset."});
2711     BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
2712         .addReg(DestReg, RegState::Kill)
2713         .addReg(N, RegState::Kill)
2714         .setMIFlag(Flag);
2715   }
2716 }
2717 
2718 ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
2719 RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
2720   static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
2721       {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
2722        {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
2723   return ArrayRef(TargetFlags);
2724 }
2725 
2726 // Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
2727 bool RISCV::isSEXT_W(const MachineInstr &MI) {
2728   return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
2729          MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
2730 }
2731 
2732 // Returns true if this is the zext.w pattern, add.uw rd, rs1, x0.
2733 bool RISCV::isZEXT_W(const MachineInstr &MI) {
2734   return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
2735          MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
2736 }
2737 
2738 // Returns true if this is the zext.b pattern, andi rd, rs1, 255.
2739 bool RISCV::isZEXT_B(const MachineInstr &MI) {
2740   return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
2741          MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
2742 }
2743 
2744 static bool isRVVWholeLoadStore(unsigned Opcode) {
2745   switch (Opcode) {
2746   default:
2747     return false;
2748   case RISCV::VS1R_V:
2749   case RISCV::VS2R_V:
2750   case RISCV::VS4R_V:
2751   case RISCV::VS8R_V:
2752   case RISCV::VL1RE8_V:
2753   case RISCV::VL2RE8_V:
2754   case RISCV::VL4RE8_V:
2755   case RISCV::VL8RE8_V:
2756   case RISCV::VL1RE16_V:
2757   case RISCV::VL2RE16_V:
2758   case RISCV::VL4RE16_V:
2759   case RISCV::VL8RE16_V:
2760   case RISCV::VL1RE32_V:
2761   case RISCV::VL2RE32_V:
2762   case RISCV::VL4RE32_V:
2763   case RISCV::VL8RE32_V:
2764   case RISCV::VL1RE64_V:
2765   case RISCV::VL2RE64_V:
2766   case RISCV::VL4RE64_V:
2767   case RISCV::VL8RE64_V:
2768     return true;
2769   }
2770 }
2771 
2772 bool RISCV::isRVVSpill(const MachineInstr &MI) {
2773   // RVV lacks any support for immediate addressing for stack addresses, so be
2774   // conservative.
2775   unsigned Opcode = MI.getOpcode();
2776   if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
2777       !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
2778     return false;
2779   return true;
2780 }
2781 
2782 std::optional<std::pair<unsigned, unsigned>>
2783 RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
2784   switch (Opcode) {
2785   default:
2786     return std::nullopt;
2787   case RISCV::PseudoVSPILL2_M1:
2788   case RISCV::PseudoVRELOAD2_M1:
2789     return std::make_pair(2u, 1u);
2790   case RISCV::PseudoVSPILL2_M2:
2791   case RISCV::PseudoVRELOAD2_M2:
2792     return std::make_pair(2u, 2u);
2793   case RISCV::PseudoVSPILL2_M4:
2794   case RISCV::PseudoVRELOAD2_M4:
2795     return std::make_pair(2u, 4u);
2796   case RISCV::PseudoVSPILL3_M1:
2797   case RISCV::PseudoVRELOAD3_M1:
2798     return std::make_pair(3u, 1u);
2799   case RISCV::PseudoVSPILL3_M2:
2800   case RISCV::PseudoVRELOAD3_M2:
2801     return std::make_pair(3u, 2u);
2802   case RISCV::PseudoVSPILL4_M1:
2803   case RISCV::PseudoVRELOAD4_M1:
2804     return std::make_pair(4u, 1u);
2805   case RISCV::PseudoVSPILL4_M2:
2806   case RISCV::PseudoVRELOAD4_M2:
2807     return std::make_pair(4u, 2u);
2808   case RISCV::PseudoVSPILL5_M1:
2809   case RISCV::PseudoVRELOAD5_M1:
2810     return std::make_pair(5u, 1u);
2811   case RISCV::PseudoVSPILL6_M1:
2812   case RISCV::PseudoVRELOAD6_M1:
2813     return std::make_pair(6u, 1u);
2814   case RISCV::PseudoVSPILL7_M1:
2815   case RISCV::PseudoVRELOAD7_M1:
2816     return std::make_pair(7u, 1u);
2817   case RISCV::PseudoVSPILL8_M1:
2818   case RISCV::PseudoVRELOAD8_M1:
2819     return std::make_pair(8u, 1u);
2820   }
2821 }
2822 
2823 bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
2824   return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
2825          !MI.isInlineAsm();
2826 }
2827 
2828 bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
2829   int16_t MI1FrmOpIdx =
2830       RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
2831   int16_t MI2FrmOpIdx =
2832       RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
2833   if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
2834     return false;
2835   MachineOperand FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
2836   MachineOperand FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
2837   return FrmOp1.getImm() == FrmOp2.getImm();
2838 }
2839