xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp (revision bc5304a006238115291e7568583632889dffbab9)
1 //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISCV implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVInstrInfo.h"
14 #include "MCTargetDesc/RISCVMatInt.h"
15 #include "RISCV.h"
16 #include "RISCVSubtarget.h"
17 #include "RISCVTargetMachine.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/MachineFunctionPass.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/RegisterScavenging.h"
24 #include "llvm/Support/ErrorHandling.h"
25 #include "llvm/Support/TargetRegistry.h"
26 
27 using namespace llvm;
28 
29 #define GEN_CHECK_COMPRESS_INSTR
30 #include "RISCVGenCompressInstEmitter.inc"
31 
32 #define GET_INSTRINFO_CTOR_DTOR
33 #include "RISCVGenInstrInfo.inc"
34 
// Construct the target instruction info. The two pseudo opcodes tell the
// generic TargetInstrInfo machinery which instructions delimit call-frame
// setup/teardown around calls.
35 RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
36     : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
37       STI(STI) {}
38 
39 unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
40                                              int &FrameIndex) const {
41   switch (MI.getOpcode()) {
42   default:
43     return 0;
44   case RISCV::LB:
45   case RISCV::LBU:
46   case RISCV::LH:
47   case RISCV::LHU:
48   case RISCV::FLH:
49   case RISCV::LW:
50   case RISCV::FLW:
51   case RISCV::LWU:
52   case RISCV::LD:
53   case RISCV::FLD:
54     break;
55   }
56 
57   if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
58       MI.getOperand(2).getImm() == 0) {
59     FrameIndex = MI.getOperand(1).getIndex();
60     return MI.getOperand(0).getReg();
61   }
62 
63   return 0;
64 }
65 
66 unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
67                                             int &FrameIndex) const {
68   switch (MI.getOpcode()) {
69   default:
70     return 0;
71   case RISCV::SB:
72   case RISCV::SH:
73   case RISCV::SW:
74   case RISCV::FSH:
75   case RISCV::FSW:
76   case RISCV::SD:
77   case RISCV::FSD:
78     break;
79   }
80 
81   if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
82       MI.getOperand(2).getImm() == 0) {
83     FrameIndex = MI.getOperand(1).getIndex();
84     return MI.getOperand(0).getReg();
85   }
86 
87   return 0;
88 }
89 
90 void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
91                                  MachineBasicBlock::iterator MBBI,
92                                  const DebugLoc &DL, MCRegister DstReg,
93                                  MCRegister SrcReg, bool KillSrc) const {
94   if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
95     BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
96         .addReg(SrcReg, getKillRegState(KillSrc))
97         .addImm(0);
98     return;
99   }
100 
101   // FPR->FPR copies and VR->VR copies.
102   unsigned Opc;
103   bool IsScalableVector = false;
104   if (RISCV::FPR16RegClass.contains(DstReg, SrcReg))
105     Opc = RISCV::FSGNJ_H;
106   else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg))
107     Opc = RISCV::FSGNJ_S;
108   else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg))
109     Opc = RISCV::FSGNJ_D;
110   else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
111     Opc = RISCV::PseudoVMV1R_V;
112     IsScalableVector = true;
113   } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
114     Opc = RISCV::PseudoVMV2R_V;
115     IsScalableVector = true;
116   } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
117     Opc = RISCV::PseudoVMV4R_V;
118     IsScalableVector = true;
119   } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
120     Opc = RISCV::PseudoVMV8R_V;
121     IsScalableVector = true;
122   } else
123     llvm_unreachable("Impossible reg-to-reg copy");
124 
125   if (IsScalableVector)
126     BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
127         .addReg(SrcReg, getKillRegState(KillSrc));
128   else
129     BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
130         .addReg(SrcReg, getKillRegState(KillSrc))
131         .addReg(SrcReg, getKillRegState(KillSrc));
132 }
133 
134 void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
135                                          MachineBasicBlock::iterator I,
136                                          Register SrcReg, bool IsKill, int FI,
137                                          const TargetRegisterClass *RC,
138                                          const TargetRegisterInfo *TRI) const {
139   DebugLoc DL;
140   if (I != MBB.end())
141     DL = I->getDebugLoc();
142 
143   MachineFunction *MF = MBB.getParent();
144   const MachineFrameInfo &MFI = MF->getFrameInfo();
145   MachineMemOperand *MMO = MF->getMachineMemOperand(
146       MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
147       MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
148 
149   unsigned Opcode;
150   if (RISCV::GPRRegClass.hasSubClassEq(RC))
151     Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
152              RISCV::SW : RISCV::SD;
153   else if (RISCV::FPR16RegClass.hasSubClassEq(RC))
154     Opcode = RISCV::FSH;
155   else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
156     Opcode = RISCV::FSW;
157   else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
158     Opcode = RISCV::FSD;
159   else
160     llvm_unreachable("Can't store this register to stack slot");
161 
162   BuildMI(MBB, I, DL, get(Opcode))
163       .addReg(SrcReg, getKillRegState(IsKill))
164       .addFrameIndex(FI)
165       .addImm(0)
166       .addMemOperand(MMO);
167 }
168 
169 void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
170                                           MachineBasicBlock::iterator I,
171                                           Register DstReg, int FI,
172                                           const TargetRegisterClass *RC,
173                                           const TargetRegisterInfo *TRI) const {
174   DebugLoc DL;
175   if (I != MBB.end())
176     DL = I->getDebugLoc();
177 
178   MachineFunction *MF = MBB.getParent();
179   const MachineFrameInfo &MFI = MF->getFrameInfo();
180   MachineMemOperand *MMO = MF->getMachineMemOperand(
181       MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
182       MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
183 
184   unsigned Opcode;
185   if (RISCV::GPRRegClass.hasSubClassEq(RC))
186     Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
187              RISCV::LW : RISCV::LD;
188   else if (RISCV::FPR16RegClass.hasSubClassEq(RC))
189     Opcode = RISCV::FLH;
190   else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
191     Opcode = RISCV::FLW;
192   else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
193     Opcode = RISCV::FLD;
194   else
195     llvm_unreachable("Can't load this register from stack slot");
196 
197   BuildMI(MBB, I, DL, get(Opcode), DstReg)
198     .addFrameIndex(FI)
199     .addImm(0)
200     .addMemOperand(MMO);
201 }
202 
/// Materialize the constant \p Val into \p DstReg before \p MBBI using the
/// instruction sequence computed by RISCVMatInt. Intermediate results flow
/// through one scratch virtual register; the final instruction writes DstReg.
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag) const {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  bool IsRV64 = MF->getSubtarget<RISCVSubtarget>().is64Bit();

  if (!IsRV64 && !isInt<32>(Val))
    report_fatal_error("Should only materialize 32-bit constants for RV32");

  // Ask the materialization helper for the instruction recipe.
  RISCVMatInt::InstSeq Seq;
  RISCVMatInt::generateInstSeq(Val, IsRV64, Seq);
  assert(Seq.size() > 0);

  Register Tmp = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Only the first instruction reads X0; afterwards each instruction consumes
  // the previous result.
  Register SrcReg = RISCV::X0;
  for (unsigned Idx = 0, E = Seq.size(); Idx != E; ++Idx) {
    const RISCVMatInt::Inst &Inst = Seq[Idx];
    // The last instruction writes DstReg; earlier ones use the scratch vreg.
    Register Result = (Idx + 1 == E) ? DstReg : Tmp;

    if (Inst.Opc == RISCV::LUI) {
      // LUI has no register source.
      BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    } else {
      BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
          .addReg(SrcReg, RegState::Kill)
          .addImm(Inst.Imm)
          .setMIFlag(Flag);
    }

    SrcReg = Result;
  }
}
241 
242 // The contents of values added to Cond are not examined outside of
243 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
244 // push BranchOpcode, Reg1, Reg2.
245 static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
246                             SmallVectorImpl<MachineOperand> &Cond) {
247   // Block ends with fall-through condbranch.
248   assert(LastInst.getDesc().isConditionalBranch() &&
249          "Unknown conditional branch");
250   Target = LastInst.getOperand(2).getMBB();
251   Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
252   Cond.push_back(LastInst.getOperand(0));
253   Cond.push_back(LastInst.getOperand(1));
254 }
255 
256 static unsigned getOppositeBranchOpcode(int Opc) {
257   switch (Opc) {
258   default:
259     llvm_unreachable("Unrecognized conditional branch");
260   case RISCV::BEQ:
261     return RISCV::BNE;
262   case RISCV::BNE:
263     return RISCV::BEQ;
264   case RISCV::BLT:
265     return RISCV::BGE;
266   case RISCV::BGE:
267     return RISCV::BLT;
268   case RISCV::BLTU:
269     return RISCV::BGEU;
270   case RISCV::BGEU:
271     return RISCV::BLTU;
272   }
273 }
274 
// Analyze the terminators of MBB for branch folding. On success (return
// false) fills TBB/FBB/Cond per the TargetInstrInfo::analyzeBranch contract;
// returns true when the block's terminators are too complex to describe
// (indirect branch, or more than two terminators).
275 bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
276                                    MachineBasicBlock *&TBB,
277                                    MachineBasicBlock *&FBB,
278                                    SmallVectorImpl<MachineOperand> &Cond,
279                                    bool AllowModify) const {
280   TBB = FBB = nullptr;
281   Cond.clear();
282 
283   // If the block has no terminators, it just falls into the block after it.
284   MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
285   if (I == MBB.end() || !isUnpredicatedTerminator(*I))
286     return false;
287 
288   // Count the number of terminators and find the first unconditional or
289   // indirect branch.
290   MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
291   int NumTerminators = 0;
// Walk backwards from the last terminator; the loop stops at the first
// non-terminator, so NumTerminators counts the whole terminator run.
292   for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
293        J++) {
294     NumTerminators++;
295     if (J->getDesc().isUnconditionalBranch() ||
296         J->getDesc().isIndirectBranch()) {
297       FirstUncondOrIndirectBr = J.getReverse();
298     }
299   }
300 
301   // If AllowModify is true, we can erase any terminators after
302   // FirstUncondOrIndirectBR.
303   if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
// Everything after an unconditional/indirect branch is dead; erase it and
// keep the terminator count in sync.
304     while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
305       std::next(FirstUncondOrIndirectBr)->eraseFromParent();
306       NumTerminators--;
307     }
308     I = FirstUncondOrIndirectBr;
309   }
310 
311   // We can't handle blocks that end in an indirect branch.
312   if (I->getDesc().isIndirectBranch())
313     return true;
314 
315   // We can't handle blocks with more than 2 terminators.
316   if (NumTerminators > 2)
317     return true;
318 
319   // Handle a single unconditional branch.
320   if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
321     TBB = getBranchDestBlock(*I);
322     return false;
323   }
324 
325   // Handle a single conditional branch.
326   if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
327     parseCondBranch(*I, TBB, Cond);
328     return false;
329   }
330 
331   // Handle a conditional branch followed by an unconditional branch.
332   if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
333       I->getDesc().isUnconditionalBranch()) {
334     parseCondBranch(*std::prev(I), TBB, Cond);
335     FBB = getBranchDestBlock(*I);
336     return false;
337   }
338 
339   // Otherwise, we can't handle this.
340   return true;
341 }
342 
343 unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
344                                       int *BytesRemoved) const {
345   if (BytesRemoved)
346     *BytesRemoved = 0;
347   MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
348   if (I == MBB.end())
349     return 0;
350 
351   if (!I->getDesc().isUnconditionalBranch() &&
352       !I->getDesc().isConditionalBranch())
353     return 0;
354 
355   // Remove the branch.
356   if (BytesRemoved)
357     *BytesRemoved += getInstSizeInBytes(*I);
358   I->eraseFromParent();
359 
360   I = MBB.end();
361 
362   if (I == MBB.begin())
363     return 1;
364   --I;
365   if (!I->getDesc().isConditionalBranch())
366     return 1;
367 
368   // Remove the branch.
369   if (BytesRemoved)
370     *BytesRemoved += getInstSizeInBytes(*I);
371   I->eraseFromParent();
372   return 2;
373 }
374 
375 // Inserts a branch into the end of the specific MachineBasicBlock, returning
376 // the number of instructions inserted.
377 unsigned RISCVInstrInfo::insertBranch(
378     MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
379     ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
380   if (BytesAdded)
381     *BytesAdded = 0;
382 
383   // Shouldn't be a fall through.
384   assert(TBB && "insertBranch must not be told to insert a fallthrough");
385   assert((Cond.size() == 3 || Cond.size() == 0) &&
386          "RISCV branch conditions have two components!");
387 
388   // Unconditional branch.
389   if (Cond.empty()) {
390     MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
391     if (BytesAdded)
392       *BytesAdded += getInstSizeInBytes(MI);
393     return 1;
394   }
395 
396   // Either a one or two-way conditional branch.
397   unsigned Opc = Cond[0].getImm();
398   MachineInstr &CondMI =
399       *BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
400   if (BytesAdded)
401     *BytesAdded += getInstSizeInBytes(CondMI);
402 
403   // One-way conditional branch.
404   if (!FBB)
405     return 1;
406 
407   // Two-way conditional branch.
408   MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
409   if (BytesAdded)
410     *BytesAdded += getInstSizeInBytes(MI);
411   return 2;
412 }
413 
// Expand an out-of-range unconditional branch into an auipc+jalr pair
// (PseudoJump) placed in the freshly created block MBB, scavenging a GPR for
// the pair's scratch register. Returns the worst-case size in bytes (8).
414 unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
415                                               MachineBasicBlock &DestBB,
416                                               const DebugLoc &DL,
417                                               int64_t BrOffset,
418                                               RegScavenger *RS) const {
419   assert(RS && "RegScavenger required for long branching");
420   assert(MBB.empty() &&
421          "new block should be inserted for expanding unconditional branch");
422   assert(MBB.pred_size() == 1);
423 
424   MachineFunction *MF = MBB.getParent();
425   MachineRegisterInfo &MRI = MF->getRegInfo();
426 
// auipc+jalr can only reach a signed 32-bit displacement.
427   if (!isInt<32>(BrOffset))
428     report_fatal_error(
429         "Branch offsets outside of the signed 32-bit range not supported");
430 
431   // FIXME: A virtual register must be used initially, as the register
432   // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
433   // uses the same workaround).
434   Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
435   auto II = MBB.end();
436 
437   MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
438                           .addReg(ScratchReg, RegState::Define | RegState::Dead)
439                           .addMBB(&DestBB, RISCVII::MO_CALL);
440 
// Now replace the placeholder vreg with a real scavenged GPR and drop all
// virtual registers again (we are running post-RA).
441   RS->enterBasicBlockEnd(MBB);
442   unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
443                                                 MI.getIterator(), false, 0);
444   MRI.replaceRegWith(ScratchReg, Scav);
445   MRI.clearVirtRegs();
446   RS->setRegUsed(Scav);
447   return 8;
448 }
449 
450 bool RISCVInstrInfo::reverseBranchCondition(
451     SmallVectorImpl<MachineOperand> &Cond) const {
452   assert((Cond.size() == 3) && "Invalid branch condition!");
453   Cond[0].setImm(getOppositeBranchOpcode(Cond[0].getImm()));
454   return false;
455 }
456 
457 MachineBasicBlock *
458 RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
459   assert(MI.getDesc().isBranch() && "Unexpected opcode!");
460   // The branch target is always the last operand.
461   int NumOp = MI.getNumExplicitOperands();
462   return MI.getOperand(NumOp - 1).getMBB();
463 }
464 
/// Return true if a branch of kind \p BranchOp can encode displacement
/// \p BrOffset.
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();
  // Ideally we could determine the supported branch offset from the
  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
  // PseudoBR.
  switch (BranchOp) {
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
    // B-type encoding: 13-bit signed offset.
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    // J-type encoding: 21-bit signed offset.
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
    // auipc+jalr pair; bias by 0x800 for the sign-extended low 12 bits.
    return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
  default:
    llvm_unreachable("Unexpected opcode!");
  }
}
488 
// Return the encoded size of MI in bytes. Meta instructions are 0 bytes,
// pseudos use the expansion sizes listed below, and ordinary instructions
// report 2 bytes when they are compressible (RVC) and their MCInstrDesc size
// otherwise.
489 unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
490   unsigned Opcode = MI.getOpcode();
491 
492   switch (Opcode) {
493   default: {
// The compressibility check needs the parent function to reach the
// target machine; without one, fall back to the descriptor size.
494     if (MI.getParent() && MI.getParent()->getParent()) {
495       const auto MF = MI.getMF();
496       const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
497       const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
498       const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
499       const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
500       if (isCompressibleInst(MI, &ST, MRI, STI))
501         return 2;
502     }
503     return get(Opcode).getSize();
504   }
// Meta instructions emit no machine code.
505   case TargetOpcode::EH_LABEL:
506   case TargetOpcode::IMPLICIT_DEF:
507   case TargetOpcode::KILL:
508   case TargetOpcode::DBG_VALUE:
509     return 0;
510   // These values are determined based on RISCVExpandAtomicPseudoInsts,
511   // RISCVExpandPseudoInsts and RISCVMCCodeEmitter, depending on where the
512   // pseudos are expanded.
513   case RISCV::PseudoCALLReg:
514   case RISCV::PseudoCALL:
515   case RISCV::PseudoJump:
516   case RISCV::PseudoTAIL:
517   case RISCV::PseudoLLA:
518   case RISCV::PseudoLA:
519   case RISCV::PseudoLA_TLS_IE:
520   case RISCV::PseudoLA_TLS_GD:
521     return 8;
522   case RISCV::PseudoAtomicLoadNand32:
523   case RISCV::PseudoAtomicLoadNand64:
524     return 20;
525   case RISCV::PseudoMaskedAtomicSwap32:
526   case RISCV::PseudoMaskedAtomicLoadAdd32:
527   case RISCV::PseudoMaskedAtomicLoadSub32:
528     return 28;
529   case RISCV::PseudoMaskedAtomicLoadNand32:
530     return 32;
531   case RISCV::PseudoMaskedAtomicLoadMax32:
532   case RISCV::PseudoMaskedAtomicLoadMin32:
533     return 44;
534   case RISCV::PseudoMaskedAtomicLoadUMax32:
535   case RISCV::PseudoMaskedAtomicLoadUMin32:
536     return 36;
537   case RISCV::PseudoCmpXchg32:
538   case RISCV::PseudoCmpXchg64:
539     return 16;
540   case RISCV::PseudoMaskedCmpXchg32:
541     return 32;
// Inline asm size must be conservatively estimated from its text.
542   case TargetOpcode::INLINEASM:
543   case TargetOpcode::INLINEASM_BR: {
544     const MachineFunction &MF = *MI.getParent()->getParent();
545     const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
546     return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
547                               *TM.getMCAsmInfo());
548   }
549   }
550 }
551 
552 bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
553   const unsigned Opcode = MI.getOpcode();
554   switch (Opcode) {
555   default:
556     break;
557   case RISCV::FSGNJ_D:
558   case RISCV::FSGNJ_S:
559     // The canonical floating-point move is fsgnj rd, rs, rs.
560     return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
561            MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
562   case RISCV::ADDI:
563   case RISCV::ORI:
564   case RISCV::XORI:
565     return (MI.getOperand(1).isReg() &&
566             MI.getOperand(1).getReg() == RISCV::X0) ||
567            (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
568   }
569   return MI.isAsCheapAsAMove();
570 }
571 
572 Optional<DestSourcePair>
573 RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
574   if (MI.isMoveReg())
575     return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
576   switch (MI.getOpcode()) {
577   default:
578     break;
579   case RISCV::ADDI:
580     // Operand 1 can be a frameindex but callers expect registers
581     if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
582         MI.getOperand(2).getImm() == 0)
583       return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
584     break;
585   case RISCV::FSGNJ_D:
586   case RISCV::FSGNJ_S:
587     // The canonical floating-point move is fsgnj rd, rs, rs.
588     if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
589         MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
590       return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
591     break;
592   }
593   return None;
594 }
595 
/// Machine verifier hook: check every declared RISCV immediate operand of
/// \p MI against its encodable range. On failure sets \p ErrInfo and returns
/// false.
bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
                                       StringRef &ErrInfo) const {
  const MCInstrInfo *MCII = STI.getInstrInfo();
  MCInstrDesc const &Desc = MCII->get(MI.getOpcode());

  for (unsigned Index = 0; Index < Desc.getNumOperands(); ++Index) {
    unsigned OpType = Desc.OpInfo[Index].OperandType;
    // Only the target-defined immediate operand types are range-checked.
    if (OpType < RISCVOp::OPERAND_FIRST_RISCV_IMM ||
        OpType > RISCVOp::OPERAND_LAST_RISCV_IMM)
      continue;
    const MachineOperand &MO = MI.getOperand(Index);
    if (!MO.isImm())
      continue;

    int64_t Imm = MO.getImm();
    bool Ok;
    switch (OpType) {
    case RISCVOp::OPERAND_UIMM4:
      Ok = isUInt<4>(Imm);
      break;
    case RISCVOp::OPERAND_UIMM5:
      Ok = isUInt<5>(Imm);
      break;
    case RISCVOp::OPERAND_UIMM12:
      Ok = isUInt<12>(Imm);
      break;
    case RISCVOp::OPERAND_SIMM12:
      Ok = isInt<12>(Imm);
      break;
    case RISCVOp::OPERAND_UIMM20:
      Ok = isUInt<20>(Imm);
      break;
    case RISCVOp::OPERAND_UIMMLOG2XLEN:
      // Shift amounts: 6 bits on RV64, 5 bits on RV32.
      Ok = STI.getTargetTriple().isArch64Bit() ? isUInt<6>(Imm)
                                               : isUInt<5>(Imm);
      break;
    default:
      llvm_unreachable("Unexpected operand type");
    }
    if (!Ok) {
      ErrInfo = "Invalid immediate";
      return false;
    }
  }

  return true;
}
644 
// Return true if get the base operand, byte offset of an instruction and the
// memory width. Width is the size of memory that is being loaded/stored.
bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
    const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
    unsigned &Width, const TargetRegisterInfo *TRI) const {
  if (!LdSt.mayLoadOrStore())
    return false;

  // Here we assume the standard RISC-V ISA, which uses a base+offset
  // addressing mode. You'll need to relax these conditions to support custom
  // load/stores instructions.
  if (LdSt.getNumExplicitOperands() != 3 || !LdSt.hasOneMemOperand())
    return false;

  const MachineOperand &BaseOp = LdSt.getOperand(1);
  const MachineOperand &OffsetOp = LdSt.getOperand(2);
  if (!BaseOp.isReg() || !OffsetOp.isImm())
    return false;

  Width = (*LdSt.memoperands_begin())->getSize();
  BaseReg = &BaseOp;
  Offset = OffsetOp.getImm();
  return true;
}
669 
/// Return true when two memory accesses provably never alias: same base
/// register, and the lower access ends at or before the higher one starts.
bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4).  If
  // base registers are identical, and the offset of a lower memory access +
  // the width doesn't overlap the offset of a higher memory access,
  // then the memory accesses are different.
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned int WidthA = 0, WidthB = 0;
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      // Keep the arithmetic in 64 bits: the offsets are int64_t, and the
      // previous narrowing to 'int' could truncate large offsets and wrongly
      // report overlapping accesses as disjoint.
      int64_t LowOffset = std::min(OffsetA, OffsetB);
      int64_t HighOffset = std::max(OffsetA, OffsetB);
      int64_t LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}
700 
/// Split operand target flags into their direct part and the remaining
/// bitmask part.
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned DirectMask = RISCVII::MO_DIRECT_FLAG_MASK;
  return {TF & DirectMask, TF & ~DirectMask};
}
706 
// Map each direct operand target flag to its MIR serialization name so
// machine-operand flags round-trip through textual MIR.
707 ArrayRef<std::pair<unsigned, const char *>>
708 RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
709   using namespace RISCVII;
710   static const std::pair<unsigned, const char *> TargetFlags[] = {
711       {MO_CALL, "riscv-call"},
712       {MO_PLT, "riscv-plt"},
713       {MO_LO, "riscv-lo"},
714       {MO_HI, "riscv-hi"},
715       {MO_PCREL_LO, "riscv-pcrel-lo"},
716       {MO_PCREL_HI, "riscv-pcrel-hi"},
717       {MO_GOT_HI, "riscv-got-hi"},
718       {MO_TPREL_LO, "riscv-tprel-lo"},
719       {MO_TPREL_HI, "riscv-tprel-hi"},
720       {MO_TPREL_ADD, "riscv-tprel-add"},
721       {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
722       {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
723   return makeArrayRef(TargetFlags);
724 }
725 bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
726     MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
727   const Function &F = MF.getFunction();
728 
729   // Can F be deduplicated by the linker? If it can, don't outline from it.
730   if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
731     return false;
732 
733   // Don't outline from functions with section markings; the program could
734   // expect that all the code is in the named section.
735   if (F.hasSection())
736     return false;
737 
738   // It's safe to outline from MF.
739   return true;
740 }
741 
// Per-block outlining gate; always permissive here because the per-candidate
// checks happen later.
742 bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
743                                             unsigned &Flags) const {
744   // More accurate safety checking is done in getOutliningCandidateInfo.
745   return true;
746 }
747 
748 // Enum values indicating how an outlined call should be constructed.
749 enum MachineOutlinerConstructionID {
// The only strategy used below: call the outlined function via t0 (x5).
750   MachineOutlinerDefault
751 };
752 
753 outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
754     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
755 
756   // First we need to filter out candidates where the X5 register (IE t0) can't
757   // be used to setup the function call.
758   auto CannotInsertCall = [](outliner::Candidate &C) {
759     const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
760 
761     C.initLRU(*TRI);
762     LiveRegUnits LRU = C.LRU;
763     return !LRU.available(RISCV::X5);
764   };
765 
766   llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
767 
768   // If the sequence doesn't have enough candidates left, then we're done.
769   if (RepeatedSequenceLocs.size() < 2)
770     return outliner::OutlinedFunction();
771 
772   unsigned SequenceSize = 0;
773 
774   auto I = RepeatedSequenceLocs[0].front();
775   auto E = std::next(RepeatedSequenceLocs[0].back());
776   for (; I != E; ++I)
777     SequenceSize += getInstSizeInBytes(*I);
778 
779   // call t0, function = 8 bytes.
780   unsigned CallOverhead = 8;
781   for (auto &C : RepeatedSequenceLocs)
782     C.setCallInfo(MachineOutlinerDefault, CallOverhead);
783 
784   // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
785   unsigned FrameOverhead = 4;
786   if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
787           .getFeatureBits()[RISCV::FeatureStdExtC])
788     FrameOverhead = 2;
789 
790   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
791                                     FrameOverhead, MachineOutlinerDefault);
792 }
793 
794 outliner::InstrType
795 RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
796                                  unsigned Flags) const {
797   MachineInstr &MI = *MBBI;
798   MachineBasicBlock *MBB = MI.getParent();
799   const TargetRegisterInfo *TRI =
800       MBB->getParent()->getSubtarget().getRegisterInfo();
801 
802   // Positions generally can't safely be outlined.
803   if (MI.isPosition()) {
804     // We can manually strip out CFI instructions later.
805     if (MI.isCFIInstruction())
806       return outliner::InstrType::Invisible;
807 
808     return outliner::InstrType::Illegal;
809   }
810 
811   // Don't trust the user to write safe inline assembly.
812   if (MI.isInlineAsm())
813     return outliner::InstrType::Illegal;
814 
815   // We can't outline branches to other basic blocks.
816   if (MI.isTerminator() && !MBB->succ_empty())
817     return outliner::InstrType::Illegal;
818 
819   // We need support for tail calls to outlined functions before return
820   // statements can be allowed.
821   if (MI.isReturn())
822     return outliner::InstrType::Illegal;
823 
824   // Don't allow modifying the X5 register which we use for return addresses for
825   // these outlined functions.
826   if (MI.modifiesRegister(RISCV::X5, TRI) ||
827       MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
828     return outliner::InstrType::Illegal;
829 
830   // Make sure the operands don't reference something unsafe.
831   for (const auto &MO : MI.operands())
832     if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI())
833       return outliner::InstrType::Illegal;
834 
835   // Don't allow instructions which won't be materialized to impact outlining
836   // analysis.
837   if (MI.isMetaInstruction())
838     return outliner::InstrType::Invisible;
839 
840   return outliner::InstrType::Legal;
841 }
842 
843 void RISCVInstrInfo::buildOutlinedFrame(
844     MachineBasicBlock &MBB, MachineFunction &MF,
845     const outliner::OutlinedFunction &OF) const {
846 
847   // Strip out any CFI instructions
848   bool Changed = true;
849   while (Changed) {
850     Changed = false;
851     auto I = MBB.begin();
852     auto E = MBB.end();
853     for (; I != E; ++I) {
854       if (I->isCFIInstruction()) {
855         I->removeFromParent();
856         Changed = true;
857         break;
858       }
859     }
860   }
861 
862   MBB.addLiveIn(RISCV::X5);
863 
864   // Add in a return instruction to the end of the outlined frame.
865   MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
866       .addReg(RISCV::X0, RegState::Define)
867       .addReg(RISCV::X5)
868       .addImm(0));
869 }
870 
871 MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
872     Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
873     MachineFunction &MF, const outliner::Candidate &C) const {
874 
875   // Add in a call instruction to the outlined function at the given location.
876   It = MBB.insert(It,
877                   BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
878                       .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
879                                         RISCVII::MO_CALL));
880   return It;
881 }
882