//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsMips.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  const TargetRegisterClass *
  getRegClassForTypeOnBank(Register Reg, MachineRegisterInfo &MRI) const;
  unsigned selectLoadStoreOpCode(MachineInstr &I,
                                 MachineRegisterInfo &MRI) const;
  bool buildUnalignedStore(MachineInstr &I, unsigned Opc,
                           MachineOperand &BaseAddr, unsigned Offset,
                           MachineMemOperand *MMO) const;
  bool buildUnalignedLoad(MachineInstr &I, unsigned Opc, Register Dest,
                          MachineOperand &BaseAddr, unsigned Offset,
                          Register TiedDest, MachineMemOperand *MMO) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

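// Return true when the register bank selector assigned the given virtual
// register to the GPR or FPR register bank, respectively.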
bool MipsInstructionSelector::isRegInGprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::GPRBRegBankID;
}

bool MipsInstructionSelector::isRegInFprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::FPRBRegBankID;
}

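// Constrain the destination of a COPY to a register class that suits its type
// and register bank. Copies into physical registers are accepted as-is.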
bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *RC = getRegClassForTypeOnBank(DstReg, MRI);
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
    Register Reg, MachineRegisterInfo &MRI) const {
  const LLT Ty = MRI.getType(Reg);
  const unsigned TySize = Ty.getSizeInBits();

  if (isRegInGprb(Reg, MRI)) {
    assert((Ty.isScalar() || Ty.isPointer()) && TySize == 32 &&
           "Register class not available for LLT, register bank combination");
    return &Mips::GPR32RegClass;
  }

  if (isRegInFprb(Reg, MRI)) {
    if (Ty.isScalar()) {
      assert((TySize == 32 || TySize == 64) &&
             "Register class not available for LLT, register bank combination");
      if (TySize == 32)
        return &Mips::FGR32RegClass;
      return STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    }
  }

  llvm_unreachable("Unsupported register bank.");
}

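// Materialize a 32-bit immediate into DestReg, using a single ORi, LUi or
// ADDiu when the value allows it and a LUi/ORi pair otherwise.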
bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
  // Ori zero extends immediate. Used for values with zeros in high 16 bits.
  if (Imm.getHiBits(16).isZero()) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Lui places immediate in high 16 bits and sets low 16 bits to zero.
  if (Imm.getLoBits(16).isZero()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with single immediate instruction.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}

/// Returning I.getOpcode() means that no MIPS opcode could be selected.
unsigned
MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
                                               MachineRegisterInfo &MRI) const {
  const Register ValueReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(ValueReg);
  const unsigned TySize = Ty.getSizeInBits();
  const unsigned MemSizeInBytes =
      (*I.memoperands_begin())->getSize().getValue();
  unsigned Opc = I.getOpcode();
  const bool isStore = Opc == TargetOpcode::G_STORE;

  if (isRegInGprb(ValueReg, MRI)) {
    assert(((Ty.isScalar() && TySize == 32) ||
            (Ty.isPointer() && TySize == 32 && MemSizeInBytes == 4)) &&
           "Unsupported register bank, LLT, MemSizeInBytes combination");
    (void)TySize;
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // An unspecified extending load is selected as a zero-extending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (isRegInFprb(ValueReg, MRI)) {
    if (Ty.isScalar()) {
      assert(((TySize == 32 && MemSizeInBytes == 4) ||
              (TySize == 64 && MemSizeInBytes == 8)) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");

      if (MemSizeInBytes == 4)
        return isStore ? Mips::SWC1 : Mips::LWC1;

      if (STI.isFP64bit())
        return isStore ? Mips::SDC164 : Mips::LDC164;
      return isStore ? Mips::SDC1 : Mips::LDC1;
    }

    if (Ty.isVector()) {
      assert(STI.hasMSA() && "Vector instructions require target with MSA.");
      assert((TySize == 128 && MemSizeInBytes == 16) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");
      switch (Ty.getElementType().getSizeInBits()) {
      case 8:
        return isStore ? Mips::ST_B : Mips::LD_B;
      case 16:
        return isStore ? Mips::ST_H : Mips::LD_H;
      case 32:
        return isStore ? Mips::ST_W : Mips::LD_W;
      case 64:
        return isStore ? Mips::ST_D : Mips::LD_D;
      default:
        return Opc;
      }
    }
  }

  return Opc;
}

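// Emit one half of an unaligned word store (SWL or SWR) at BaseAddr + Offset.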
bool MipsInstructionSelector::buildUnalignedStore(
    MachineInstr &I, unsigned Opc, MachineOperand &BaseAddr, unsigned Offset,
    MachineMemOperand *MMO) const {
  MachineInstr *NewInst =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
          .add(I.getOperand(0))
          .add(BaseAddr)
          .addImm(Offset)
          .addMemOperand(MMO);
  if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
    return false;
  return true;
}

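// Emit one half of an unaligned word load (LWL or LWR); TiedDest carries the
// partially assembled value between the two halves.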
bool MipsInstructionSelector::buildUnalignedLoad(
    MachineInstr &I, unsigned Opc, Register Dest, MachineOperand &BaseAddr,
    unsigned Offset, Register TiedDest, MachineMemOperand *MMO) const {
  MachineInstr *NewInst =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
          .addDef(Dest)
          .add(BaseAddr)
          .addImm(Offset)
          .addUse(TiedDest)
          .addMemOperand(*I.memoperands_begin());
  if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
    return false;
  return true;
}

bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

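  // Select G_MUL on the GPR bank directly to MUL; the instruction's two
  // trailing implicit operands are unused, so mark them dead.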
  if (I.getOpcode() == Mips::G_MUL &&
      isRegInGprb(I.getOperand(0).getReg(), MRI)) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
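    // Multiply into the HI/LO accumulator and read the high word back from HI.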
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_PTR_ADD: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRJT: {
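    // Scale the index by the entry size (SLL), add the table address (ADDu),
    // load the entry (LW), add the GP-relative base in PIC mode, and branch
    // indirectly to the loaded target.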
    unsigned EntrySize =
        MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, Align(4)));
    if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
                               .addUse(MF.getInfo<MipsFunctionInfo>()
                                           ->getGlobalBaseRegForGlobalISel(MF));
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    MachineInstr *Branch =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
            .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    const Register DestReg = I.getOperand(0).getReg();

    const TargetRegisterClass *DefRC = nullptr;
    if (DestReg.isPhysical())
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(DestReg, MRI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    auto MMO = *I.memoperands_begin();
    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_PTR_ADD + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_PTR_ADD) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    // Unaligned memory access
    if ((!MMO->getSize().hasValue() ||
         MMO->getAlign() < MMO->getSize().getValue()) &&
        !STI.systemSupportsUnalignedAccess()) {
      if (MMO->getSize() != 4 || !isRegInGprb(I.getOperand(0).getReg(), MRI))
        return false;

      if (I.getOpcode() == G_STORE) {
        if (!buildUnalignedStore(I, Mips::SWL, BaseAddr, SignedOffset + 3, MMO))
          return false;
        if (!buildUnalignedStore(I, Mips::SWR, BaseAddr, SignedOffset, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      if (I.getOpcode() == G_LOAD) {
        Register ImplDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
            .addDef(ImplDef);
        Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        if (!buildUnalignedLoad(I, Mips::LWL, Tmp, BaseAddr, SignedOffset + 3,
                                ImplDef, MMO))
          return false;
        if (!buildUnalignedLoad(I, Mips::LWR, I.getOperand(0).getReg(),
                                BaseAddr, SignedOffset, Tmp, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      return false;
    }

    const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
    if (NewOpc == I.getOpcode())
      return false;

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(MMO);
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
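    // Both division and remainder go through a DIV pseudo that writes HI/LO;
    // the quotient is read from LO and the remainder from HI.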
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_UNMERGE_VALUES: {
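    // Split a 64-bit FPR value into its two 32-bit GPR halves with
    // ExtractElementF64.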
    if (I.getNumOperands() != 3)
      return false;
    Register Src = I.getOperand(2).getReg();
    Register Lo = I.getOperand(0).getReg();
    Register Hi = I.getOperand(1).getReg();
    if (!isRegInFprb(Src, MRI) ||
        !(isRegInGprb(Lo, MRI) && isRegInGprb(Hi, MRI)))
      return false;

    unsigned Opcode =
        STI.isFP64bit() ? Mips::ExtractElementF64_64 : Mips::ExtractElementF64;

    MachineInstr *ExtractLo = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Lo)
                                  .addUse(Src)
                                  .addImm(0);
    if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
      return false;

    MachineInstr *ExtractHi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Hi)
                                  .addUse(Src)
                                  .addImm(1);
    if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_IMPLICIT_DEF: {
    Register Dst = I.getOperand(0).getReg();
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .addDef(Dst);

    // Set the register class based on the register bank; the implicit def can
    // be in either the fpr or the gpr bank.
    MRI.setRegClass(Dst, getRegClassForTypeOnBank(Dst, MRI));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
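    // Materialize the FP constant through the GPR bank: build its bit pattern
    // with integer moves, then transfer it into an FPR (MTC1 or BuildPairF64).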
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
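    // Truncate in the FPU (TRUNC.W) and move the 32-bit integer result to a
    // GPR with MFC1.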
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                .addDef(ResultInFPR)
                .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GLOBAL_VALUE: {
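    // PIC: load the address from the GOT (plus a low-part add for symbols with
    // local linkage). Non-PIC: build the absolute address with LUi/ADDiu.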
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel(MF))
                                .addGlobalAddress(GVal);
      // Global values without local linkage are handled differently when they
      // are part of a call sequence: MipsCallLowering::lowerCall creates the
      // G_GLOBAL_VALUE instruction for the call and adds the MO_GOT_CALL flag
      // when the callee doesn't have local linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, Align(4)));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
  case G_JUMP_TABLE: {
    if (MF.getTarget().isPositionIndependent()) {
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               .addReg(MF.getInfo<MipsFunctionInfo>()
                           ->getGlobalBaseRegForGlobalISel(MF))
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               .addMemOperand(MF.getMachineMemOperand(
                   MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad, 4,
                   Align(4)));
    } else {
      MI =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(I.getOperand(0).getReg())
              .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
    }
    break;
  }
  case G_ICMP: {
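    // Lower integer compares to short SLT/SLTu sequences that leave 0 or 1 in
    // the result register; some predicates need an extra XOR/XORi to invert it.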
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FCMP: {
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // The default compare result in the gpr register is `true`. We move
    // `false` (Mips::ZERO) into the gpr result when fcmp yields false, using
    // MOVF_I. When the original predicate (Cond) is the logically negated
    // MipsFCMPCondCode, the result is inverted, i.e. MOVT_I is used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
  case G_VASTART: {
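    // va_start: compute the address of the first variadic argument slot and
    // store it into the va_list object pointed to by operand 0.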
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *
createMipsInstructionSelector(const MipsTargetMachine &TM,
                              const MipsSubtarget &Subtarget,
                              const MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm