//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsMips.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  const TargetRegisterClass *
  getRegClassForTypeOnBank(Register Reg, MachineRegisterInfo &MRI) const;
  unsigned selectLoadStoreOpCode(MachineInstr &I,
                                 MachineRegisterInfo &MRI) const;
  bool buildUnalignedStore(MachineInstr &I, unsigned Opc,
                           MachineOperand &BaseAddr, unsigned Offset,
                           MachineMemOperand *MMO) const;
  bool buildUnalignedLoad(MachineInstr &I, unsigned Opc, Register Dest,
                          MachineOperand &BaseAddr, unsigned Offset,
                          Register TiedDest, MachineMemOperand *MMO) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

bool MipsInstructionSelector::isRegInGprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::GPRBRegBankID;
}

bool MipsInstructionSelector::isRegInFprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::FPRBRegBankID;
}

bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *RC = getRegClassForTypeOnBank(DstReg, MRI);
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
    Register Reg, MachineRegisterInfo &MRI) const {
  const LLT Ty = MRI.getType(Reg);
  const unsigned TySize = Ty.getSizeInBits();

  if (isRegInGprb(Reg, MRI)) {
    assert((Ty.isScalar() || Ty.isPointer()) && TySize == 32 &&
           "Register class not available for LLT, register bank combination");
    return &Mips::GPR32RegClass;
  }

  if (isRegInFprb(Reg, MRI)) {
    if (Ty.isScalar()) {
      assert((TySize == 32 || TySize == 64) &&
             "Register class not available for LLT, register bank combination");
      if (TySize == 32)
        return &Mips::FGR32RegClass;
      return STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    }
  }

  llvm_unreachable("Unsupported register bank.");
}

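// Materialize a 32-bit immediate into DestReg. Immediates that fit a single
// ORi, LUi or ADDiu are emitted as one instruction; anything else uses a
// LUi/ORi pair.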
bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
  // Ori zero extends immediate. Used for values with zeros in high 16 bits.
  if (Imm.getHiBits(16).isZero()) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Lui places immediate in high 16 bits and sets low 16 bits to zero.
  if (Imm.getLoBits(16).isZero()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with single immediate instruction.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}

/// Returns the MIPS load/store opcode selected for I; when selection fails,
/// the original I.getOpcode() is returned instead.
unsigned
MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
                                               MachineRegisterInfo &MRI) const {
  const Register ValueReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(ValueReg);
  const unsigned TySize = Ty.getSizeInBits();
  const unsigned MemSizeInBytes = (*I.memoperands_begin())->getSize();
  unsigned Opc = I.getOpcode();
  const bool isStore = Opc == TargetOpcode::G_STORE;

  if (isRegInGprb(ValueReg, MRI)) {
    assert(((Ty.isScalar() && TySize == 32) ||
            (Ty.isPointer() && TySize == 32 && MemSizeInBytes == 4)) &&
           "Unsupported register bank, LLT, MemSizeInBytes combination");
    (void)TySize;
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // A load with unspecified extension (plain G_LOAD) is selected as a
      // zero-extending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (isRegInFprb(ValueReg, MRI)) {
    if (Ty.isScalar()) {
      assert(((TySize == 32 && MemSizeInBytes == 4) ||
              (TySize == 64 && MemSizeInBytes == 8)) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");

      if (MemSizeInBytes == 4)
        return isStore ? Mips::SWC1 : Mips::LWC1;

      if (STI.isFP64bit())
        return isStore ? Mips::SDC164 : Mips::LDC164;
      return isStore ? Mips::SDC1 : Mips::LDC1;
    }

    if (Ty.isVector()) {
      assert(STI.hasMSA() && "Vector instructions require target with MSA.");
      assert((TySize == 128 && MemSizeInBytes == 16) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");
      switch (Ty.getElementType().getSizeInBits()) {
      case 8:
        return isStore ? Mips::ST_B : Mips::LD_B;
      case 16:
        return isStore ? Mips::ST_H : Mips::LD_H;
      case 32:
        return isStore ? Mips::ST_W : Mips::LD_W;
      case 64:
        return isStore ? Mips::ST_D : Mips::LD_D;
      default:
        return Opc;
      }
    }
  }

  return Opc;
}

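// Emit one half of an unaligned word store (SWL or SWR, selected by Opc) at
// BaseAddr + Offset, reusing the store's source operand and memory operand.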
bool MipsInstructionSelector::buildUnalignedStore(
    MachineInstr &I, unsigned Opc, MachineOperand &BaseAddr, unsigned Offset,
    MachineMemOperand *MMO) const {
  MachineInstr *NewInst =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
          .add(I.getOperand(0))
          .add(BaseAddr)
          .addImm(Offset)
          .addMemOperand(MMO);
  if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
    return false;
  return true;
}

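// Emit one half of an unaligned word load (LWL or LWR, selected by Opc) from
// BaseAddr + Offset into Dest; TiedDest supplies the bytes that this partial
// load leaves unchanged.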
bool MipsInstructionSelector::buildUnalignedLoad(
    MachineInstr &I, unsigned Opc, Register Dest, MachineOperand &BaseAddr,
    unsigned Offset, Register TiedDest, MachineMemOperand *MMO) const {
  MachineInstr *NewInst =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
          .addDef(Dest)
          .add(BaseAddr)
          .addImm(Offset)
          .addUse(TiedDest)
          .addMemOperand(*I.memoperands_begin());
  if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
    return false;
  return true;
}

bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

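  // Select G_MUL on the GPR bank by hand so that the implicit operands added
  // by the MUL descriptor (operands 3 and 4) can be marked dead.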
  if (I.getOpcode() == Mips::G_MUL &&
      isRegInGprb(I.getOperand(0).getReg(), MRI)) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
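    // Multiply into the 64-bit HI/LO accumulator with PseudoMULTu and read the
    // high word of the product back with PseudoMFHI.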
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_PTR_ADD: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRCOND: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
             .add(I.getOperand(0))
             .addUse(Mips::ZERO)
             .add(I.getOperand(1));
    break;
  }
  case G_BRJT: {
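    // Scale the index by the jump-table entry size, add it to the table base,
    // load the destination entry and, under PIC, add the global base register
    // before branching indirectly.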
    unsigned EntrySize =
        MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, Align(4)));
    if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
                               .addUse(MF.getInfo<MipsFunctionInfo>()
                                           ->getGlobalBaseRegForGlobalISel(MF));
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    MachineInstr *Branch =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
            .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    const Register DestReg = I.getOperand(0).getReg();

    const TargetRegisterClass *DefRC = nullptr;
    if (DestReg.isPhysical())
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(DestReg, MRI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    auto MMO = *I.memoperands_begin();
    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_PTR_ADD + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_PTR_ADD) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    // Unaligned memory access
    if (MMO->getAlign() < MMO->getSize() &&
        !STI.systemSupportsUnalignedAccess()) {
      if (MMO->getSize() != 4 || !isRegInGprb(I.getOperand(0).getReg(), MRI))
        return false;

      if (I.getOpcode() == G_STORE) {
        if (!buildUnalignedStore(I, Mips::SWL, BaseAddr, SignedOffset + 3, MMO))
          return false;
        if (!buildUnalignedStore(I, Mips::SWR, BaseAddr, SignedOffset, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      if (I.getOpcode() == G_LOAD) {
        Register ImplDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
            .addDef(ImplDef);
        Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        if (!buildUnalignedLoad(I, Mips::LWL, Tmp, BaseAddr, SignedOffset + 3,
                                ImplDef, MMO))
          return false;
        if (!buildUnalignedLoad(I, Mips::LWR, I.getOperand(0).getReg(),
                                BaseAddr, SignedOffset, Tmp, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      return false;
    }

    const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
    if (NewOpc == I.getOpcode())
      return false;

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(MMO);
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
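    // Divide into the HI/LO accumulator with PseudoSDIV or PseudoUDIV, then
    // read the quotient with PseudoMFLO or the remainder with PseudoMFHI.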
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_UNMERGE_VALUES: {
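    // Split a 64-bit FPR value into two 32-bit GPRs: element 0 goes to the low
    // half, element 1 to the high half.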
    if (I.getNumOperands() != 3)
      return false;
    Register Src = I.getOperand(2).getReg();
    Register Lo = I.getOperand(0).getReg();
    Register Hi = I.getOperand(1).getReg();
    if (!isRegInFprb(Src, MRI) ||
        !(isRegInGprb(Lo, MRI) && isRegInGprb(Hi, MRI)))
      return false;

    unsigned Opcode =
        STI.isFP64bit() ? Mips::ExtractElementF64_64 : Mips::ExtractElementF64;

    MachineInstr *ExtractLo = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Lo)
                                  .addUse(Src)
                                  .addImm(0);
    if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
      return false;

    MachineInstr *ExtractHi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Hi)
                                  .addUse(Src)
                                  .addImm(1);
    if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_IMPLICIT_DEF: {
    Register Dst = I.getOperand(0).getReg();
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .addDef(Dst);

    // Set the register class based on the register bank; implicit defs can
    // occur in both the FPR and the GPR bank.
    MRI.setRegClass(Dst, getRegClassForTypeOnBank(Dst, MRI));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
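    // Materialize the constant's bit pattern in GPRs and move it into the FPU:
    // MTC1 for 32-bit values, BuildPairF64(_64) for 64-bit values.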
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
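    // Truncate in the FPU (TRUNC.W.S or TRUNC.W.D) and move the resulting
    // 32-bit integer into the GPR with MFC1.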
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GLOBAL_VALUE: {
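    // Under PIC, load the address from the GOT, adding a low-offset ADDiu for
    // globals with local linkage; otherwise build the absolute address with a
    // LUi/ADDiu pair.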
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel(MF))
                                .addGlobalAddress(GVal);
      // Global values without local linkage are handled differently when they
      // are part of a call sequence: MipsCallLowering::lowerCall creates the
      // G_GLOBAL_VALUE instruction as part of the call sequence and adds the
      // MO_GOT_CALL flag when the callee does not have local linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, Align(4)));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
  case G_JUMP_TABLE: {
    if (MF.getTarget().isPositionIndependent()) {
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               .addReg(MF.getInfo<MipsFunctionInfo>()
                           ->getGlobalBaseRegForGlobalISel(MF))
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               .addMemOperand(MF.getMachineMemOperand(
                   MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad, 4,
                   Align(4)));
    } else {
      MI =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(I.getOperand(0).getReg())
              .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
    }
    break;
  }
  case G_ICMP: {
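    // Expand the integer compare into one or two GPR instructions: SLT/SLTu
    // for the relation, plus XOR, XORi or SLTiu to test equality or invert the
    // result, as annotated on each predicate below.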
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FCMP: {
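    // Map the IR predicate onto a MIPS FP condition code. Each ordered and
    // unordered predicate pair shares a condition code and differs only in
    // whether the GPR result must be logically negated.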
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // The compare result in the GPR defaults to `true`. MOVF_I moves `false`
    // (Mips::ZERO) into the result when the fcmp yields false. When the
    // original predicate (Cond) is the logically negated MipsFCMPCondCode, the
    // result is inverted, i.e. MOVT_I is used instead.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
  case G_VASTART: {
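    // Compute the address of the first variadic argument slot (the varargs
    // frame index) and store it through the va_list pointer in operand 0.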
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
                                                   MipsSubtarget &Subtarget,
                                                   MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm