xref: /freebsd/contrib/llvm-project/llvm/lib/Target/Mips/MipsFastISel.cpp (revision cfd6422a5217410fbd66f7a7a8a64d9d85e61229)
1 //===- MipsFastISel.cpp - Mips FastISel implementation --------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file defines the MIPS-specific support for the FastISel class.
11 /// Some of the target-specific code is generated by tablegen in the file
12 /// MipsGenFastISel.inc, which is #included here.
13 ///
14 //===----------------------------------------------------------------------===//
15 
16 #include "MCTargetDesc/MipsABIInfo.h"
17 #include "MCTargetDesc/MipsBaseInfo.h"
18 #include "MipsCCState.h"
19 #include "MipsISelLowering.h"
20 #include "MipsInstrInfo.h"
21 #include "MipsMachineFunction.h"
22 #include "MipsSubtarget.h"
23 #include "MipsTargetMachine.h"
24 #include "llvm/ADT/APInt.h"
25 #include "llvm/ADT/ArrayRef.h"
26 #include "llvm/ADT/DenseMap.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/Analysis/TargetLibraryInfo.h"
29 #include "llvm/CodeGen/CallingConvLower.h"
30 #include "llvm/CodeGen/FastISel.h"
31 #include "llvm/CodeGen/FunctionLoweringInfo.h"
32 #include "llvm/CodeGen/ISDOpcodes.h"
33 #include "llvm/CodeGen/MachineBasicBlock.h"
34 #include "llvm/CodeGen/MachineFrameInfo.h"
35 #include "llvm/CodeGen/MachineInstrBuilder.h"
36 #include "llvm/CodeGen/MachineMemOperand.h"
37 #include "llvm/CodeGen/MachineRegisterInfo.h"
38 #include "llvm/CodeGen/TargetInstrInfo.h"
39 #include "llvm/CodeGen/TargetLowering.h"
40 #include "llvm/CodeGen/ValueTypes.h"
41 #include "llvm/IR/Attributes.h"
42 #include "llvm/IR/CallingConv.h"
43 #include "llvm/IR/Constant.h"
44 #include "llvm/IR/Constants.h"
45 #include "llvm/IR/DataLayout.h"
46 #include "llvm/IR/Function.h"
47 #include "llvm/IR/GetElementPtrTypeIterator.h"
48 #include "llvm/IR/GlobalValue.h"
49 #include "llvm/IR/GlobalVariable.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/Operator.h"
55 #include "llvm/IR/Type.h"
56 #include "llvm/IR/User.h"
57 #include "llvm/IR/Value.h"
58 #include "llvm/MC/MCContext.h"
59 #include "llvm/MC/MCInstrDesc.h"
60 #include "llvm/MC/MCRegisterInfo.h"
61 #include "llvm/MC/MCSymbol.h"
62 #include "llvm/Support/Casting.h"
63 #include "llvm/Support/Compiler.h"
64 #include "llvm/Support/Debug.h"
65 #include "llvm/Support/ErrorHandling.h"
66 #include "llvm/Support/MachineValueType.h"
67 #include "llvm/Support/MathExtras.h"
68 #include "llvm/Support/raw_ostream.h"
69 #include <algorithm>
70 #include <array>
71 #include <cassert>
72 #include <cstdint>
73 
74 #define DEBUG_TYPE "mips-fastisel"
75 
76 using namespace llvm;
77 
78 extern cl::opt<bool> EmitJalrReloc;
79 
80 namespace {
81 
// MipsFastISel - fast instruction selector for MIPS32. Lowers LLVM IR
// directly to MachineInstrs; any select* routine returning false makes the
// caller fall back to the (slower but complete) SelectionDAG path.
class MipsFastISel final : public FastISel {

  // All possible address modes.
  class Address {
  public:
    using BaseKind = enum { RegBase, FrameIndexBase };

  private:
    BaseKind Kind = RegBase;
    union {
      unsigned Reg; // Base register (valid when Kind == RegBase).
      int FI;       // Frame index (valid when Kind == FrameIndexBase).
    } Base;

    int64_t Offset = 0;

    // Target of a call address, if this Address names a global.
    const GlobalValue *GV = nullptr;

  public:
    // Innocuous defaults for our address.
    Address() { Base.Reg = 0; }

    void setKind(BaseKind K) { Kind = K; }
    BaseKind getKind() const { return Kind; }
    bool isRegBase() const { return Kind == RegBase; }
    bool isFIBase() const { return Kind == FrameIndexBase; }

    void setReg(unsigned Reg) {
      assert(isRegBase() && "Invalid base register access!");
      Base.Reg = Reg;
    }

    unsigned getReg() const {
      assert(isRegBase() && "Invalid base register access!");
      return Base.Reg;
    }

    void setFI(unsigned FI) {
      assert(isFIBase() && "Invalid base frame index access!");
      Base.FI = FI;
    }

    unsigned getFI() const {
      assert(isFIBase() && "Invalid base frame index access!");
      return Base.FI;
    }

    void setOffset(int64_t Offset_) { Offset = Offset_; }
    int64_t getOffset() const { return Offset; }
    void setGlobalValue(const GlobalValue *G) { GV = G; }
    const GlobalValue *getGlobalValue() { return GV; }
  };

  /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const TargetMachine &TM;
  const MipsSubtarget *Subtarget;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  MipsFunctionInfo *MFI;

  // Convenience variables to avoid some queries.
  LLVMContext *Context;

  bool fastLowerArguments() override;
  bool fastLowerCall(CallLoweringInfo &CLI) override;
  bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;

  bool UnsupportedFPMode; // To allow fast-isel to proceed and just not handle
  // floating point but not reject doing fast-isel in other
  // situations

private:
  // Selection routines. Each returns true when it fully handled the
  // instruction, false to fall back to SelectionDAG.
  bool selectLogicalOp(const Instruction *I);
  bool selectLoad(const Instruction *I);
  bool selectStore(const Instruction *I);
  bool selectBranch(const Instruction *I);
  bool selectSelect(const Instruction *I);
  bool selectCmp(const Instruction *I);
  bool selectFPExt(const Instruction *I);
  bool selectFPTrunc(const Instruction *I);
  bool selectFPToInt(const Instruction *I, bool IsSigned);
  bool selectRet(const Instruction *I);
  bool selectTrunc(const Instruction *I);
  bool selectIntExt(const Instruction *I);
  bool selectShift(const Instruction *I);
  bool selectDivRem(const Instruction *I, unsigned ISDOpcode);

  // Utility helper routines.
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isTypeSupported(Type *Ty, MVT &VT);
  bool isLoadTypeLegal(Type *Ty, MVT &VT);
  bool computeAddress(const Value *Obj, Address &Addr);
  bool computeCallAddress(const Value *V, Address &Addr);
  void simplifyAddress(Address &Addr);

  // Emit helper routines.
  bool emitCmp(unsigned DestReg, const CmpInst *CI);
  bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                unsigned Alignment = 0);
  bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
                 MachineMemOperand *MMO = nullptr);
  bool emitStore(MVT VT, unsigned SrcReg, Address &Addr,
                 unsigned Alignment = 0);
  unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
  bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg,
                  bool IsZExt);
  bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);

  bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
  bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                       unsigned DestReg);
  bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                       unsigned DestReg);

  unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned);

  unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
                         const Value *RHS);

  unsigned materializeFP(const ConstantFP *CFP, MVT VT);
  unsigned materializeGV(const GlobalValue *GV, MVT VT);
  unsigned materializeInt(const Constant *C, MVT VT);
  unsigned materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
  unsigned materializeExternalCallSym(MCSymbol *Syn);

  // Build a MachineInstr at the current insertion point with no operands
  // pre-attached (caller chains .addReg/.addImm as needed).
  MachineInstrBuilder emitInst(unsigned Opc) {
    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
  }

  // As above, with DstReg as the (defined) destination operand.
  MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) {
    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                   DstReg);
  }

  // Emit a reg+imm-addressed store: Opc SrcReg, MemOffset(MemReg).
  MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg,
                                    unsigned MemReg, int64_t MemOffset) {
    return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
  }

  // Emit a reg+imm-addressed load: Opc DstReg, MemOffset(MemReg).
  MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg,
                                   unsigned MemReg, int64_t MemOffset) {
    return emitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
  }

  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, bool Op0IsKill,
                           unsigned Op1, bool Op1IsKill);

  // for some reason, this default is not generated by tablegen
  // so we explicitly generate it here. Always fails (returns no register).
  unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill, uint64_t imm1,
                             uint64_t imm2, unsigned Op3, bool Op3IsKill) {
    return 0;
  }

  // Call handling routines.
private:
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
  bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
                       unsigned &NumBytes);
  bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);

  const MipsABIInfo &getABI() const {
    return static_cast<const MipsTargetMachine &>(TM).getABI();
  }

public:
  // Backend specific FastISel code.
  explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
                        const TargetLibraryInfo *libInfo)
      : FastISel(funcInfo, libInfo), TM(funcInfo.MF->getTarget()),
        Subtarget(&funcInfo.MF->getSubtarget<MipsSubtarget>()),
        TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
    MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
    Context = &funcInfo.Fn->getContext();
    // FP64 and soft-float configurations are not handled by this fast-isel;
    // FP operations simply bail out instead of rejecting fast-isel entirely.
    UnsupportedFPMode = Subtarget->isFP64bit() || Subtarget->useSoftFloat();
  }

  unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
  unsigned fastMaterializeConstant(const Constant *C) override;
  bool fastSelectInstruction(const Instruction *I) override;

#include "MipsGenFastISel.inc"
};
271 
272 } // end anonymous namespace
273 
// Forward declaration for the O32 calling-convention function defined in the
// included MipsGenCallingConv.inc; marked unused because this file calls
// CC_MipsO32 directly instead.
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State) LLVM_ATTRIBUTE_UNUSED;

// Stub referenced by MipsGenCallingConv.inc; this fast-isel path must never
// route an argument through it.
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  llvm_unreachable("should not be called");
}

// FP64 variant of the stub above; equally unreachable from fast-isel.
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  llvm_unreachable("should not be called");
}

#include "MipsGenCallingConv.inc"
291 
// The IR calling convention is ignored: every fast-isel'd call uses the
// CC_MipsO32 assignment function from MipsGenCallingConv.inc.
CCAssignFn *MipsFastISel::CCAssignFnForCall(CallingConv::ID CC) const {
  return CC_MipsO32;
}
295 
296 unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
297                                      const Value *LHS, const Value *RHS) {
298   // Canonicalize immediates to the RHS first.
299   if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
300     std::swap(LHS, RHS);
301 
302   unsigned Opc;
303   switch (ISDOpc) {
304   case ISD::AND:
305     Opc = Mips::AND;
306     break;
307   case ISD::OR:
308     Opc = Mips::OR;
309     break;
310   case ISD::XOR:
311     Opc = Mips::XOR;
312     break;
313   default:
314     llvm_unreachable("unexpected opcode");
315   }
316 
317   unsigned LHSReg = getRegForValue(LHS);
318   if (!LHSReg)
319     return 0;
320 
321   unsigned RHSReg;
322   if (const auto *C = dyn_cast<ConstantInt>(RHS))
323     RHSReg = materializeInt(C, MVT::i32);
324   else
325     RHSReg = getRegForValue(RHS);
326   if (!RHSReg)
327     return 0;
328 
329   unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
330   if (!ResultReg)
331     return 0;
332 
333   emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);
334   return ResultReg;
335 }
336 
// Materialize the address of a static alloca as LEA_ADDiu of its frame
// index. Returns 0 for dynamic allocas (not in StaticAllocaMap).
unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i32 &&
         "Alloca should always return a pointer.");

  DenseMap<const AllocaInst *, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
    // LEA_ADDiu resolves frame-index + 0 into the stack address at frame
    // lowering time.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LEA_ADDiu),
            ResultReg)
        .addFrameIndex(SI->second)
        .addImm(0);
    return ResultReg;
  }

  return 0;
}
355 
356 unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
357   if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
358     return 0;
359   const TargetRegisterClass *RC = &Mips::GPR32RegClass;
360   const ConstantInt *CI = cast<ConstantInt>(C);
361   return materialize32BitInt(CI->getZExtValue(), RC);
362 }
363 
// Materialize a 32-bit integer constant into a register of class RC using
// the shortest MIPS sequence:
//   ADDiu $d, $zero, imm   - 16-bit signed immediates
//   ORi   $d, $zero, imm   - 16-bit unsigned immediates
//   LUi (+ ORi)            - everything else
unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
                                           const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);

  if (isInt<16>(Imm)) {
    // One sign-extending immediate add from $zero.
    unsigned Opc = Mips::ADDiu;
    emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
    return ResultReg;
  } else if (isUInt<16>(Imm)) {
    // One zero-extending immediate OR from $zero.
    emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
    return ResultReg;
  }
  unsigned Lo = Imm & 0xFFFF;
  unsigned Hi = (Imm >> 16) & 0xFFFF;
  if (Lo) {
    // Both Lo and Hi have nonzero bits.
    unsigned TmpReg = createResultReg(RC);
    emitInst(Mips::LUi, TmpReg).addImm(Hi);
    emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
  } else {
    // Low half is zero, so LUi alone suffices.
    emitInst(Mips::LUi, ResultReg).addImm(Hi);
  }
  return ResultReg;
}
388 
// Materialize an f32/f64 constant by building its bit pattern in GPR(s) and
// moving it into the FPU register file. Returns 0 for unsupported FP modes
// or other FP types.
unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
  if (UnsupportedFPMode)
    return 0;
  int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
  if (VT == MVT::f32) {
    const TargetRegisterClass *RC = &Mips::FGR32RegClass;
    unsigned DestReg = createResultReg(RC);
    // Build the 32-bit pattern in a GPR, then move it to an FPR.
    unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
    emitInst(Mips::MTC1, DestReg).addReg(TempReg);
    return DestReg;
  } else if (VT == MVT::f64) {
    const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
    unsigned DestReg = createResultReg(RC);
    // Split the 64-bit pattern into two GPRs and pair them: low word first
    // operand, high word second (BuildPairF64 operand order).
    unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
    unsigned TempReg2 =
        materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
    emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
    return DestReg;
  }
  return 0;
}
410 
// Materialize the address of a global via a GOT load off the global base
// register. Returns 0 for non-i32 pointers and thread-local globals.
unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32)
    return 0;
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  unsigned DestReg = createResultReg(RC);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  // TLS not supported at this time.
  if (IsThreadLocal)
    return 0;
  // LW DestReg, %got(GV)($gp): loads the GOT entry for the global.
  emitInst(Mips::LW, DestReg)
      .addReg(MFI->getGlobalBaseReg(*MF))
      .addGlobalAddress(GV, 0, MipsII::MO_GOT);
  if ((GV->hasInternalLinkage() ||
       (GV->hasLocalLinkage() && !isa<Function>(GV)))) {
    // Local symbols' GOT entries point at the page; add the low 16 bits of
    // the address to finish the computation.
    unsigned TempReg = createResultReg(RC);
    emitInst(Mips::ADDiu, TempReg)
        .addReg(DestReg)
        .addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
    DestReg = TempReg;
  }
  return DestReg;
}
435 
// Materialize the address of an external call symbol (e.g. a libcall) via
// its GOT entry, loaded off the global base register.
// NOTE(review): the in-class declaration spells this parameter "Syn" -
// harmless, but worth aligning.
unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  unsigned DestReg = createResultReg(RC);
  emitInst(Mips::LW, DestReg)
      .addReg(MFI->getGlobalBaseReg(*MF))
      .addSym(Sym, MipsII::MO_GOT);
  return DestReg;
}
444 
445 // Materialize a constant into a register, and return the register
446 // number (or zero if we failed to handle it).
447 unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
448   EVT CEVT = TLI.getValueType(DL, C->getType(), true);
449 
450   // Only handle simple types.
451   if (!CEVT.isSimple())
452     return 0;
453   MVT VT = CEVT.getSimpleVT();
454 
455   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
456     return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
457   else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
458     return materializeGV(GV, VT);
459   else if (isa<ConstantInt>(C))
460     return materializeInt(C, VT);
461 
462   return 0;
463 }
464 
// Compute an Address (register or frame-index base plus offset) for Obj,
// folding bitcasts, constant GEP offsets, and static allocas. Falls back to
// materializing Obj into a plain base register. Returns false only when no
// base register could be produced.
bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }
  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast:
    // Look through bitcasts.
    return computeAddress(U->getOperand(0), Addr);
  case Instruction::GetElementPtr: {
    // Save the address so it can be restored if GEP folding fails partway.
    Address SavedAddr = Addr;
    int64_t TmpOffset = Addr.getOffset();
    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
         ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Struct index: add the fixed field offset from the layout.
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        // Array/pointer index: scale by the element's alloc size.
        uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
        while (true) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (canFoldAddIntoGEP(U, Op)) {
            // A compatible add with a constant operand. Fold the constant.
            ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported
          goto unsupported_gep;
        }
      }
    }
    // Try to grab the base operand now.
    Addr.setOffset(TmpOffset);
    if (computeAddress(U->getOperand(0), Addr))
      return true;
    // We failed, restore everything and try the other options.
    Addr = SavedAddr;
  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    // Static allocas become frame-index-based addresses.
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst *, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.setKind(Address::FrameIndexBase);
      Addr.setFI(SI->second);
      return true;
    }
    break;
  }
  }
  // Last resort: put the whole value in a register and use it as the base.
  Addr.setReg(getRegForValue(Obj));
  return Addr.getReg() != 0;
}
545 
546 bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
547   const User *U = nullptr;
548   unsigned Opcode = Instruction::UserOp1;
549 
550   if (const auto *I = dyn_cast<Instruction>(V)) {
551     // Check if the value is defined in the same basic block. This information
552     // is crucial to know whether or not folding an operand is valid.
553     if (I->getParent() == FuncInfo.MBB->getBasicBlock()) {
554       Opcode = I->getOpcode();
555       U = I;
556     }
557   } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
558     Opcode = C->getOpcode();
559     U = C;
560   }
561 
562   switch (Opcode) {
563   default:
564     break;
565   case Instruction::BitCast:
566     // Look past bitcasts if its operand is in the same BB.
567       return computeCallAddress(U->getOperand(0), Addr);
568     break;
569   case Instruction::IntToPtr:
570     // Look past no-op inttoptrs if its operand is in the same BB.
571     if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
572         TLI.getPointerTy(DL))
573       return computeCallAddress(U->getOperand(0), Addr);
574     break;
575   case Instruction::PtrToInt:
576     // Look past no-op ptrtoints if its operand is in the same BB.
577     if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
578       return computeCallAddress(U->getOperand(0), Addr);
579     break;
580   }
581 
582   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
583     Addr.setGlobalValue(GV);
584     return true;
585   }
586 
587   // If all else fails, try to materialize the value in a register.
588   if (!Addr.getGlobalValue()) {
589     Addr.setReg(getRegForValue(V));
590     return Addr.getReg() != 0;
591   }
592 
593   return false;
594 }
595 
596 bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
597   EVT evt = TLI.getValueType(DL, Ty, true);
598   // Only handle simple types.
599   if (evt == MVT::Other || !evt.isSimple())
600     return false;
601   VT = evt.getSimpleVT();
602 
603   // Handle all legal types, i.e. a register that will directly hold this
604   // value.
605   return TLI.isTypeLegal(VT);
606 }
607 
608 bool MipsFastISel::isTypeSupported(Type *Ty, MVT &VT) {
609   if (Ty->isVectorTy())
610     return false;
611 
612   if (isTypeLegal(Ty, VT))
613     return true;
614 
615   // If this is a type than can be sign or zero-extended to a basic operation
616   // go ahead and accept it now.
617   if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
618     return true;
619 
620   return false;
621 }
622 
623 bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
624   if (isTypeLegal(Ty, VT))
625     return true;
626   // We will extend this in a later patch:
627   //   If this is a type than can be sign or zero-extended to a basic operation
628   //   go ahead and accept it now.
629   if (VT == MVT::i8 || VT == MVT::i16)
630     return true;
631   return false;
632 }
633 
// Because of how EmitCmp is called with fast-isel, you can
// end up with redundant "andi" instructions after the sequences emitted below.
// We should try and solve this issue in the future.
//
// Emit a comparison producing 0/1 in ResultReg. Integer predicates map onto
// SLT/SLTu (+ XOR/XORi inversions); ordered FP predicates use c.cond.fmt
// into FCC0 followed by a conditional move selecting 1 or 0. Returns false
// for unhandled predicates, FP in unsupported FP mode, or operands that
// cannot be widened into registers.
bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
  const Value *Left = CI->getOperand(0), *Right = CI->getOperand(1);
  bool IsUnsigned = CI->isUnsigned();
  unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);
  if (LeftReg == 0)
    return false;
  unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);
  if (RightReg == 0)
    return false;
  CmpInst::Predicate P = CI->getPredicate();

  switch (P) {
  default:
    return false;
  case CmpInst::ICMP_EQ: {
    // eq: (l ^ r) == 0, tested via SLTiu tmp, 1.
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_NE: {
    // ne: (l ^ r) != 0, tested via SLTu $zero, tmp.
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
    break;
  }
  case CmpInst::ICMP_UGT:
    // ugt: r <u l.
    emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);
    break;
  case CmpInst::ICMP_ULT:
    // ult: l <u r.
    emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
    break;
  case CmpInst::ICMP_UGE: {
    // uge: !(l <u r).
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_ULE: {
    // ule: !(r <u l).
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_SGT:
    // sgt: r <s l.
    emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);
    break;
  case CmpInst::ICMP_SLT:
    // slt: l <s r.
    emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
    break;
  case CmpInst::ICMP_SGE: {
    // sge: !(l <s r).
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_SLE: {
    // sle: !(r <s l).
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UNE:
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE: {
    if (UnsupportedFPMode)
      return false;
    bool IsFloat = Left->getType()->isFloatTy();
    bool IsDouble = Left->getType()->isDoubleTy();
    if (!IsFloat && !IsDouble)
      return false;
    unsigned Opc, CondMovOpc;
    // Predicates without a direct c.cond.fmt encoding use the complementary
    // condition plus MOVF (move-if-false) instead of MOVT:
    //   une = !oeq, ogt = !ule, oge = !ult.
    switch (P) {
    case CmpInst::FCMP_OEQ:
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_UNE:
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    case CmpInst::FCMP_OLT:
      Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_OLE:
      Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_OGT:
      Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    case CmpInst::FCMP_OGE:
      Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    default:
      llvm_unreachable("Only switching of a subset of CCs.");
    }
    // Materialize 0 and 1, compare into FCC0, then select between them.
    unsigned RegWithZero = createResultReg(&Mips::GPR32RegClass);
    unsigned RegWithOne = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
    emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
    emitInst(Opc).addReg(Mips::FCC0, RegState::Define).addReg(LeftReg)
                 .addReg(RightReg);
    emitInst(CondMovOpc, ResultReg)
        .addReg(RegWithOne)
        .addReg(Mips::FCC0)
        .addReg(RegWithZero);
    break;
  }
  }
  return true;
}
756 
// Emit a load of type VT from Addr into a freshly created ResultReg.
// i8/i16 use the zero-extending LBu/LHu forms. The Alignment parameter is
// currently unused. Returns false for unsupported types, FP in unsupported
// FP mode, or an address kind that cannot be encoded.
bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                            unsigned Alignment) {
  //
  // more cases will be handled here in following patches.
  //
  unsigned Opc;
  switch (VT.SimpleTy) {
  case MVT::i32:
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LW;
    break;
  case MVT::i16:
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LHu;
    break;
  case MVT::i8:
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LBu;
    break;
  case MVT::f32:
    if (UnsupportedFPMode)
      return false;
    ResultReg = createResultReg(&Mips::FGR32RegClass);
    Opc = Mips::LWC1;
    break;
  case MVT::f64:
    if (UnsupportedFPMode)
      return false;
    ResultReg = createResultReg(&Mips::AFGR64RegClass);
    Opc = Mips::LDC1;
    break;
  default:
    return false;
  }
  if (Addr.isRegBase()) {
    // Fold the offset into range for the 16-bit immediate, then load.
    simplifyAddress(Addr);
    emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
    return true;
  }
  if (Addr.isFIBase()) {
    // Frame-index base: attach a memory operand so later passes know the
    // stack slot being read.
    unsigned FI = Addr.getFI();
    int64_t Offset = Addr.getOffset();
    MachineFrameInfo &MFI = MF->getFrameInfo();
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI), Align(4));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
        .addFrameIndex(FI)
        .addImm(Offset)
        .addMemOperand(MMO);
    return true;
  }
  return false;
}
811 
// Emit a store of SrcReg (type VT) to Addr. The Alignment parameter is
// currently unused. Returns false for unsupported types, FP in unsupported
// FP mode, or an address kind that cannot be encoded.
bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
                             unsigned Alignment) {
  //
  // more cases will be handled here in following patches.
  //
  unsigned Opc;
  switch (VT.SimpleTy) {
  case MVT::i8:
    Opc = Mips::SB;
    break;
  case MVT::i16:
    Opc = Mips::SH;
    break;
  case MVT::i32:
    Opc = Mips::SW;
    break;
  case MVT::f32:
    if (UnsupportedFPMode)
      return false;
    Opc = Mips::SWC1;
    break;
  case MVT::f64:
    if (UnsupportedFPMode)
      return false;
    Opc = Mips::SDC1;
    break;
  default:
    return false;
  }
  if (Addr.isRegBase()) {
    // Fold the offset into range for the 16-bit immediate, then store.
    simplifyAddress(Addr);
    emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
    return true;
  }
  if (Addr.isFIBase()) {
    // Frame-index base: attach a memory operand so later passes know the
    // stack slot being written.
    unsigned FI = Addr.getFI();
    int64_t Offset = Addr.getOffset();
    MachineFrameInfo &MFI = MF->getFrameInfo();
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
        MFI.getObjectSize(FI), Align(4));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
        .addReg(SrcReg)
        .addFrameIndex(FI)
        .addImm(Offset)
        .addMemOperand(MMO);
    return true;
  }
  return false;
}
862 
863 bool MipsFastISel::selectLogicalOp(const Instruction *I) {
864   MVT VT;
865   if (!isTypeSupported(I->getType(), VT))
866     return false;
867 
868   unsigned ResultReg;
869   switch (I->getOpcode()) {
870   default:
871     llvm_unreachable("Unexpected instruction.");
872   case Instruction::And:
873     ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
874     break;
875   case Instruction::Or:
876     ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
877     break;
878   case Instruction::Xor:
879     ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
880     break;
881   }
882 
883   if (!ResultReg)
884     return false;
885 
886   updateValueMap(I, ResultReg);
887   return true;
888 }
889 
890 bool MipsFastISel::selectLoad(const Instruction *I) {
891   // Atomic loads need special handling.
892   if (cast<LoadInst>(I)->isAtomic())
893     return false;
894 
895   // Verify we have a legal type before going any further.
896   MVT VT;
897   if (!isLoadTypeLegal(I->getType(), VT))
898     return false;
899 
900   // See if we can handle this address.
901   Address Addr;
902   if (!computeAddress(I->getOperand(0), Addr))
903     return false;
904 
905   unsigned ResultReg;
906   if (!emitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
907     return false;
908   updateValueMap(I, ResultReg);
909   return true;
910 }
911 
912 bool MipsFastISel::selectStore(const Instruction *I) {
913   Value *Op0 = I->getOperand(0);
914   unsigned SrcReg = 0;
915 
916   // Atomic stores need special handling.
917   if (cast<StoreInst>(I)->isAtomic())
918     return false;
919 
920   // Verify we have a legal type before going any further.
921   MVT VT;
922   if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
923     return false;
924 
925   // Get the value to be stored into a register.
926   SrcReg = getRegForValue(Op0);
927   if (SrcReg == 0)
928     return false;
929 
930   // See if we can handle this address.
931   Address Addr;
932   if (!computeAddress(I->getOperand(1), Addr))
933     return false;
934 
935   if (!emitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
936     return false;
937   return true;
938 }
939 
940 // This can cause a redundant sltiu to be generated.
941 // FIXME: try and eliminate this in a future patch.
942 bool MipsFastISel::selectBranch(const Instruction *I) {
943   const BranchInst *BI = cast<BranchInst>(I);
944   MachineBasicBlock *BrBB = FuncInfo.MBB;
945   //
946   // TBB is the basic block for the case where the comparison is true.
947   // FBB is the basic block for the case where the comparison is false.
948   // if (cond) goto TBB
949   // goto FBB
950   // TBB:
951   //
952   MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
953   MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
954 
955   // Fold the common case of a conditional branch with a comparison
956   // in the same block.
957   unsigned ZExtCondReg = 0;
958   if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
959     if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
960       ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
961       if (!emitCmp(ZExtCondReg, CI))
962         return false;
963     }
964   }
965 
966   // For the general case, we need to mask with 1.
967   if (ZExtCondReg == 0) {
968     unsigned CondReg = getRegForValue(BI->getCondition());
969     if (CondReg == 0)
970       return false;
971 
972     ZExtCondReg = emitIntExt(MVT::i1, CondReg, MVT::i32, true);
973     if (ZExtCondReg == 0)
974       return false;
975   }
976 
977   BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::BGTZ))
978       .addReg(ZExtCondReg)
979       .addMBB(TBB);
980   finishCondBranch(BI->getParent(), TBB, FBB);
981   return true;
982 }
983 
984 bool MipsFastISel::selectCmp(const Instruction *I) {
985   const CmpInst *CI = cast<CmpInst>(I);
986   unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
987   if (!emitCmp(ResultReg, CI))
988     return false;
989   updateValueMap(I, ResultReg);
990   return true;
991 }
992 
993 // Attempt to fast-select a floating-point extend instruction.
994 bool MipsFastISel::selectFPExt(const Instruction *I) {
995   if (UnsupportedFPMode)
996     return false;
997   Value *Src = I->getOperand(0);
998   EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
999   EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1000 
1001   if (SrcVT != MVT::f32 || DestVT != MVT::f64)
1002     return false;
1003 
1004   unsigned SrcReg =
1005       getRegForValue(Src); // this must be a 32bit floating point register class
1006                            // maybe we should handle this differently
1007   if (!SrcReg)
1008     return false;
1009 
1010   unsigned DestReg = createResultReg(&Mips::AFGR64RegClass);
1011   emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
1012   updateValueMap(I, DestReg);
1013   return true;
1014 }
1015 
1016 bool MipsFastISel::selectSelect(const Instruction *I) {
1017   assert(isa<SelectInst>(I) && "Expected a select instruction.");
1018 
1019   LLVM_DEBUG(dbgs() << "selectSelect\n");
1020 
1021   MVT VT;
1022   if (!isTypeSupported(I->getType(), VT) || UnsupportedFPMode) {
1023     LLVM_DEBUG(
1024         dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");
1025     return false;
1026   }
1027 
1028   unsigned CondMovOpc;
1029   const TargetRegisterClass *RC;
1030 
1031   if (VT.isInteger() && !VT.isVector() && VT.getSizeInBits() <= 32) {
1032     CondMovOpc = Mips::MOVN_I_I;
1033     RC = &Mips::GPR32RegClass;
1034   } else if (VT == MVT::f32) {
1035     CondMovOpc = Mips::MOVN_I_S;
1036     RC = &Mips::FGR32RegClass;
1037   } else if (VT == MVT::f64) {
1038     CondMovOpc = Mips::MOVN_I_D32;
1039     RC = &Mips::AFGR64RegClass;
1040   } else
1041     return false;
1042 
1043   const SelectInst *SI = cast<SelectInst>(I);
1044   const Value *Cond = SI->getCondition();
1045   unsigned Src1Reg = getRegForValue(SI->getTrueValue());
1046   unsigned Src2Reg = getRegForValue(SI->getFalseValue());
1047   unsigned CondReg = getRegForValue(Cond);
1048 
1049   if (!Src1Reg || !Src2Reg || !CondReg)
1050     return false;
1051 
1052   unsigned ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
1053   if (!ZExtCondReg)
1054     return false;
1055 
1056   if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))
1057     return false;
1058 
1059   unsigned ResultReg = createResultReg(RC);
1060   unsigned TempReg = createResultReg(RC);
1061 
1062   if (!ResultReg || !TempReg)
1063     return false;
1064 
1065   emitInst(TargetOpcode::COPY, TempReg).addReg(Src2Reg);
1066   emitInst(CondMovOpc, ResultReg)
1067     .addReg(Src1Reg).addReg(ZExtCondReg).addReg(TempReg);
1068   updateValueMap(I, ResultReg);
1069   return true;
1070 }
1071 
1072 // Attempt to fast-select a floating-point truncate instruction.
1073 bool MipsFastISel::selectFPTrunc(const Instruction *I) {
1074   if (UnsupportedFPMode)
1075     return false;
1076   Value *Src = I->getOperand(0);
1077   EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
1078   EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1079 
1080   if (SrcVT != MVT::f64 || DestVT != MVT::f32)
1081     return false;
1082 
1083   unsigned SrcReg = getRegForValue(Src);
1084   if (!SrcReg)
1085     return false;
1086 
1087   unsigned DestReg = createResultReg(&Mips::FGR32RegClass);
1088   if (!DestReg)
1089     return false;
1090 
1091   emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
1092   updateValueMap(I, DestReg);
1093   return true;
1094 }
1095 
1096 // Attempt to fast-select a floating-point-to-integer conversion.
1097 bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
1098   if (UnsupportedFPMode)
1099     return false;
1100   MVT DstVT, SrcVT;
1101   if (!IsSigned)
1102     return false; // We don't handle this case yet. There is no native
1103                   // instruction for this but it can be synthesized.
1104   Type *DstTy = I->getType();
1105   if (!isTypeLegal(DstTy, DstVT))
1106     return false;
1107 
1108   if (DstVT != MVT::i32)
1109     return false;
1110 
1111   Value *Src = I->getOperand(0);
1112   Type *SrcTy = Src->getType();
1113   if (!isTypeLegal(SrcTy, SrcVT))
1114     return false;
1115 
1116   if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
1117     return false;
1118 
1119   unsigned SrcReg = getRegForValue(Src);
1120   if (SrcReg == 0)
1121     return false;
1122 
1123   // Determine the opcode for the conversion, which takes place
1124   // entirely within FPRs.
1125   unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1126   unsigned TempReg = createResultReg(&Mips::FGR32RegClass);
1127   unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;
1128 
1129   // Generate the convert.
1130   emitInst(Opc, TempReg).addReg(SrcReg);
1131   emitInst(Mips::MFC1, DestReg).addReg(TempReg);
1132 
1133   updateValueMap(I, DestReg);
1134   return true;
1135 }
1136 
// Lower the outgoing arguments of a call.
//
// Runs the calling convention over OutVTs, patches the resulting
// locations to follow the FP-register and A0-A3 assignment rules,
// applies any required integer promotions, and copies every argument
// into its assigned physical register. On success NumBytes holds the
// size of the outgoing argument area. Returns false if any argument
// would have to be passed on the stack (not implemented yet).
bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
                                   SmallVectorImpl<MVT> &OutVTs,
                                   unsigned &NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();
  // This is the minimum argument area used for A0-A3.
  if (NumBytes < 16)
    NumBytes = 16;

  // NOTE(review): the adjustment uses the fixed 16-byte reserved area
  // rather than NumBytes -- presumably fine while only register
  // arguments are supported; confirm if stack arguments are added.
  emitInst(Mips::ADJCALLSTACKDOWN).addImm(16).addImm(0);
  // Process the args.
  MVT firstMVT;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    // When the first argument is floating point it is passed in
    // F12/D6, and a second FP argument (only if the first was FP)
    // goes in F14/D7. Rewrite the generic CC assignments to match.
    if (i == 0) {
      firstMVT = ArgVT;
      if (ArgVT == MVT::f32) {
        VA.convertToReg(Mips::F12);
      } else if (ArgVT == MVT::f64) {
        if (Subtarget->isFP64bit())
          VA.convertToReg(Mips::D6_64);
        else
          VA.convertToReg(Mips::D6);
      }
    } else if (i == 1) {
      if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
        if (ArgVT == MVT::f32) {
          VA.convertToReg(Mips::F14);
        } else if (ArgVT == MVT::f64) {
          if (Subtarget->isFP64bit())
            VA.convertToReg(Mips::D7_64);
          else
            VA.convertToReg(Mips::D7);
        }
      }
    }
    // The first 16 bytes of the argument area shadow A0-A3; turn those
    // stack assignments into register assignments.
    if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32) || (ArgVT == MVT::i16) ||
         (ArgVT == MVT::i8)) &&
        VA.isMemLoc()) {
      switch (VA.getLocMemOffset()) {
      case 0:
        VA.convertToReg(Mips::A0);
        break;
      case 4:
        VA.convertToReg(Mips::A1);
        break;
      case 8:
        VA.convertToReg(Mips::A2);
        break;
      case 12:
        VA.convertToReg(Mips::A3);
        break;
      default:
        break;
      }
    }
    unsigned ArgReg = getRegForValue(ArgVal);
    if (!ArgReg)
      return false;

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::AExt:
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
      if (!ArgReg)
        return false;
      break;
    }
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
      if (!ArgReg)
        return false;
      break;
    }
    default:
      llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      CLI.OutRegs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      llvm_unreachable("Mips does not use custom args.");
      return false;
    } else {
      //
      // FIXME: This path will currently return false. It was copied
      // from the AArch64 port and should be essentially fine for Mips too.
      // The work to finish up this path will be done in a follow-on patch.
      //
      assert(VA.isMemLoc() && "Assuming store on stack.");
      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      // Need to store on the stack.
      // FIXME: This alignment is incorrect but this path is disabled
      // for now (will return false). We need to determine the right alignment
      // based on the normal alignment for the underlying machine type.
      //
      unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);

      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittle())
        BEAlign = 8 - ArgSize;

      Address Addr;
      Addr.setKind(Address::RegBase);
      Addr.setReg(Mips::SP);
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);

      Align Alignment = DL.getABITypeAlign(ArgVal->getType());
      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
          MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
      (void)(MMO);
      // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
      return false; // can't store on the stack yet.
    }
  }

  return true;
}
1276 
// Finish off a lowered call: pop the outgoing argument area and copy the
// (single) return value, if any, out of its ABI-assigned physical
// register into a fresh virtual register recorded on CLI.
// NOTE(review): NumBytes is unused; the adjustment is the fixed 16-byte
// reserved area, matching the ADJCALLSTACKDOWN in processCallArgs --
// confirm if stack arguments are ever supported.
bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
                              unsigned NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  emitInst(Mips::ADJCALLSTACKUP).addImm(16).addImm(0);
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    MipsCCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);

    CCInfo.AnalyzeCallResult(CLI.Ins, RetCC_Mips, CLI.RetTy,
                             CLI.Symbol ? CLI.Symbol->getName().data()
                                        : nullptr);

    // Only handle a single return value.
    if (RVLocs.size() != 1)
      return false;
    // Copy all of the result registers out of their specified physreg.
    MVT CopyVT = RVLocs[0].getValVT();
    // Special handling for extended integers: narrow results come back
    // in a full 32-bit register.
    if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
      CopyVT = MVT::i32;

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
    if (!ResultReg)
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(RVLocs[0].getLocReg());
    // Record the physreg so the call instruction gets an implicit def.
    CLI.InRegs.push_back(RVLocs[0].getLocReg());

    CLI.ResultReg = ResultReg;
    CLI.NumResultRegs = 1;
  }
  return true;
}
1311 
// Try to lower all formal arguments entirely in registers.
//
// Only the simple cases are handled: the C calling convention, no
// varargs, and every argument mapping directly to one GPR32, FGR32 or
// AFGR64 argument register. Anything else returns false so SelectionDAG
// handles the function instead.
bool MipsFastISel::fastLowerArguments() {
  LLVM_DEBUG(dbgs() << "fastLowerArguments\n");

  if (!FuncInfo.CanLowerReturn) {
    LLVM_DEBUG(dbgs() << ".. gave up (!CanLowerReturn)\n");
    return false;
  }

  const Function *F = FuncInfo.Fn;
  if (F->isVarArg()) {
    LLVM_DEBUG(dbgs() << ".. gave up (varargs)\n");
    return false;
  }

  CallingConv::ID CC = F->getCallingConv();
  if (CC != CallingConv::C) {
    LLVM_DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");
    return false;
  }

  // The candidate argument registers; the three cursors below advance in
  // lock-step according to the allocation rules in the switch.
  std::array<MCPhysReg, 4> GPR32ArgRegs = {{Mips::A0, Mips::A1, Mips::A2,
                                           Mips::A3}};
  std::array<MCPhysReg, 2> FGR32ArgRegs = {{Mips::F12, Mips::F14}};
  std::array<MCPhysReg, 2> AFGR64ArgRegs = {{Mips::D6, Mips::D7}};
  auto NextGPR32 = GPR32ArgRegs.begin();
  auto NextFGR32 = FGR32ArgRegs.begin();
  auto NextAFGR64 = AFGR64ArgRegs.begin();

  // A (register class, physical register) pair chosen for one argument.
  struct AllocatedReg {
    const TargetRegisterClass *RC;
    unsigned Reg;
    AllocatedReg(const TargetRegisterClass *RC, unsigned Reg)
        : RC(RC), Reg(Reg) {}
  };

  // Only handle simple cases. i.e. All arguments are directly mapped to
  // registers of the appropriate type.
  SmallVector<AllocatedReg, 4> Allocation;
  for (const auto &FormalArg : F->args()) {
    if (FormalArg.hasAttribute(Attribute::InReg) ||
        FormalArg.hasAttribute(Attribute::StructRet) ||
        FormalArg.hasAttribute(Attribute::ByVal)) {
      LLVM_DEBUG(dbgs() << ".. gave up (inreg, structret, byval)\n");
      return false;
    }

    Type *ArgTy = FormalArg.getType();
    if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) {
      LLVM_DEBUG(dbgs() << ".. gave up (struct, array, or vector)\n");
      return false;
    }

    EVT ArgVT = TLI.getValueType(DL, ArgTy);
    LLVM_DEBUG(dbgs() << ".. " << FormalArg.getArgNo() << ": "
                      << ArgVT.getEVTString() << "\n");
    if (!ArgVT.isSimple()) {
      LLVM_DEBUG(dbgs() << ".. .. gave up (not a simple type)\n");
      return false;
    }

    switch (ArgVT.getSimpleVT().SimpleTy) {
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      if (!FormalArg.hasAttribute(Attribute::SExt) &&
          !FormalArg.hasAttribute(Attribute::ZExt)) {
        // It must be any extend, this shouldn't happen for clang-generated IR
        // so just fall back on SelectionDAG.
        LLVM_DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");
        return false;
      }

      if (NextGPR32 == GPR32ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
        return false;
      }

      LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
      Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);

      // Allocating any GPR32 prohibits further use of floating point arguments.
      NextFGR32 = FGR32ArgRegs.end();
      NextAFGR64 = AFGR64ArgRegs.end();
      break;

    case MVT::i32:
      if (FormalArg.hasAttribute(Attribute::ZExt)) {
        // The O32 ABI does not permit a zero-extended i32.
        LLVM_DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");
        return false;
      }

      if (NextGPR32 == GPR32ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
        return false;
      }

      LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
      Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);

      // Allocating any GPR32 prohibits further use of floating point arguments.
      NextFGR32 = FGR32ArgRegs.end();
      NextAFGR64 = AFGR64ArgRegs.end();
      break;

    case MVT::f32:
      if (UnsupportedFPMode) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
        return false;
      }
      if (NextFGR32 == FGR32ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");
        return false;
      }
      LLVM_DEBUG(dbgs() << ".. .. FGR32(" << *NextFGR32 << ")\n");
      Allocation.emplace_back(&Mips::FGR32RegClass, *NextFGR32++);
      // Allocating an FGR32 also allocates the super-register AFGR64, and
      // ABI rules require us to skip the corresponding GPR32.
      if (NextGPR32 != GPR32ArgRegs.end())
        NextGPR32++;
      if (NextAFGR64 != AFGR64ArgRegs.end())
        NextAFGR64++;
      break;

    case MVT::f64:
      if (UnsupportedFPMode) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
        return false;
      }
      if (NextAFGR64 == AFGR64ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");
        return false;
      }
      LLVM_DEBUG(dbgs() << ".. .. AFGR64(" << *NextAFGR64 << ")\n");
      Allocation.emplace_back(&Mips::AFGR64RegClass, *NextAFGR64++);
      // Allocating an FGR32 also allocates the super-register AFGR64, and
      // ABI rules require us to skip the corresponding GPR32 pair.
      if (NextGPR32 != GPR32ArgRegs.end())
        NextGPR32++;
      if (NextGPR32 != GPR32ArgRegs.end())
        NextGPR32++;
      if (NextFGR32 != FGR32ArgRegs.end())
        NextFGR32++;
      break;

    default:
      LLVM_DEBUG(dbgs() << ".. .. gave up (unknown type)\n");
      return false;
    }
  }

  // Every argument was allocated; mark the physregs live-in and map each
  // IR argument to a copy of its register.
  for (const auto &FormalArg : F->args()) {
    unsigned ArgNo = FormalArg.getArgNo();
    unsigned SrcReg = Allocation[ArgNo].Reg;
    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[ArgNo].RC);
    // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
    // Without this, EmitLiveInCopies may eliminate the livein if its only
    // use is a bitcast (which isn't turned into an instruction).
    unsigned ResultReg = createResultReg(Allocation[ArgNo].RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(DstReg, getKillRegState(true));
    updateValueMap(&FormalArg, ResultReg);
  }

  // Calculate the size of the incoming arguments area.
  // We currently reject all the cases where this would be non-zero.
  unsigned IncomingArgSizeInBytes = 0;

  // Account for the reserved argument area on ABI's that have one (O32).
  // It seems strange to do this on the caller side but it's necessary in
  // SelectionDAG's implementation.
  IncomingArgSizeInBytes = std::min(getABI().GetCalleeAllocdArgSizeInBytes(CC),
                                    IncomingArgSizeInBytes);

  MF->getInfo<MipsFunctionInfo>()->setFormalArgInfo(IncomingArgSizeInBytes,
                                                    false);

  return true;
}
1492 
// Lower a call instruction.
//
// Filters out the cases fast-isel cannot handle (fastcc, tail calls,
// varargs, aggregate/vector arguments), lowers the arguments via
// processCallArgs, emits the JALR-through-T9 call sequence, and copies
// the result back via finishCall.
bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
  CallingConv::ID CC = CLI.CallConv;
  bool IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;
  const Value *Callee = CLI.Callee;
  MCSymbol *Symbol = CLI.Symbol;

  // Do not handle FastCC.
  if (CC == CallingConv::Fast)
    return false;

  // Allow SelectionDAG isel to handle tail calls.
  if (IsTailCall)
    return false;

  // Let SDISel handle vararg functions.
  if (IsVarArg)
    return false;

  // FIXME: Only handle *simple* calls for now.
  MVT RetVT;
  if (CLI.RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeSupported(CLI.RetTy, RetVT))
    return false;

  // Reject argument attributes that imply non-trivial passing rules.
  for (auto Flag : CLI.OutFlags)
    if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
      return false;

  // Set up the argument vectors.
  SmallVector<MVT, 16> OutVTs;
  OutVTs.reserve(CLI.OutVals.size());

  for (auto *Val : CLI.OutVals) {
    MVT VT;
    if (!isTypeLegal(Val->getType(), VT) &&
        !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
      return false;

    // We don't handle vector parameters yet.
    if (VT.isVector() || VT.getSizeInBits() > 64)
      return false;

    OutVTs.push_back(VT);
  }

  Address Addr;
  if (!computeCallAddress(Callee, Addr))
    return false;

  // Handle the arguments now that we've gotten them.
  unsigned NumBytes;
  if (!processCallArgs(CLI, OutVTs, NumBytes))
    return false;

  // Only direct calls to a known global are supported.
  if (!Addr.getGlobalValue())
    return false;

  // Issue the call: materialize the callee address, move it into T9 and
  // jump-and-link through it.
  unsigned DestAddress;
  if (Symbol)
    DestAddress = materializeExternalCallSym(Symbol);
  else
    DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
  emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::JALR),
              Mips::RA).addReg(Mips::T9);

  // Add implicit physical register uses to the call.
  for (auto Reg : CLI.OutRegs)
    MIB.addReg(Reg, RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  CLI.Call = MIB;

  if (EmitJalrReloc && !Subtarget->inMips16Mode()) {
    // Attach callee address to the instruction, let asm printer emit
    // .reloc R_MIPS_JALR.
    if (Symbol)
      MIB.addSym(Symbol, MipsII::MO_JALR);
    else
      MIB.addSym(FuncInfo.MF->getContext().getOrCreateSymbol(
	                   Addr.getGlobalValue()->getName()), MipsII::MO_JALR);
  }

  // Finish off the call including any return values.
  return finishCall(CLI, RetVT, NumBytes);
}
1586 
// Lower the small set of intrinsics fast-isel supports: bswap (expanded
// inline, with and without MIPS32r2's WSBH/ROTR) and
// memcpy/memmove/memset (lowered to libcalls).
bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::bswap: {
    Type *RetTy = II->getCalledFunction()->getReturnType();

    MVT VT;
    if (!isTypeSupported(RetTy, VT))
      return false;

    unsigned SrcReg = getRegForValue(II->getOperand(0));
    if (SrcReg == 0)
      return false;
    unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
    if (DestReg == 0)
      return false;
    if (VT == MVT::i16) {
      if (Subtarget->hasMips32r2()) {
        // WSBH swaps the bytes within each halfword.
        emitInst(Mips::WSBH, DestReg).addReg(SrcReg);
        updateValueMap(II, DestReg);
        return true;
      } else {
        // No WSBH: compute ((x << 8) | (x >> 8)) & 0xFFFF.
        unsigned TempReg[3];
        for (int i = 0; i < 3; i++) {
          TempReg[i] = createResultReg(&Mips::GPR32RegClass);
          if (TempReg[i] == 0)
            return false;
        }
        emitInst(Mips::SLL, TempReg[0]).addReg(SrcReg).addImm(8);
        emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(8);
        emitInst(Mips::OR, TempReg[2]).addReg(TempReg[0]).addReg(TempReg[1]);
        emitInst(Mips::ANDi, DestReg).addReg(TempReg[2]).addImm(0xFFFF);
        updateValueMap(II, DestReg);
        return true;
      }
    } else if (VT == MVT::i32) {
      if (Subtarget->hasMips32r2()) {
        // Swap bytes within each halfword, then rotate the halfwords.
        unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
        emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
        emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
        updateValueMap(II, DestReg);
        return true;
      } else {
        // No WSBH/ROTR: assemble the reversed word byte by byte:
        //   T3 = ((x >> 8) & 0xFF00) | (x >> 24)   -- low two result bytes
        //   T5 = (x & 0xFF00) << 8                 -- result byte 2
        //   T6 = x << 24                           -- result byte 3 (MSB)
        //   Dest = T6 | T3 | T5
        unsigned TempReg[8];
        for (int i = 0; i < 8; i++) {
          TempReg[i] = createResultReg(&Mips::GPR32RegClass);
          if (TempReg[i] == 0)
            return false;
        }

        emitInst(Mips::SRL, TempReg[0]).addReg(SrcReg).addImm(8);
        emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(24);
        emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[0]).addImm(0xFF00);
        emitInst(Mips::OR, TempReg[3]).addReg(TempReg[1]).addReg(TempReg[2]);

        emitInst(Mips::ANDi, TempReg[4]).addReg(SrcReg).addImm(0xFF00);
        emitInst(Mips::SLL, TempReg[5]).addReg(TempReg[4]).addImm(8);

        emitInst(Mips::SLL, TempReg[6]).addReg(SrcReg).addImm(24);
        emitInst(Mips::OR, TempReg[7]).addReg(TempReg[3]).addReg(TempReg[5]);
        emitInst(Mips::OR, DestReg).addReg(TempReg[6]).addReg(TempReg[7]);
        updateValueMap(II, DestReg);
        return true;
      }
    }
    return false;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const auto *MTI = cast<MemTransferInst>(II);
    // Don't handle volatile.
    if (MTI->isVolatile())
      return false;
    // The libcall takes a 32-bit length.
    if (!MTI->getLength()->getType()->isIntegerTy(32))
      return false;
    const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
    // Drop the trailing alignment/volatile operand when forwarding args.
    return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
  }
  case Intrinsic::memset: {
    const MemSetInst *MSI = cast<MemSetInst>(II);
    // Don't handle volatile.
    if (MSI->isVolatile())
      return false;
    // The libcall takes a 32-bit length.
    if (!MSI->getLength()->getType()->isIntegerTy(32))
      return false;
    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
  }
  }
  return false;
}
1678 
// Lower a return instruction.
//
// Only a single register-located return value is supported; narrow
// integer results are extended to i32 first when the return attributes
// ask for it. Emits a copy into the ABI return register followed by
// RetRA.
bool MipsFastISel::selectRet(const Instruction *I) {
  const Function &F = *I->getParent()->getParent();
  const ReturnInst *Ret = cast<ReturnInst>(I);

  LLVM_DEBUG(dbgs() << "selectRet\n");

  if (!FuncInfo.CanLowerReturn)
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    CallingConv::ID CC = F.getCallingConv();

    // Do not handle FastCC.
    if (CC == CallingConv::Fast)
      return false;

    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
                       I->getContext());
    CCAssignFn *RetCC = RetCC_Mips;
    CCInfo.AnalyzeReturn(Outs, RetCC);

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];
    const Value *RV = Ret->getOperand(0);

    // Don't bother handling odd stuff for now.
    if ((VA.getLocInfo() != CCValAssign::Full) &&
        (VA.getLocInfo() != CCValAssign::BCvt))
      return false;

    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    Register DestReg = VA.getLocReg();
    // Avoid a cross-class copy. This is very unlikely.
    if (!MRI.getRegClass(SrcReg)->contains(DestReg))
      return false;

    EVT RVEVT = TLI.getValueType(DL, RV->getType());
    if (!RVEVT.isSimple())
      return false;

    if (RVEVT.isVector())
      return false;

    MVT RVVT = RVEVT.getSimpleVT();
    if (RVVT == MVT::f128)
      return false;

    // Do not handle FGR64 returns for now.
    if (RVVT == MVT::f64 && UnsupportedFPMode) {
      LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode\n");
      return false;
    }

    MVT DestVT = VA.getValVT();
    // Special handling for extended integers: widen i1/i8/i16 results
    // according to the sext/zext return attribute.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        bool IsZExt = Outs[0].Flags.isZExt();
        SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
        if (SrcReg == 0)
          return false;
      }
    }

    // Make the copy.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }
  // Emit the return and mark the return registers as implicitly used.
  MachineInstrBuilder MIB = emitInst(Mips::RetRA);
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);
  return true;
}
1777 
1778 bool MipsFastISel::selectTrunc(const Instruction *I) {
1779   // The high bits for a type smaller than the register size are assumed to be
1780   // undefined.
1781   Value *Op = I->getOperand(0);
1782 
1783   EVT SrcVT, DestVT;
1784   SrcVT = TLI.getValueType(DL, Op->getType(), true);
1785   DestVT = TLI.getValueType(DL, I->getType(), true);
1786 
1787   if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1788     return false;
1789   if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1790     return false;
1791 
1792   unsigned SrcReg = getRegForValue(Op);
1793   if (!SrcReg)
1794     return false;
1795 
1796   // Because the high bits are undefined, a truncate doesn't generate
1797   // any code.
1798   updateValueMap(I, SrcReg);
1799   return true;
1800 }
1801 
1802 bool MipsFastISel::selectIntExt(const Instruction *I) {
1803   Type *DestTy = I->getType();
1804   Value *Src = I->getOperand(0);
1805   Type *SrcTy = Src->getType();
1806 
1807   bool isZExt = isa<ZExtInst>(I);
1808   unsigned SrcReg = getRegForValue(Src);
1809   if (!SrcReg)
1810     return false;
1811 
1812   EVT SrcEVT, DestEVT;
1813   SrcEVT = TLI.getValueType(DL, SrcTy, true);
1814   DestEVT = TLI.getValueType(DL, DestTy, true);
1815   if (!SrcEVT.isSimple())
1816     return false;
1817   if (!DestEVT.isSimple())
1818     return false;
1819 
1820   MVT SrcVT = SrcEVT.getSimpleVT();
1821   MVT DestVT = DestEVT.getSimpleVT();
1822   unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
1823 
1824   if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
1825     return false;
1826   updateValueMap(I, ResultReg);
1827   return true;
1828 }
1829 
1830 bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1831                                    unsigned DestReg) {
1832   unsigned ShiftAmt;
1833   switch (SrcVT.SimpleTy) {
1834   default:
1835     return false;
1836   case MVT::i8:
1837     ShiftAmt = 24;
1838     break;
1839   case MVT::i16:
1840     ShiftAmt = 16;
1841     break;
1842   }
1843   unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
1844   emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
1845   emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
1846   return true;
1847 }
1848 
1849 bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1850                                    unsigned DestReg) {
1851   switch (SrcVT.SimpleTy) {
1852   default:
1853     return false;
1854   case MVT::i8:
1855     emitInst(Mips::SEB, DestReg).addReg(SrcReg);
1856     break;
1857   case MVT::i16:
1858     emitInst(Mips::SEH, DestReg).addReg(SrcReg);
1859     break;
1860   }
1861   return true;
1862 }
1863 
1864 bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1865                                unsigned DestReg) {
1866   if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
1867     return false;
1868   if (Subtarget->hasMips32r2())
1869     return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
1870   return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);
1871 }
1872 
1873 bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1874                                unsigned DestReg) {
1875   int64_t Imm;
1876 
1877   switch (SrcVT.SimpleTy) {
1878   default:
1879     return false;
1880   case MVT::i1:
1881     Imm = 1;
1882     break;
1883   case MVT::i8:
1884     Imm = 0xff;
1885     break;
1886   case MVT::i16:
1887     Imm = 0xffff;
1888     break;
1889   }
1890 
1891   emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(Imm);
1892   return true;
1893 }
1894 
1895 bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1896                               unsigned DestReg, bool IsZExt) {
1897   // FastISel does not have plumbing to deal with extensions where the SrcVT or
1898   // DestVT are odd things, so test to make sure that they are both types we can
1899   // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
1900   // bail out to SelectionDAG.
1901   if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && (DestVT != MVT::i32)) ||
1902       ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && (SrcVT != MVT::i16)))
1903     return false;
1904   if (IsZExt)
1905     return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
1906   return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);
1907 }
1908 
1909 unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1910                                   bool isZExt) {
1911   unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1912   bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
1913   return Success ? DestReg : 0;
1914 }
1915 
1916 bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
1917   EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
1918   if (!DestEVT.isSimple())
1919     return false;
1920 
1921   MVT DestVT = DestEVT.getSimpleVT();
1922   if (DestVT != MVT::i32)
1923     return false;
1924 
1925   unsigned DivOpc;
1926   switch (ISDOpcode) {
1927   default:
1928     return false;
1929   case ISD::SDIV:
1930   case ISD::SREM:
1931     DivOpc = Mips::SDIV;
1932     break;
1933   case ISD::UDIV:
1934   case ISD::UREM:
1935     DivOpc = Mips::UDIV;
1936     break;
1937   }
1938 
1939   unsigned Src0Reg = getRegForValue(I->getOperand(0));
1940   unsigned Src1Reg = getRegForValue(I->getOperand(1));
1941   if (!Src0Reg || !Src1Reg)
1942     return false;
1943 
1944   emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
1945   emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);
1946 
1947   unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
1948   if (!ResultReg)
1949     return false;
1950 
1951   unsigned MFOpc = (ISDOpcode == ISD::SREM || ISDOpcode == ISD::UREM)
1952                        ? Mips::MFHI
1953                        : Mips::MFLO;
1954   emitInst(MFOpc, ResultReg);
1955 
1956   updateValueMap(I, ResultReg);
1957   return true;
1958 }
1959 
1960 bool MipsFastISel::selectShift(const Instruction *I) {
1961   MVT RetVT;
1962 
1963   if (!isTypeSupported(I->getType(), RetVT))
1964     return false;
1965 
1966   unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
1967   if (!ResultReg)
1968     return false;
1969 
1970   unsigned Opcode = I->getOpcode();
1971   const Value *Op0 = I->getOperand(0);
1972   unsigned Op0Reg = getRegForValue(Op0);
1973   if (!Op0Reg)
1974     return false;
1975 
1976   // If AShr or LShr, then we need to make sure the operand0 is sign extended.
1977   if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
1978     unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
1979     if (!TempReg)
1980       return false;
1981 
1982     MVT Op0MVT = TLI.getValueType(DL, Op0->getType(), true).getSimpleVT();
1983     bool IsZExt = Opcode == Instruction::LShr;
1984     if (!emitIntExt(Op0MVT, Op0Reg, MVT::i32, TempReg, IsZExt))
1985       return false;
1986 
1987     Op0Reg = TempReg;
1988   }
1989 
1990   if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
1991     uint64_t ShiftVal = C->getZExtValue();
1992 
1993     switch (Opcode) {
1994     default:
1995       llvm_unreachable("Unexpected instruction.");
1996     case Instruction::Shl:
1997       Opcode = Mips::SLL;
1998       break;
1999     case Instruction::AShr:
2000       Opcode = Mips::SRA;
2001       break;
2002     case Instruction::LShr:
2003       Opcode = Mips::SRL;
2004       break;
2005     }
2006 
2007     emitInst(Opcode, ResultReg).addReg(Op0Reg).addImm(ShiftVal);
2008     updateValueMap(I, ResultReg);
2009     return true;
2010   }
2011 
2012   unsigned Op1Reg = getRegForValue(I->getOperand(1));
2013   if (!Op1Reg)
2014     return false;
2015 
2016   switch (Opcode) {
2017   default:
2018     llvm_unreachable("Unexpected instruction.");
2019   case Instruction::Shl:
2020     Opcode = Mips::SLLV;
2021     break;
2022   case Instruction::AShr:
2023     Opcode = Mips::SRAV;
2024     break;
2025   case Instruction::LShr:
2026     Opcode = Mips::SRLV;
2027     break;
2028   }
2029 
2030   emitInst(Opcode, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
2031   updateValueMap(I, ResultReg);
2032   return true;
2033 }
2034 
2035 bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
2036   switch (I->getOpcode()) {
2037   default:
2038     break;
2039   case Instruction::Load:
2040     return selectLoad(I);
2041   case Instruction::Store:
2042     return selectStore(I);
2043   case Instruction::SDiv:
2044     if (!selectBinaryOp(I, ISD::SDIV))
2045       return selectDivRem(I, ISD::SDIV);
2046     return true;
2047   case Instruction::UDiv:
2048     if (!selectBinaryOp(I, ISD::UDIV))
2049       return selectDivRem(I, ISD::UDIV);
2050     return true;
2051   case Instruction::SRem:
2052     if (!selectBinaryOp(I, ISD::SREM))
2053       return selectDivRem(I, ISD::SREM);
2054     return true;
2055   case Instruction::URem:
2056     if (!selectBinaryOp(I, ISD::UREM))
2057       return selectDivRem(I, ISD::UREM);
2058     return true;
2059   case Instruction::Shl:
2060   case Instruction::LShr:
2061   case Instruction::AShr:
2062     return selectShift(I);
2063   case Instruction::And:
2064   case Instruction::Or:
2065   case Instruction::Xor:
2066     return selectLogicalOp(I);
2067   case Instruction::Br:
2068     return selectBranch(I);
2069   case Instruction::Ret:
2070     return selectRet(I);
2071   case Instruction::Trunc:
2072     return selectTrunc(I);
2073   case Instruction::ZExt:
2074   case Instruction::SExt:
2075     return selectIntExt(I);
2076   case Instruction::FPTrunc:
2077     return selectFPTrunc(I);
2078   case Instruction::FPExt:
2079     return selectFPExt(I);
2080   case Instruction::FPToSI:
2081     return selectFPToInt(I, /*isSigned*/ true);
2082   case Instruction::FPToUI:
2083     return selectFPToInt(I, /*isSigned*/ false);
2084   case Instruction::ICmp:
2085   case Instruction::FCmp:
2086     return selectCmp(I);
2087   case Instruction::Select:
2088     return selectSelect(I);
2089   }
2090   return false;
2091 }
2092 
2093 unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
2094                                                            bool IsUnsigned) {
2095   unsigned VReg = getRegForValue(V);
2096   if (VReg == 0)
2097     return 0;
2098   MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();
2099 
2100   if (VMVT == MVT::i1)
2101     return 0;
2102 
2103   if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
2104     unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
2105     if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
2106       return 0;
2107     VReg = TempReg;
2108   }
2109   return VReg;
2110 }
2111 
2112 void MipsFastISel::simplifyAddress(Address &Addr) {
2113   if (!isInt<16>(Addr.getOffset())) {
2114     unsigned TempReg =
2115         materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
2116     unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
2117     emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
2118     Addr.setReg(DestReg);
2119     Addr.setOffset(0);
2120   }
2121 }
2122 
2123 unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2124                                        const TargetRegisterClass *RC,
2125                                        unsigned Op0, bool Op0IsKill,
2126                                        unsigned Op1, bool Op1IsKill) {
2127   // We treat the MUL instruction in a special way because it clobbers
2128   // the HI0 & LO0 registers. The TableGen definition of this instruction can
2129   // mark these registers only as implicitly defined. As a result, the
2130   // register allocator runs out of registers when this instruction is
2131   // followed by another instruction that defines the same registers too.
2132   // We can fix this by explicitly marking those registers as dead.
2133   if (MachineInstOpcode == Mips::MUL) {
2134     unsigned ResultReg = createResultReg(RC);
2135     const MCInstrDesc &II = TII.get(MachineInstOpcode);
2136     Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2137     Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2138     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2139       .addReg(Op0, getKillRegState(Op0IsKill))
2140       .addReg(Op1, getKillRegState(Op1IsKill))
2141       .addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead)
2142       .addReg(Mips::LO0, RegState::ImplicitDefine | RegState::Dead);
2143     return ResultReg;
2144   }
2145 
2146   return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op0IsKill, Op1,
2147                                    Op1IsKill);
2148 }
2149 
2150 namespace llvm {
2151 
2152 FastISel *Mips::createFastISel(FunctionLoweringInfo &funcInfo,
2153                                const TargetLibraryInfo *libInfo) {
2154   return new MipsFastISel(funcInfo, libInfo);
2155 }
2156 
2157 } // end namespace llvm
2158