xref: /freebsd/contrib/llvm-project/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp (revision aa1a8ff2d6dbc51ef058f46f3db5a8bb77967145)
1 //===- MipsLegalizerInfo.cpp ------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
/// This file implements the targeting of the MachineLegalizer class for Mips.
10 /// \todo This should be generated by TableGen.
11 //===----------------------------------------------------------------------===//
12 
13 #include "MipsLegalizerInfo.h"
14 #include "MipsTargetMachine.h"
15 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
16 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
17 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18 #include "llvm/IR/IntrinsicsMips.h"
19 
20 using namespace llvm;
21 
// One supported (value type, pointer type, memory size) combination for a
// load/store, plus whether the subtarget may perform it unaligned. Used as a
// lookup entry by CheckTy0Ty1MemSizeAlign.
struct TypesAndMemOps {
  LLT ValTy;        // Type of the loaded/stored value (Query.Types[0]).
  LLT PtrTy;        // Type of the pointer operand (Query.Types[1]).
  unsigned MemSize; // Memory access size in bits.
  // True when an access of MemSize bits is legal even if unaligned.
  bool SystemSupportsUnalignedAccess;
};
28 
29 // Assumes power of 2 memory size. Subtargets that have only naturally-aligned
30 // memory access need to perform additional legalization here.
31 static bool isUnalignedMemmoryAccess(uint64_t MemSize, uint64_t AlignInBits) {
32   assert(isPowerOf2_64(MemSize) && "Expected power of 2 memory size");
33   assert(isPowerOf2_64(AlignInBits) && "Expected power of 2 align");
34   if (MemSize > AlignInBits)
35     return true;
36   return false;
37 }
38 
39 static bool
40 CheckTy0Ty1MemSizeAlign(const LegalityQuery &Query,
41                         std::initializer_list<TypesAndMemOps> SupportedValues) {
42   unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();
43 
44   // Non power of two memory access is never legal.
45   if (!isPowerOf2_64(QueryMemSize))
46     return false;
47 
48   for (auto &Val : SupportedValues) {
49     if (Val.ValTy != Query.Types[0])
50       continue;
51     if (Val.PtrTy != Query.Types[1])
52       continue;
53     if (Val.MemSize != QueryMemSize)
54       continue;
55     if (!Val.SystemSupportsUnalignedAccess &&
56         isUnalignedMemmoryAccess(QueryMemSize, Query.MMODescrs[0].AlignInBits))
57       return false;
58     return true;
59   }
60   return false;
61 }
62 
63 static bool CheckTyN(unsigned N, const LegalityQuery &Query,
64                      std::initializer_list<LLT> SupportedValues) {
65   return llvm::is_contained(SupportedValues, Query.Types[N]);
66 }
67 
// Build the legalization rule table for MIPS32. Rules are registered per
// generic opcode; scalars are generally widened/clamped to s32, with the
// 128-bit MSA vector types allowed where the subtarget has MSA.
MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
  using namespace TargetOpcode;

  // Frequently used types. Pointers are 32 bit (address space 0); all MSA
  // vector types are 128 bits wide.
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);
  const LLT p0 = LLT::pointer(0, 32);

  // Integer arithmetic: legal at s32; MSA adds the 128-bit vector forms.
  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .clampScalar(0, s32, s32);

  // Overflow-reporting ops are lowered to plain arithmetic plus compares.
  getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE, G_UMULO})
      .lowerFor({{s32, s1}});

  getActionDefinitionsBuilder(G_UMULH)
      .legalFor({s32})
      .maxScalar(0, s32);

  // MIPS32r6 does not have alignment restrictions for memory access.
  // For MIPS32r5 and older memory access must be naturally-aligned i.e. aligned
  // to at least a multiple of its own size. There is however a two instruction
  // combination that performs 4 byte unaligned access (lwr/lwl and swl/swr)
  // therefore 4 byte load and store are legal and will use NoAlignRequirements.
  bool NoAlignRequirements = true;

  getActionDefinitionsBuilder({G_LOAD, G_STORE})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTy0Ty1MemSizeAlign(
                Query, {{s32, p0, 8, NoAlignRequirements},
                        {s32, p0, 16, ST.systemSupportsUnalignedAccess()},
                        {s32, p0, 32, NoAlignRequirements},
                        {p0, p0, 32, NoAlignRequirements},
                        {s64, p0, 64, ST.systemSupportsUnalignedAccess()}}))
          return true;
        if (ST.hasMSA() && CheckTy0Ty1MemSizeAlign(
                               Query, {{v16s8, p0, 128, NoAlignRequirements},
                                       {v8s16, p0, 128, NoAlignRequirements},
                                       {v4s32, p0, 128, NoAlignRequirements},
                                       {v2s64, p0, 128, NoAlignRequirements}}))
          return true;
        return false;
      })
      // Custom lower scalar memory access, up to 8 bytes, for:
      // - non-power-of-2 MemSizes
      // - unaligned 2 or 8 byte MemSizes for MIPS32r5 and older
      .customIf([=, &ST](const LegalityQuery &Query) {
        if (!Query.Types[0].isScalar() || Query.Types[1] != p0 ||
            Query.Types[0] == s1)
          return false;

        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();
        assert(QueryMemSize <= Size && "Scalar can't hold MemSize");

        if (Size > 64 || QueryMemSize > 64)
          return false;

        if (!isPowerOf2_64(Query.MMODescrs[0].MemoryTy.getSizeInBits()))
          return true;

        if (!ST.systemSupportsUnalignedAccess() &&
            isUnalignedMemmoryAccess(QueryMemSize,
                                     Query.MMODescrs[0].AlignInBits)) {
          // 4-byte unaligned access was already accepted as legal above
          // (lwr/lwl and swl/swr), so only 2 and 8 bytes reach this point.
          assert(QueryMemSize != 32 && "4 byte load and store are legal");
          return true;
        }

        return false;
      })
      .minScalar(0, s32)
      .lower();

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      .legalFor({s32, s64});

  getActionDefinitionsBuilder(G_UNMERGE_VALUES)
     .legalFor({{s32, s64}});

  getActionDefinitionsBuilder(G_MERGE_VALUES)
     .legalFor({{s64, s32}});

  // Extending loads from 8/16-bit memory into s32 registers.
  getActionDefinitionsBuilder({G_ZEXTLOAD, G_SEXTLOAD})
      .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                 {s32, p0, s16, 8}})
      .clampScalar(0, s32, s32);

  // Extensions/truncations are never legal as such; narrowing to at most s32
  // makes them disappear or become merges/unmerges.
  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_TRUNC)
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(1, s32);

  getActionDefinitionsBuilder(G_SELECT)
      .legalForCartesianProduct({p0, s32, s64}, {s32})
      .minScalar(0, s32)
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_BRCOND)
      .legalFor({s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_BRJT)
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_BRINDIRECT)
      .legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s32, s64})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  // Division/remainder: s32 legal (MSA vectors too); s64 goes to a libcall.
  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .minScalar(0, s32)
      .libcallFor({s64});

  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .legalFor({{s32, s32}})
      .clampScalar(1, s32, s32)
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s32}, {s32, p0})
      .clampScalar(1, s32, s32)
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_PTR_ADD, G_INTTOPTR})
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{s32, p0}});

  getActionDefinitionsBuilder(G_FRAME_INDEX)
      .legalFor({p0});

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE})
      .legalFor({p0});

  getActionDefinitionsBuilder(G_DYN_STACKALLOC)
      .lowerFor({{p0, s32}});

  getActionDefinitionsBuilder(G_VASTART)
     .legalFor({p0});

  // G_BSWAP: native on MIPS32r2+ (for s32), otherwise lowered to shifts/ors.
  getActionDefinitionsBuilder(G_BSWAP)
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .lowerIf([=, &ST](const LegalityQuery &Query) {
        if (!ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_BITREVERSE)
      .lowerFor({s32})
      .maxScalar(0, s32);

  // Count-leading-zeros is native; the related bit-count ops lower to it.
  getActionDefinitionsBuilder(G_CTLZ)
      .legalFor({{s32, s32}})
      .maxScalar(0, s32)
      .maxScalar(1, s32);
  getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}});

  getActionDefinitionsBuilder(G_CTTZ)
      .lowerFor({{s32, s32}})
      .maxScalar(0, s32)
      .maxScalar(1, s32);
  getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}, {s64, s64}});

  getActionDefinitionsBuilder(G_CTPOP)
      .lowerFor({{s32, s32}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s32);

  // FP instructions
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({s32, s64});

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FABS, G_FSQRT})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32, s64}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      });

  getActionDefinitionsBuilder(G_FCMP)
      .legalFor({{s32, s32}, {s32, s64}})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
      .libcallFor({s32, s64});

  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor({{s64, s32}});

  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor({{s32, s64}});

  // FP to int conversion instructions
  getActionDefinitionsBuilder(G_FPTOSI)
      .legalForCartesianProduct({s32}, {s64, s32})
      .libcallForCartesianProduct({s64}, {s64, s32})
      .minScalar(0, s32);

  // FP-to-unsigned has no native form: s64 results use libcalls, s32 results
  // are lowered (by the generic lowering) in terms of G_FPTOSI.
  getActionDefinitionsBuilder(G_FPTOUI)
      .libcallForCartesianProduct({s64}, {s64, s32})
      .lowerForCartesianProduct({s32}, {s64, s32})
      .minScalar(0, s32);

  // Int to FP conversion instructions
  getActionDefinitionsBuilder(G_SITOFP)
      .legalForCartesianProduct({s64, s32}, {s32})
      .libcallForCartesianProduct({s64, s32}, {s64})
      .minScalar(1, s32);

  // Unsigned-to-FP from s32 is custom-legalized in legalizeCustom() using a
  // double-precision bit trick; s64 sources use libcalls.
  getActionDefinitionsBuilder(G_UITOFP)
      .libcallForCartesianProduct({s64, s32}, {s64})
      .customForCartesianProduct({s64, s32}, {s32})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getLegacyLegalizerInfo().computeTables();
  verify(*ST.getInstrInfo());
}
332 
// Custom legalization for instructions marked customIf/customForCartesianProduct
// in the constructor: scalar G_LOAD/G_STORE that need to be split into two
// smaller accesses, and G_UITOFP from s32. Returns true on success, false for
// any instruction/type combination not handled here.
bool MipsLegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  using namespace TargetOpcode;

  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  switch (MI.getOpcode()) {
  case G_LOAD:
  case G_STORE: {
    // MemSize is in bytes here (MachineMemOperand::getSize), while register
    // Size below is in bits.
    unsigned MemSize = (**MI.memoperands_begin()).getSize();
    Register Val = MI.getOperand(0).getReg();
    unsigned Size = MRI.getType(Val).getSizeInBits();

    MachineMemOperand *MMOBase = *MI.memoperands_begin();

    assert(MemSize <= 8 && "MemSize is too large");
    assert(Size <= 64 && "Scalar size is too large");

    // Split MemSize into two, P2HalfMemSize is largest power of two smaller
    // then MemSize. e.g. 8 = 4 + 4 , 6 = 4 + 2, 3 = 2 + 1.
    unsigned P2HalfMemSize, RemMemSize;
    if (isPowerOf2_64(MemSize)) {
      P2HalfMemSize = RemMemSize = MemSize / 2;
    } else {
      P2HalfMemSize = 1 << Log2_32(MemSize);
      RemMemSize = MemSize - P2HalfMemSize;
    }

    Register BaseAddr = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(BaseAddr);
    MachineFunction &MF = MIRBuilder.getMF();

    // Memory operands for the two halves: the low part at offset 0 and the
    // remainder at offset P2HalfMemSize (both offsets in bytes).
    auto P2HalfMemOp = MF.getMachineMemOperand(MMOBase, 0, P2HalfMemSize);
    auto RemMemOp = MF.getMachineMemOperand(MMOBase, P2HalfMemSize, RemMemSize);

    if (MI.getOpcode() == G_STORE) {
      // Widen Val to s32 or s64 in order to create legal G_LSHR or G_UNMERGE.
      if (Size < 32)
        Val = MIRBuilder.buildAnyExt(s32, Val).getReg(0);
      if (Size > 32 && Size < 64)
        Val = MIRBuilder.buildAnyExt(s64, Val).getReg(0);

      auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
      auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

      if (MI.getOpcode() == G_STORE && MemSize <= 4) {
        // Value fits in one s32: store the low half, then shift the high bits
        // down and store them at the offset address.
        MIRBuilder.buildStore(Val, BaseAddr, *P2HalfMemOp);
        auto C_P2Half_InBits = MIRBuilder.buildConstant(s32, P2HalfMemSize * 8);
        auto Shift = MIRBuilder.buildLShr(s32, Val, C_P2Half_InBits);
        MIRBuilder.buildStore(Shift, Addr, *RemMemOp);
      } else {
        // MemSize > 4: split the (now s64) value into two s32 halves and
        // store each half separately.
        auto Unmerge = MIRBuilder.buildUnmerge(s32, Val);
        MIRBuilder.buildStore(Unmerge.getReg(0), BaseAddr, *P2HalfMemOp);
        MIRBuilder.buildStore(Unmerge.getReg(1), Addr, *RemMemOp);
      }
    }

    if (MI.getOpcode() == G_LOAD) {

      if (MemSize <= 4) {
        // This is anyextending load, use 4 byte lwr/lwl.
        auto *Load4MMO = MF.getMachineMemOperand(MMOBase, 0, 4);

        if (Size == 32)
          MIRBuilder.buildLoad(Val, BaseAddr, *Load4MMO);
        else {
          auto Load = MIRBuilder.buildLoad(s32, BaseAddr, *Load4MMO);
          MIRBuilder.buildTrunc(Val, Load.getReg(0));
        }

      } else {
        // MemSize > 4: load the two halves as s32 values and merge them back
        // into an s64, truncating if the destination is narrower.
        auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
        auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

        auto Load_P2Half = MIRBuilder.buildLoad(s32, BaseAddr, *P2HalfMemOp);
        auto Load_Rem = MIRBuilder.buildLoad(s32, Addr, *RemMemOp);

        if (Size == 64)
          MIRBuilder.buildMergeLikeInstr(Val, {Load_P2Half, Load_Rem});
        else {
          auto Merge =
              MIRBuilder.buildMergeLikeInstr(s64, {Load_P2Half, Load_Rem});
          MIRBuilder.buildTrunc(Val, Merge);
        }
      }
    }
    MI.eraseFromParent();
    break;
  }
  case G_UITOFP: {
    Register Dst = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    LLT DstTy = MRI.getType(Dst);
    LLT SrcTy = MRI.getType(Src);

    // Only s32 -> s32/s64 conversions are custom-legalized here.
    if (SrcTy != s32)
      return false;
    if (DstTy != s32 && DstTy != s64)
      return false;

    // Let 0xABCDEFGH be given unsigned in MI.getOperand(1). First let's convert
    // unsigned to double. Mantissa has 52 bits so we use following trick:
    // First make floating point bit mask 0x43300000ABCDEFGH.
    // Mask represents 2^52 * 0x1.00000ABCDEFGH i.e. 0x100000ABCDEFGH.0 .
    // Next, subtract  2^52 * 0x1.0000000000000 i.e. 0x10000000000000.0 from it.
    // Done. Trunc double to float if needed.

    auto C_HiMask = MIRBuilder.buildConstant(s32, UINT32_C(0x43300000));
    auto Bitcast =
        MIRBuilder.buildMergeLikeInstr(s64, {Src, C_HiMask.getReg(0)});

    MachineInstrBuilder TwoP52FP = MIRBuilder.buildFConstant(
        s64, llvm::bit_cast<double>(UINT64_C(0x4330000000000000)));

    if (DstTy == s64)
      MIRBuilder.buildFSub(Dst, Bitcast, TwoP52FP);
    else {
      MachineInstrBuilder ResF64 = MIRBuilder.buildFSub(s64, Bitcast, TwoP52FP);
      MIRBuilder.buildFPTrunc(Dst, ResF64);
    }

    MI.eraseFromParent();
    break;
  }
  default:
    return false;
  }

  return true;
}
468 
469 static bool SelectMSA3OpIntrinsic(MachineInstr &MI, unsigned Opcode,
470                                   MachineIRBuilder &MIRBuilder,
471                                   const MipsSubtarget &ST) {
472   assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
473   if (!MIRBuilder.buildInstr(Opcode)
474            .add(MI.getOperand(0))
475            .add(MI.getOperand(2))
476            .add(MI.getOperand(3))
477            .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(),
478                              *ST.getRegBankInfo()))
479     return false;
480   MI.eraseFromParent();
481   return true;
482 }
483 
484 static bool MSA3OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
485                                      MachineIRBuilder &MIRBuilder,
486                                      const MipsSubtarget &ST) {
487   assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
488   MIRBuilder.buildInstr(Opcode)
489       .add(MI.getOperand(0))
490       .add(MI.getOperand(2))
491       .add(MI.getOperand(3));
492   MI.eraseFromParent();
493   return true;
494 }
495 
496 static bool MSA2OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
497                                      MachineIRBuilder &MIRBuilder,
498                                      const MipsSubtarget &ST) {
499   assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
500   MIRBuilder.buildInstr(Opcode)
501       .add(MI.getOperand(0))
502       .add(MI.getOperand(2));
503   MI.eraseFromParent();
504   return true;
505 }
506 
// Legalize target intrinsics: traps, va_copy, and the MSA vector intrinsics,
// which are mapped either to generic opcodes (so later combines/selection see
// ordinary G_* instructions) or directly to MSA machine instructions.
// Note: unrecognized intrinsics fall through the switch and return true.
bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                          MachineInstr &MI) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  const MipsSubtarget &ST = MI.getMF()->getSubtarget<MipsSubtarget>();
  const MipsInstrInfo &TII = *ST.getInstrInfo();
  const MipsRegisterInfo &TRI = *ST.getRegisterInfo();
  const RegisterBankInfo &RBI = *ST.getRegBankInfo();

  switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
  case Intrinsic::trap: {
    // Emit the TRAP machine instruction in place of the intrinsic.
    MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP);
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Trap, TII, TRI, RBI);
  }
  case Intrinsic::vacopy: {
    // va_list is a single pointer on this target: copy it with one 4-byte
    // aligned pointer load from the source followed by a store to the dest.
    MachinePointerInfo MPO;
    LLT PtrTy = LLT::pointer(0, 32);
    auto Tmp =
        MIRBuilder.buildLoad(PtrTy, MI.getOperand(2),
                             *MI.getMF()->getMachineMemOperand(
                                 MPO, MachineMemOperand::MOLoad, PtrTy, Align(4)));
    MIRBuilder.buildStore(Tmp, MI.getOperand(1),
                          *MI.getMF()->getMachineMemOperand(
                              MPO, MachineMemOperand::MOStore, PtrTy, Align(4)));
    MI.eraseFromParent();
    return true;
  }
  // Vector add: all element widths map to generic G_ADD.
  case Intrinsic::mips_addv_b:
  case Intrinsic::mips_addv_h:
  case Intrinsic::mips_addv_w:
  case Intrinsic::mips_addv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_ADD, MIRBuilder, ST);
  // Add-immediate forms select directly to MSA instructions (the immediate
  // operand has no generic equivalent here).
  case Intrinsic::mips_addvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_B, MIRBuilder, ST);
  case Intrinsic::mips_addvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_H, MIRBuilder, ST);
  case Intrinsic::mips_addvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_W, MIRBuilder, ST);
  case Intrinsic::mips_addvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_D, MIRBuilder, ST);
  case Intrinsic::mips_subv_b:
  case Intrinsic::mips_subv_h:
  case Intrinsic::mips_subv_w:
  case Intrinsic::mips_subv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SUB, MIRBuilder, ST);
  case Intrinsic::mips_subvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_B, MIRBuilder, ST);
  case Intrinsic::mips_subvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_H, MIRBuilder, ST);
  case Intrinsic::mips_subvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_W, MIRBuilder, ST);
  case Intrinsic::mips_subvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_D, MIRBuilder, ST);
  case Intrinsic::mips_mulv_b:
  case Intrinsic::mips_mulv_h:
  case Intrinsic::mips_mulv_w:
  case Intrinsic::mips_mulv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_MUL, MIRBuilder, ST);
  case Intrinsic::mips_div_s_b:
  case Intrinsic::mips_div_s_h:
  case Intrinsic::mips_div_s_w:
  case Intrinsic::mips_div_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_s_b:
  case Intrinsic::mips_mod_s_h:
  case Intrinsic::mips_mod_s_w:
  case Intrinsic::mips_mod_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SREM, MIRBuilder, ST);
  case Intrinsic::mips_div_u_b:
  case Intrinsic::mips_div_u_h:
  case Intrinsic::mips_div_u_w:
  case Intrinsic::mips_div_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_u_b:
  case Intrinsic::mips_mod_u_h:
  case Intrinsic::mips_mod_u_w:
  case Intrinsic::mips_mod_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UREM, MIRBuilder, ST);
  // Vector FP arithmetic maps to the generic FP opcodes.
  case Intrinsic::mips_fadd_w:
  case Intrinsic::mips_fadd_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FADD, MIRBuilder, ST);
  case Intrinsic::mips_fsub_w:
  case Intrinsic::mips_fsub_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FSUB, MIRBuilder, ST);
  case Intrinsic::mips_fmul_w:
  case Intrinsic::mips_fmul_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FMUL, MIRBuilder, ST);
  case Intrinsic::mips_fdiv_w:
  case Intrinsic::mips_fdiv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FDIV, MIRBuilder, ST);
  // fmax_a has no generic equivalent; select the MSA instruction directly.
  case Intrinsic::mips_fmax_a_w:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_W, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_d:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_D, MIRBuilder, ST);
  case Intrinsic::mips_fsqrt_w:
    return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST);
  case Intrinsic::mips_fsqrt_d:
    return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST);
  default:
    break;
  }
  return true;
}
610