//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

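// Illustrative setup sequence (not from this file): setMF() only binds the
// function-level state; an insertion point must still be chosen, e.g. via
// setInsertPt() from the header.
//
//   MachineIRBuilder B;
//   B.setMF(MF);                     // bind function, MRI, TII, ...
//   B.setInsertPt(MBB, MBB.begin()); // then pick a block and position
//
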
//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()},
                 getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

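// Illustrative use (names assumed, not from this file): allocate a
// 16-byte-aligned dynamic stack slot whose byte size lives in SizeReg,
// producing a pointer-typed result.
//
//   LLT P0 = LLT::pointer(0, 64);
//   auto Alloca = B.buildDynStackAlloc(P0, SizeReg, Align(16));
//
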
MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}

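// Illustrative use (names assumed): add a possibly-zero offset to BasePtr
// without emitting a no-op G_PTR_ADD. When the offset is 0, Res aliases
// BasePtr and no instruction is returned.
//
//   Register Res;
//   if (auto MIB = B.materializePtrAdd(Res, BasePtr, LLT::scalar(64), Offset))
//     ; // Res is defined by the new G_PTR_ADD.
//   else
//     ; // Offset was 0; Res == BasePtr.
//
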
MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

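// A sketch of the mask this produces: maskTrailingZeros<uint64_t>(N) keeps
// every bit except the low N, so the resulting G_PTRMASK clears the low N
// pointer bits, i.e. aligns the pointer down. For example (illustrative):
//
//   maskTrailingZeros<uint64_t>(4) == 0xFFFFFFFFFFFFFFF0
//   B.buildMaskLowPtrBits(Dst, Ptr, 4); // Dst = Ptr with low 4 bits cleared
//
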
MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}

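// Worked example (illustrative types): padding <2 x s32> up to <4 x s32>
// unmerges the two source elements and appends undefs:
//
//   %e0:_(s32), %e1:_(s32) = G_UNMERGE_VALUES %src(<2 x s32>)
//   %u:_(s32) = G_IMPLICIT_DEF
//   %dst:_(<4 x s32>) = G_BUILD_VECTOR %e0, %e1, %u, %u
//
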
MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

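// Note on the fixed-vector path above (illustrative MIR): requesting a
// <4 x s32> constant 1 first builds a scalar G_CONSTANT and then splats it:
//
//   %c:_(s32) = G_CONSTANT i32 1
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %c, %c, %c, %c
//
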
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder
MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
                                       const ConstantPtrAuth *CPA,
                                       Register Addr, Register AddrDisc) {
  auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addUse(Addr);
  MIB.addImm(CPA->getKey()->getZExtValue());
  MIB.addUse(AddrDisc);
  MIB.addImm(CPA->getDiscriminator()->getZExtValue());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

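// Illustrative use (names assumed): load an s32 located 4 bytes past BasePtr,
// deriving the offset MMO from the base access:
//
//   auto Field = B.buildLoadFromOffset(LLT::scalar(32), BasePtr, BaseMMO, 4);
//
// This emits a G_CONSTANT 4 and a G_PTR_ADD, then a G_LOAD whose memory
// operand is the base MMO shifted by 4 bytes.
//
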
MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

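// Summary of the mapping above (derived directly from the switch):
//
//   ZeroOrNegativeOneBooleanContent -> G_SEXT   (booleans are 0 / -1)
//   ZeroOrOneBooleanContent         -> G_ZEXT   (booleans are 0 / 1)
//   anything else (undefined)       -> G_ANYEXT (high bits don't matter)
//
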
MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

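// Behavior of buildExtOrTrunc with G_ZEXT, by example (illustrative types):
//
//   s32 -> s64 : emits G_ZEXT  (destination is wider)
//   s64 -> s32 : emits G_TRUNC (destination is narrower)
//   s32 -> s32 : emits COPY    (same size)
//
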
MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}

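// The opcode choice above, in table form (follows directly from the code):
//
//   vector dst, vector srcs -> G_CONCAT_VECTORS
//   vector dst, scalar srcs -> G_BUILD_VECTOR
//   scalar dst              -> G_MERGE_VALUES
//
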
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

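// Illustrative use (types assumed): the homogeneous overload infers the result
// count from the size ratio, so splitting an s64 into s32 pieces (64/32 == 2):
//
//   auto Parts = B.buildUnmerge(LLT::scalar(32), Src64);
//   // %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %src(s64)
//
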
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

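// How the splat above works (follows from the code): the scalar is inserted
// into lane 0 of an undef vector, then a shuffle with an all-zero mask
// broadcasts lane 0 to every result lane. The value-initialized ZeroMask is
// exactly that all-zero mask.
//
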
MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const APInt &MinElts) {
  ConstantInt *CI =
      ConstantInt::get(getMF().getFunction().getContext(), MinElts);
  return buildVScale(Res, *CI);
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder
MachineIRBuilder::buildTrunc(const DstOp &Res, const SrcOp &Op,
                             std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
                                                           const SrcOp &Src0,
                                                           const SrcOp &Src1,
                                                           unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

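// Illustrative use (names and MMO assumed): a 32-bit compare-and-swap that
// also yields the s1 success flag:
//
//   auto CAS = B.buildAtomicCmpXchgWithSuccess(
//       LLT::scalar(32), LLT::scalar(1), Addr, Expected, Desired, MMO);
//   Register OldVal  = CAS.getReg(0); // previous memory value
//   Register Success = CAS.getReg(1); // s1: did the exchange happen?
//
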
989 MachineInstrBuilder
buildAtomicCmpXchg(const DstOp & OldValRes,const SrcOp & Addr,const SrcOp & CmpVal,const SrcOp & NewVal,MachineMemOperand & MMO)990 MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
991                                      const SrcOp &CmpVal, const SrcOp &NewVal,
992                                      MachineMemOperand &MMO) {
993 #ifndef NDEBUG
994   LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
995   LLT AddrTy = Addr.getLLTTy(*getMRI());
996   LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
997   LLT NewValTy = NewVal.getLLTTy(*getMRI());
998   assert(OldValResTy.isScalar() && "invalid operand type");
999   assert(AddrTy.isPointer() && "invalid operand type");
1000   assert(CmpValTy.isValid() && "invalid operand type");
1001   assert(NewValTy.isValid() && "invalid operand type");
1002   assert(OldValResTy == CmpValTy && "type mismatch");
1003   assert(OldValResTy == NewValTy && "type mismatch");
1004 #endif
1005 
1006   auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
1007   OldValRes.addDefToMIB(*getMRI(), MIB);
1008   Addr.addSrcToMIB(MIB);
1009   CmpVal.addSrcToMIB(MIB);
1010   NewVal.addSrcToMIB(MIB);
1011   MIB.addMemOperand(&MMO);
1012   return MIB;
1013 }
1014 
buildAtomicRMW(unsigned Opcode,const DstOp & OldValRes,const SrcOp & Addr,const SrcOp & Val,MachineMemOperand & MMO)1015 MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
1016   unsigned Opcode, const DstOp &OldValRes,
1017   const SrcOp &Addr, const SrcOp &Val,
1018   MachineMemOperand &MMO) {
1019 
1020 #ifndef NDEBUG
1021   LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1022   LLT AddrTy = Addr.getLLTTy(*getMRI());
1023   LLT ValTy = Val.getLLTTy(*getMRI());
1024   assert(AddrTy.isPointer() && "invalid operand type");
1025   assert(ValTy.isValid() && "invalid operand type");
1026   assert(OldValResTy == ValTy && "type mismatch");
1027   assert(MMO.isAtomic() && "not atomic mem operand");
1028 #endif
1029 
1030   auto MIB = buildInstr(Opcode);
1031   OldValRes.addDefToMIB(*getMRI(), MIB);
1032   Addr.addSrcToMIB(MIB);
1033   Val.addSrcToMIB(MIB);
1034   MIB.addMemOperand(&MMO);
1035   return MIB;
1036 }
1037 
1038 MachineInstrBuilder
buildAtomicRMWXchg(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1039 MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
1040                                      Register Val, MachineMemOperand &MMO) {
1041   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
1042                         MMO);
1043 }
1044 MachineInstrBuilder
buildAtomicRMWAdd(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1045 MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
1046                                     Register Val, MachineMemOperand &MMO) {
1047   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
1048                         MMO);
1049 }
1050 MachineInstrBuilder
buildAtomicRMWSub(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1051 MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
1052                                     Register Val, MachineMemOperand &MMO) {
1053   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
1054                         MMO);
1055 }
1056 MachineInstrBuilder
buildAtomicRMWAnd(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1057 MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
1058                                     Register Val, MachineMemOperand &MMO) {
1059   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
1060                         MMO);
1061 }
1062 MachineInstrBuilder
buildAtomicRMWNand(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1063 MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
1064                                      Register Val, MachineMemOperand &MMO) {
1065   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
1066                         MMO);
1067 }
buildAtomicRMWOr(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1068 MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
1069                                                        Register Addr,
1070                                                        Register Val,
1071                                                        MachineMemOperand &MMO) {
1072   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
1073                         MMO);
1074 }
1075 MachineInstrBuilder
buildAtomicRMWXor(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1076 MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
1077                                     Register Val, MachineMemOperand &MMO) {
1078   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
1079                         MMO);
1080 }
1081 MachineInstrBuilder
buildAtomicRMWMax(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1082 MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
1083                                     Register Val, MachineMemOperand &MMO) {
1084   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
1085                         MMO);
1086 }
1087 MachineInstrBuilder
buildAtomicRMWMin(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1088 MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
1089                                     Register Val, MachineMemOperand &MMO) {
1090   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
1091                         MMO);
1092 }
1093 MachineInstrBuilder
buildAtomicRMWUmax(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1094 MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
1095                                      Register Val, MachineMemOperand &MMO) {
1096   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
1097                         MMO);
1098 }
1099 MachineInstrBuilder
buildAtomicRMWUmin(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1100 MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
1101                                      Register Val, MachineMemOperand &MMO) {
1102   return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
1103                         MMO);
1104 }
1105 
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

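// The floating-point variants above take DstOp/SrcOp instead of raw
// Registers, so anything convertible to an operand works. Sketch, reusing
// the hypothetical names from the integer example:
//   B.buildAtomicRMWFAdd(OldVal, Addr, Val, *MMO); // OldVal = fadd *Addr, Val
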
MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
    .addImm(Ordering)
    .addImm(Scope);
}

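// Both immediates are raw encodings: Ordering is an llvm::AtomicOrdering cast
// to unsigned and Scope is a SyncScope::ID. A sketch of a plausible call
// (hypothetical builder `B`, modeled on how IR fences are lowered):
//   B.buildFence(static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent),
//                SyncScope::System);
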
MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

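// The immediates mirror the llvm.prefetch intrinsic: RW is 0 (read) or 1
// (write), Locality is 0-3 (3 = keep in all cache levels), and CacheType is
// 0 (instruction) or 1 (data). Sketch with hypothetical operands:
//   B.buildPrefetch(Addr, /*RW=*/0, /*Locality=*/3, /*CacheType=*/1, *MMO);
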
MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

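// Sketch: lowering an IR blockaddress(@f, %bb) constant, assuming Res already
// has a pointer LLT and BB is the corresponding hypothetical BasicBlock*:
//   B.buildBlockAddress(Res, BlockAddress::get(BB->getParent(), BB));
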
void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

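// In short: an extend must strictly widen, a trunc must strictly narrow, and
// element counts never change. A sketch of what passes and what asserts
// (hypothetical builder `B` and source registers):
//   B.buildZExt(LLT::scalar(64), Src32);              // OK: s32 -> s64
//   B.buildTrunc(LLT::fixed_vector(4, 16), SrcV4S32); // <4 x s32> -> <4 x s16>
//   B.buildTrunc(LLT::scalar(64), Src32);             // asserts: widening trunc
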
void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}

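// A vector select may therefore take either a single scalar condition or a
// per-lane condition vector with a matching element count. Sketch with
// hypothetical <4 x s32> registers Res/Lhs/Rhs:
//   B.buildSelect(Res, ScalarCond, Lhs, Rhs);  // one condition for all lanes
//   B.buildSelect(Res, VecCond4xS1, Lhs, Rhs); // per-lane <4 x s1> condition
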
MachineInstrBuilder
MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                             ArrayRef<SrcOp> SrcOps,
                             std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
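  // Sketch of the generic G_SELECT form validated above (hypothetical s32
  // value registers and an s1 condition):
  //   B.buildInstr(TargetOpcode::G_SELECT, {Dst}, {Cond, TrueVal, FalseVal});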
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type Mismatch");
    break;
  }
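  // Sketch of the compare form validated above (hypothetical s32 registers;
  // a scalar integer compare produces an s1 result):
  //   B.buildInstr(TargetOpcode::G_ICMP, {LLT::scalar(1)},
  //                {CmpInst::ICMP_EQ, LHS, RHS});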
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
    break;
  }
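  // Sketch: splitting a hypothetical s64 register into two s32 halves (the
  // destination sizes must exactly cover the source):
  //   LLT S32 = LLT::scalar(32);
  //   B.buildInstr(TargetOpcode::G_UNMERGE_VALUES, {S32, S32}, {Src64});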
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTORS or G_BUILD_VECTOR");
    break;
  }
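  // Sketch: the inverse of the unmerge above, packing two hypothetical s32
  // registers into one s64 (the first source lands in the low bits):
  //   B.buildInstr(TargetOpcode::G_MERGE_VALUES, {LLT::scalar(64)},
  //                {Lo32, Hi32});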
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
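  // Sketch: assembling a <4 x s32> from four hypothetical s32 element
  // registers; element count times element size must equal the vector size:
  //   B.buildInstr(TargetOpcode::G_BUILD_VECTOR, {LLT::fixed_vector(4, 32)},
  //                {E0, E1, E2, E3});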
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid number of dst operands");
    assert(SrcOps.size() == 3 && "Invalid number of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
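  // Sketch: an unsigned add-with-carry producing {sum, carry-out} from
  // {lhs, rhs, carry-in}; carries are typically s1 (hypothetical registers):
  //   B.buildInstr(TargetOpcode::G_UADDE, {Sum32, CarryOut1},
  //                {Lhs32, Rhs32, CarryIn1});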
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}