//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}
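
// A minimal usage sketch of the no-insert/insert pair above: build an
// instruction without placing it, add operands, then splice it in at the
// current insertion point. `B` and the registers are hypothetical.
//
//   MachineIRBuilder &B = ...;
//   MachineInstrBuilder MIB = B.buildInstrNoInsert(TargetOpcode::G_ADD);
//   MIB.addDef(DstReg).addUse(LHSReg).addUse(RHSReg);
//   B.insertInstr(MIB); // Now placed, and visible to the observer if any.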

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
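
// Sketch of how a caller consumes materializePtrAdd (hypothetical names): a
// zero offset emits nothing and aliases Res to the base register, while a
// non-zero offset materializes G_CONSTANT + G_PTR_ADD.
//
//   Register NewPtr; // Must be zero on entry; filled in by the call.
//   if (auto PtrAdd = B.materializePtrAdd(NewPtr, BasePtr, LLT::scalar(64),
//                                         ByteOffset)) {
//     // *PtrAdd is the new G_PTR_ADD defining NewPtr.
//   } else {
//     // ByteOffset was 0: NewPtr == BasePtr and nothing was built.
//   }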

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
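
// Sketch (hypothetical names): clearing the low 4 bits of a pointer, e.g. to
// round it down to a 16-byte boundary, emits a G_CONSTANT of the
// all-ones-except-low-4-bits mask followed by G_PTRMASK.
//
//   auto Aligned = B.buildMaskLowPtrBits(PtrTy, UnalignedPtr, /*NumBits=*/4);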

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
         "Op0 has more elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  SmallVector<Register, 8> Regs;
  for (auto Op : Unmerge.getInstr()->defs())
    Regs.push_back(Op.getReg());
  Register Undef = buildUndef(Op0Ty.getElementType()).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMerge(Res, Regs);
}
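
// Sketch (hypothetical names): widening a <2 x s32> to <4 x s32> unmerges the
// two source elements, appends two undef elements, and remerges.
//
//   auto Wide =
//       B.buildPadVectorWithUndefElements(LLT::fixed_vector(4, 32), NarrowVec);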

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
         "Op0 has fewer elements");

  SmallVector<Register, 8> Regs;
  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMerge(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}
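
// Sketch (hypothetical builder B): a scalar request becomes one G_CONSTANT,
// while a vector request splats the value via G_BUILD_VECTOR.
//
//   auto C = B.buildConstant(LLT::scalar(32), 42);           // G_CONSTANT
//   auto V = B.buildConstant(LLT::fixed_vector(4, 32), 42);  // splat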

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
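
// Sketch (hypothetical names): loading an s32 field at byte offset 8 derives
// an offset MMO from the base MMO and emits G_CONSTANT + G_PTR_ADD + G_LOAD;
// a zero offset folds to a plain (possibly type-changing) G_LOAD.
//
//   auto Field = B.buildLoadFromOffset(LLT::scalar(32), BasePtr, *BaseMMO,
//                                      /*Offset=*/8);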

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
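
// Sketch (hypothetical names): extending an s1 condition the way the target
// expects booleans to be represented, e.g. sign-extension for targets that
// report ZeroOrNegativeOneBooleanContent for vectors.
//
//   unsigned Opc = B.getBoolExtOp(/*IsVec=*/true, /*IsFP=*/false);
//   B.buildInstr(Opc, {WideVecTy}, {BoolVecReg});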

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
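
// Sketch (hypothetical names): one call site handles widening, narrowing and
// the same-size case, since the opcode is chosen from the relative sizes.
//
//   B.buildSExtOrTrunc(LLT::scalar(64), Val32); // G_SEXT  (32 -> 64)
//   B.buildSExtOrTrunc(LLT::scalar(16), Val32); // G_TRUNC (32 -> 16)
//   B.buildSExtOrTrunc(LLT::scalar(32), Val32); // COPY    (same size)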

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}
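
// Sketch (hypothetical names): zero-extending the low 8 bits "in register"
// becomes a G_CONSTANT mask plus a G_AND, i.e. `and Val32, 0xff`.
//
//   auto Masked = B.buildZExtInReg(LLT::scalar(32), Val32, /*ImmOp=*/8);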

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}
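
// Sketch of the opcode selection above (hypothetical names):
//
//   B.buildCast(LLT::scalar(64), PtrVal);          // G_PTRTOINT
//   B.buildCast(PtrTy, IntVal64);                  // G_INTTOPTR
//   B.buildCast(LLT::fixed_vector(2, 16), Val32);  // G_BITCAST (same size)
//   B.buildCast(SameTy, SameTyVal);                // plain COPY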

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}
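
// Sketch (hypothetical names): when the pieces tile the result exactly and in
// order, buildSequence degenerates to one G_MERGE_VALUES; otherwise it chains
// G_INSERTs into an initial G_IMPLICIT_DEF.
//
//   Register Parts[] = {Lo32, Hi32};
//   uint64_t Offsets[] = {0, 32};
//   B.buildSequence(Dst64, Parts, Offsets); // G_MERGE_VALUES here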

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}
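
// Sketch (hypothetical names): splitting an s64 into halves with the LLT
// overload above, which derives the number of defs (64 / 32 == 2).
//
//   auto Unmerge = B.buildUnmerge(LLT::scalar(32), Val64);
//   Register Lo = Unmerge.getReg(0);
//   Register Hi = Unmerge.getReg(1);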

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
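
// Sketch (hypothetical names): splatting a scalar across a vector inserts it
// at index 0 of an undef vector and shuffles with an all-zero mask.
//
//   auto Splat = B.buildShuffleSplat(LLT::fixed_vector(4, 32), ScalarVal);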

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}
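
// Sketch (hypothetical names): a compare-and-swap that also produces the
// success flag as a scalar, given an atomic MachineMemOperand.
//
//   B.buildAtomicCmpXchgWithSuccess(OldVal /*s32*/, Success /*s1*/, AddrPtr,
//                                   Expected, Desired, *AtomicMMO);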

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}
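
// Sketch (hypothetical names): every thin wrapper above funnels into
// buildAtomicRMW with its specific opcode, e.g. an atomic fetch-and-add:
//
//   B.buildAtomicRMWAdd(OldVal, AddrPtr, Increment, *AtomicMMO);
//   // => G_ATOMICRMW_ADD OldVal, AddrPtr, Increment, with AtomicMMO attached.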

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
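
// Sketch (hypothetical names): the generic entry point above validates
// per-opcode invariants in debug builds, then appends defs, uses and optional
// MIFlags.
//
//   auto Add = B.buildInstr(TargetOpcode::G_ADD, {LLT::scalar(32)},
//                           {LHS, RHS}, MachineInstr::NoSWrap);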
1277