//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
11 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
12 #include "llvm/CodeGen/MachineFunction.h"
13 #include "llvm/CodeGen/MachineInstr.h"
14 #include "llvm/CodeGen/MachineInstrBuilder.h"
15 #include "llvm/CodeGen/MachineRegisterInfo.h"
16 #include "llvm/CodeGen/TargetInstrInfo.h"
17 #include "llvm/CodeGen/TargetLowering.h"
18 #include "llvm/CodeGen/TargetOpcodes.h"
19 #include "llvm/CodeGen/TargetSubtargetInfo.h"
20 #include "llvm/IR/DebugInfoMetadata.h"
21
22 using namespace llvm;
23
setMF(MachineFunction & MF)24 void MachineIRBuilder::setMF(MachineFunction &MF) {
25 State.MF = &MF;
26 State.MBB = nullptr;
27 State.MRI = &MF.getRegInfo();
28 State.TII = MF.getSubtarget().getInstrInfo();
29 State.DL = DebugLoc();
30 State.PCSections = nullptr;
31 State.MMRA = nullptr;
32 State.II = MachineBasicBlock::iterator();
33 State.Observer = nullptr;
34 }
35
//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------
39
buildInstrNoInsert(unsigned Opcode)40 MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
41 return BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()},
42 getTII().get(Opcode));
43 }
44
insertInstr(MachineInstrBuilder MIB)45 MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
46 getMBB().insert(getInsertPt(), MIB);
47 recordInsertion(MIB);
48 return MIB;
49 }
50
51 MachineInstrBuilder
buildDirectDbgValue(Register Reg,const MDNode * Variable,const MDNode * Expr)52 MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
53 const MDNode *Expr) {
54 assert(isa<DILocalVariable>(Variable) && "not a variable");
55 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
56 assert(
57 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
58 "Expected inlined-at fields to agree");
59 return insertInstr(BuildMI(getMF(), getDL(),
60 getTII().get(TargetOpcode::DBG_VALUE),
61 /*IsIndirect*/ false, Reg, Variable, Expr));
62 }
63
64 MachineInstrBuilder
buildIndirectDbgValue(Register Reg,const MDNode * Variable,const MDNode * Expr)65 MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
66 const MDNode *Expr) {
67 assert(isa<DILocalVariable>(Variable) && "not a variable");
68 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
69 assert(
70 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
71 "Expected inlined-at fields to agree");
72 return insertInstr(BuildMI(getMF(), getDL(),
73 getTII().get(TargetOpcode::DBG_VALUE),
74 /*IsIndirect*/ true, Reg, Variable, Expr));
75 }
76
buildFIDbgValue(int FI,const MDNode * Variable,const MDNode * Expr)77 MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
78 const MDNode *Variable,
79 const MDNode *Expr) {
80 assert(isa<DILocalVariable>(Variable) && "not a variable");
81 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
82 assert(
83 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
84 "Expected inlined-at fields to agree");
85 return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
86 .addFrameIndex(FI)
87 .addImm(0)
88 .addMetadata(Variable)
89 .addMetadata(Expr));
90 }
91
buildConstDbgValue(const Constant & C,const MDNode * Variable,const MDNode * Expr)92 MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
93 const MDNode *Variable,
94 const MDNode *Expr) {
95 assert(isa<DILocalVariable>(Variable) && "not a variable");
96 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
97 assert(
98 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
99 "Expected inlined-at fields to agree");
100 auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
101
102 auto *NumericConstant = [&] () -> const Constant* {
103 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
104 if (CE->getOpcode() == Instruction::IntToPtr)
105 return CE->getOperand(0);
106 return &C;
107 }();
108
109 if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
110 if (CI->getBitWidth() > 64)
111 MIB.addCImm(CI);
112 else
113 MIB.addImm(CI->getZExtValue());
114 } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
115 MIB.addFPImm(CFP);
116 } else if (isa<ConstantPointerNull>(NumericConstant)) {
117 MIB.addImm(0);
118 } else {
119 // Insert $noreg if we didn't find a usable constant and had to drop it.
120 MIB.addReg(Register());
121 }
122
123 MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
124 return insertInstr(MIB);
125 }
126
buildDbgLabel(const MDNode * Label)127 MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
128 assert(isa<DILabel>(Label) && "not a label");
129 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
130 "Expected inlined-at fields to agree");
131 auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
132
133 return MIB.addMetadata(Label);
134 }
135
buildDynStackAlloc(const DstOp & Res,const SrcOp & Size,Align Alignment)136 MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
137 const SrcOp &Size,
138 Align Alignment) {
139 assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
140 auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
141 Res.addDefToMIB(*getMRI(), MIB);
142 Size.addSrcToMIB(MIB);
143 MIB.addImm(Alignment.value());
144 return MIB;
145 }
146
buildFrameIndex(const DstOp & Res,int Idx)147 MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
148 int Idx) {
149 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
150 auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
151 Res.addDefToMIB(*getMRI(), MIB);
152 MIB.addFrameIndex(Idx);
153 return MIB;
154 }
155
buildGlobalValue(const DstOp & Res,const GlobalValue * GV)156 MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
157 const GlobalValue *GV) {
158 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
159 assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
160 GV->getType()->getAddressSpace() &&
161 "address space mismatch");
162
163 auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
164 Res.addDefToMIB(*getMRI(), MIB);
165 MIB.addGlobalAddress(GV);
166 return MIB;
167 }
168
buildConstantPool(const DstOp & Res,unsigned Idx)169 MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
170 unsigned Idx) {
171 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
172 auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
173 Res.addDefToMIB(*getMRI(), MIB);
174 MIB.addConstantPoolIndex(Idx);
175 return MIB;
176 }
177
buildJumpTable(const LLT PtrTy,unsigned JTI)178 MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
179 unsigned JTI) {
180 return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
181 .addJumpTableIndex(JTI);
182 }
183
validateUnaryOp(const LLT Res,const LLT Op0)184 void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
185 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
186 assert((Res == Op0) && "type mismatch");
187 }
188
validateBinaryOp(const LLT Res,const LLT Op0,const LLT Op1)189 void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
190 const LLT Op1) {
191 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
192 assert((Res == Op0 && Res == Op1) && "type mismatch");
193 }
194
validateShiftOp(const LLT Res,const LLT Op0,const LLT Op1)195 void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
196 const LLT Op1) {
197 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
198 assert((Res == Op0) && "type mismatch");
199 }
200
201 MachineInstrBuilder
buildPtrAdd(const DstOp & Res,const SrcOp & Op0,const SrcOp & Op1,std::optional<unsigned> Flags)202 MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
203 const SrcOp &Op1, std::optional<unsigned> Flags) {
204 assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
205 Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
206 assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type");
207
208 return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
209 }
210
211 std::optional<MachineInstrBuilder>
materializePtrAdd(Register & Res,Register Op0,const LLT ValueTy,uint64_t Value)212 MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
213 const LLT ValueTy, uint64_t Value) {
214 assert(Res == 0 && "Res is a result argument");
215 assert(ValueTy.isScalar() && "invalid offset type");
216
217 if (Value == 0) {
218 Res = Op0;
219 return std::nullopt;
220 }
221
222 Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
223 auto Cst = buildConstant(ValueTy, Value);
224 return buildPtrAdd(Res, Op0, Cst.getReg(0));
225 }
226
buildMaskLowPtrBits(const DstOp & Res,const SrcOp & Op0,uint32_t NumBits)227 MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
228 const SrcOp &Op0,
229 uint32_t NumBits) {
230 LLT PtrTy = Res.getLLTTy(*getMRI());
231 LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
232 Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
233 buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
234 return buildPtrMask(Res, Op0, MaskReg);
235 }
236
237 MachineInstrBuilder
buildPadVectorWithUndefElements(const DstOp & Res,const SrcOp & Op0)238 MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
239 const SrcOp &Op0) {
240 LLT ResTy = Res.getLLTTy(*getMRI());
241 LLT Op0Ty = Op0.getLLTTy(*getMRI());
242
243 assert(ResTy.isVector() && "Res non vector type");
244
245 SmallVector<Register, 8> Regs;
246 if (Op0Ty.isVector()) {
247 assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
248 "Different vector element types");
249 assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
250 "Op0 has more elements");
251 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
252
253 for (auto Op : Unmerge.getInstr()->defs())
254 Regs.push_back(Op.getReg());
255 } else {
256 assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
257 "Op0 has more size");
258 Regs.push_back(Op0.getReg());
259 }
260 Register Undef =
261 buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
262 unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
263 for (unsigned i = 0; i < NumberOfPadElts; ++i)
264 Regs.push_back(Undef);
265 return buildMergeLikeInstr(Res, Regs);
266 }
267
268 MachineInstrBuilder
buildDeleteTrailingVectorElements(const DstOp & Res,const SrcOp & Op0)269 MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
270 const SrcOp &Op0) {
271 LLT ResTy = Res.getLLTTy(*getMRI());
272 LLT Op0Ty = Op0.getLLTTy(*getMRI());
273
274 assert(Op0Ty.isVector() && "Non vector type");
275 assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
276 (ResTy.isVector() &&
277 (ResTy.getElementType() == Op0Ty.getElementType()))) &&
278 "Different vector element types");
279 assert(
280 (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
281 "Op0 has fewer elements");
282
283 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
284 if (ResTy.isScalar())
285 return buildCopy(Res, Unmerge.getReg(0));
286 SmallVector<Register, 8> Regs;
287 for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
288 Regs.push_back(Unmerge.getReg(i));
289 return buildMergeLikeInstr(Res, Regs);
290 }
291
buildBr(MachineBasicBlock & Dest)292 MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
293 return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
294 }
295
buildBrIndirect(Register Tgt)296 MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
297 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
298 return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
299 }
300
buildBrJT(Register TablePtr,unsigned JTI,Register IndexReg)301 MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
302 unsigned JTI,
303 Register IndexReg) {
304 assert(getMRI()->getType(TablePtr).isPointer() &&
305 "Table reg must be a pointer");
306 return buildInstr(TargetOpcode::G_BRJT)
307 .addUse(TablePtr)
308 .addJumpTableIndex(JTI)
309 .addUse(IndexReg);
310 }
311
buildCopy(const DstOp & Res,const SrcOp & Op)312 MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
313 const SrcOp &Op) {
314 return buildInstr(TargetOpcode::COPY, Res, Op);
315 }
316
buildConstant(const DstOp & Res,const ConstantInt & Val)317 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
318 const ConstantInt &Val) {
319 assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
320 LLT Ty = Res.getLLTTy(*getMRI());
321 LLT EltTy = Ty.getScalarType();
322 assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
323 "creating constant with the wrong size");
324
325 assert(!Ty.isScalableVector() &&
326 "unexpected scalable vector in buildConstant");
327
328 if (Ty.isFixedVector()) {
329 auto Const = buildInstr(TargetOpcode::G_CONSTANT)
330 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
331 .addCImm(&Val);
332 return buildSplatBuildVector(Res, Const);
333 }
334
335 auto Const = buildInstr(TargetOpcode::G_CONSTANT);
336 Const->setDebugLoc(DebugLoc());
337 Res.addDefToMIB(*getMRI(), Const);
338 Const.addCImm(&Val);
339 return Const;
340 }
341
buildConstant(const DstOp & Res,int64_t Val)342 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
343 int64_t Val) {
344 auto IntN = IntegerType::get(getMF().getFunction().getContext(),
345 Res.getLLTTy(*getMRI()).getScalarSizeInBits());
346 ConstantInt *CI = ConstantInt::get(IntN, Val, true);
347 return buildConstant(Res, *CI);
348 }
349
buildFConstant(const DstOp & Res,const ConstantFP & Val)350 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
351 const ConstantFP &Val) {
352 assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
353 LLT Ty = Res.getLLTTy(*getMRI());
354 LLT EltTy = Ty.getScalarType();
355
356 assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
357 == EltTy.getSizeInBits() &&
358 "creating fconstant with the wrong size");
359
360 assert(!Ty.isPointer() && "invalid operand type");
361
362 assert(!Ty.isScalableVector() &&
363 "unexpected scalable vector in buildFConstant");
364
365 if (Ty.isFixedVector()) {
366 auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
367 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
368 .addFPImm(&Val);
369
370 return buildSplatBuildVector(Res, Const);
371 }
372
373 auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
374 Const->setDebugLoc(DebugLoc());
375 Res.addDefToMIB(*getMRI(), Const);
376 Const.addFPImm(&Val);
377 return Const;
378 }
379
buildConstant(const DstOp & Res,const APInt & Val)380 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
381 const APInt &Val) {
382 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
383 return buildConstant(Res, *CI);
384 }
385
buildFConstant(const DstOp & Res,double Val)386 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
387 double Val) {
388 LLT DstTy = Res.getLLTTy(*getMRI());
389 auto &Ctx = getMF().getFunction().getContext();
390 auto *CFP =
391 ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
392 return buildFConstant(Res, *CFP);
393 }
394
buildFConstant(const DstOp & Res,const APFloat & Val)395 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
396 const APFloat &Val) {
397 auto &Ctx = getMF().getFunction().getContext();
398 auto *CFP = ConstantFP::get(Ctx, Val);
399 return buildFConstant(Res, *CFP);
400 }
401
402 MachineInstrBuilder
buildConstantPtrAuth(const DstOp & Res,const ConstantPtrAuth * CPA,Register Addr,Register AddrDisc)403 MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
404 const ConstantPtrAuth *CPA,
405 Register Addr, Register AddrDisc) {
406 auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
407 Res.addDefToMIB(*getMRI(), MIB);
408 MIB.addUse(Addr);
409 MIB.addImm(CPA->getKey()->getZExtValue());
410 MIB.addUse(AddrDisc);
411 MIB.addImm(CPA->getDiscriminator()->getZExtValue());
412 return MIB;
413 }
414
buildBrCond(const SrcOp & Tst,MachineBasicBlock & Dest)415 MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
416 MachineBasicBlock &Dest) {
417 assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");
418
419 auto MIB = buildInstr(TargetOpcode::G_BRCOND);
420 Tst.addSrcToMIB(MIB);
421 MIB.addMBB(&Dest);
422 return MIB;
423 }
424
425 MachineInstrBuilder
buildLoad(const DstOp & Dst,const SrcOp & Addr,MachinePointerInfo PtrInfo,Align Alignment,MachineMemOperand::Flags MMOFlags,const AAMDNodes & AAInfo)426 MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
427 MachinePointerInfo PtrInfo, Align Alignment,
428 MachineMemOperand::Flags MMOFlags,
429 const AAMDNodes &AAInfo) {
430 MMOFlags |= MachineMemOperand::MOLoad;
431 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
432
433 LLT Ty = Dst.getLLTTy(*getMRI());
434 MachineMemOperand *MMO =
435 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
436 return buildLoad(Dst, Addr, *MMO);
437 }
438
buildLoadInstr(unsigned Opcode,const DstOp & Res,const SrcOp & Addr,MachineMemOperand & MMO)439 MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
440 const DstOp &Res,
441 const SrcOp &Addr,
442 MachineMemOperand &MMO) {
443 assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
444 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
445
446 auto MIB = buildInstr(Opcode);
447 Res.addDefToMIB(*getMRI(), MIB);
448 Addr.addSrcToMIB(MIB);
449 MIB.addMemOperand(&MMO);
450 return MIB;
451 }
452
buildLoadFromOffset(const DstOp & Dst,const SrcOp & BasePtr,MachineMemOperand & BaseMMO,int64_t Offset)453 MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
454 const DstOp &Dst, const SrcOp &BasePtr,
455 MachineMemOperand &BaseMMO, int64_t Offset) {
456 LLT LoadTy = Dst.getLLTTy(*getMRI());
457 MachineMemOperand *OffsetMMO =
458 getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);
459
460 if (Offset == 0) // This may be a size or type changing load.
461 return buildLoad(Dst, BasePtr, *OffsetMMO);
462
463 LLT PtrTy = BasePtr.getLLTTy(*getMRI());
464 LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
465 auto ConstOffset = buildConstant(OffsetTy, Offset);
466 auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
467 return buildLoad(Dst, Ptr, *OffsetMMO);
468 }
469
buildStore(const SrcOp & Val,const SrcOp & Addr,MachineMemOperand & MMO)470 MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
471 const SrcOp &Addr,
472 MachineMemOperand &MMO) {
473 assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
474 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
475
476 auto MIB = buildInstr(TargetOpcode::G_STORE);
477 Val.addSrcToMIB(MIB);
478 Addr.addSrcToMIB(MIB);
479 MIB.addMemOperand(&MMO);
480 return MIB;
481 }
482
483 MachineInstrBuilder
buildStore(const SrcOp & Val,const SrcOp & Addr,MachinePointerInfo PtrInfo,Align Alignment,MachineMemOperand::Flags MMOFlags,const AAMDNodes & AAInfo)484 MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
485 MachinePointerInfo PtrInfo, Align Alignment,
486 MachineMemOperand::Flags MMOFlags,
487 const AAMDNodes &AAInfo) {
488 MMOFlags |= MachineMemOperand::MOStore;
489 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
490
491 LLT Ty = Val.getLLTTy(*getMRI());
492 MachineMemOperand *MMO =
493 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
494 return buildStore(Val, Addr, *MMO);
495 }
496
buildAnyExt(const DstOp & Res,const SrcOp & Op)497 MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
498 const SrcOp &Op) {
499 return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
500 }
501
buildSExt(const DstOp & Res,const SrcOp & Op)502 MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
503 const SrcOp &Op) {
504 return buildInstr(TargetOpcode::G_SEXT, Res, Op);
505 }
506
buildZExt(const DstOp & Res,const SrcOp & Op,std::optional<unsigned> Flags)507 MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
508 const SrcOp &Op,
509 std::optional<unsigned> Flags) {
510 return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
511 }
512
getBoolExtOp(bool IsVec,bool IsFP) const513 unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
514 const auto *TLI = getMF().getSubtarget().getTargetLowering();
515 switch (TLI->getBooleanContents(IsVec, IsFP)) {
516 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
517 return TargetOpcode::G_SEXT;
518 case TargetLoweringBase::ZeroOrOneBooleanContent:
519 return TargetOpcode::G_ZEXT;
520 default:
521 return TargetOpcode::G_ANYEXT;
522 }
523 }
524
buildBoolExt(const DstOp & Res,const SrcOp & Op,bool IsFP)525 MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
526 const SrcOp &Op,
527 bool IsFP) {
528 unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
529 return buildInstr(ExtOp, Res, Op);
530 }
531
buildBoolExtInReg(const DstOp & Res,const SrcOp & Op,bool IsVector,bool IsFP)532 MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
533 const SrcOp &Op,
534 bool IsVector,
535 bool IsFP) {
536 const auto *TLI = getMF().getSubtarget().getTargetLowering();
537 switch (TLI->getBooleanContents(IsVector, IsFP)) {
538 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
539 return buildSExtInReg(Res, Op, 1);
540 case TargetLoweringBase::ZeroOrOneBooleanContent:
541 return buildZExtInReg(Res, Op, 1);
542 case TargetLoweringBase::UndefinedBooleanContent:
543 return buildCopy(Res, Op);
544 }
545
546 llvm_unreachable("unexpected BooleanContent");
547 }
548
buildExtOrTrunc(unsigned ExtOpc,const DstOp & Res,const SrcOp & Op)549 MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
550 const DstOp &Res,
551 const SrcOp &Op) {
552 assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
553 TargetOpcode::G_SEXT == ExtOpc) &&
554 "Expecting Extending Opc");
555 assert(Res.getLLTTy(*getMRI()).isScalar() ||
556 Res.getLLTTy(*getMRI()).isVector());
557 assert(Res.getLLTTy(*getMRI()).isScalar() ==
558 Op.getLLTTy(*getMRI()).isScalar());
559
560 unsigned Opcode = TargetOpcode::COPY;
561 if (Res.getLLTTy(*getMRI()).getSizeInBits() >
562 Op.getLLTTy(*getMRI()).getSizeInBits())
563 Opcode = ExtOpc;
564 else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
565 Op.getLLTTy(*getMRI()).getSizeInBits())
566 Opcode = TargetOpcode::G_TRUNC;
567 else
568 assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
569
570 return buildInstr(Opcode, Res, Op);
571 }
572
buildSExtOrTrunc(const DstOp & Res,const SrcOp & Op)573 MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
574 const SrcOp &Op) {
575 return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
576 }
577
buildZExtOrTrunc(const DstOp & Res,const SrcOp & Op)578 MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
579 const SrcOp &Op) {
580 return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
581 }
582
buildAnyExtOrTrunc(const DstOp & Res,const SrcOp & Op)583 MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
584 const SrcOp &Op) {
585 return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
586 }
587
buildZExtInReg(const DstOp & Res,const SrcOp & Op,int64_t ImmOp)588 MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
589 const SrcOp &Op,
590 int64_t ImmOp) {
591 LLT ResTy = Res.getLLTTy(*getMRI());
592 auto Mask = buildConstant(
593 ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
594 return buildAnd(Res, Op, Mask);
595 }
596
buildCast(const DstOp & Dst,const SrcOp & Src)597 MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
598 const SrcOp &Src) {
599 LLT SrcTy = Src.getLLTTy(*getMRI());
600 LLT DstTy = Dst.getLLTTy(*getMRI());
601 if (SrcTy == DstTy)
602 return buildCopy(Dst, Src);
603
604 unsigned Opcode;
605 if (SrcTy.isPointerOrPointerVector())
606 Opcode = TargetOpcode::G_PTRTOINT;
607 else if (DstTy.isPointerOrPointerVector())
608 Opcode = TargetOpcode::G_INTTOPTR;
609 else {
610 assert(!SrcTy.isPointerOrPointerVector() &&
611 !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
612 Opcode = TargetOpcode::G_BITCAST;
613 }
614
615 return buildInstr(Opcode, Dst, Src);
616 }
617
buildExtract(const DstOp & Dst,const SrcOp & Src,uint64_t Index)618 MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
619 const SrcOp &Src,
620 uint64_t Index) {
621 LLT SrcTy = Src.getLLTTy(*getMRI());
622 LLT DstTy = Dst.getLLTTy(*getMRI());
623
624 #ifndef NDEBUG
625 assert(SrcTy.isValid() && "invalid operand type");
626 assert(DstTy.isValid() && "invalid operand type");
627 assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
628 "extracting off end of register");
629 #endif
630
631 if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
632 assert(Index == 0 && "insertion past the end of a register");
633 return buildCast(Dst, Src);
634 }
635
636 auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
637 Dst.addDefToMIB(*getMRI(), Extract);
638 Src.addSrcToMIB(Extract);
639 Extract.addImm(Index);
640 return Extract;
641 }
642
buildUndef(const DstOp & Res)643 MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
644 return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
645 }
646
buildMergeValues(const DstOp & Res,ArrayRef<Register> Ops)647 MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
648 ArrayRef<Register> Ops) {
649 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
650 // we need some temporary storage for the DstOp objects. Here we use a
651 // sufficiently large SmallVector to not go through the heap.
652 SmallVector<SrcOp, 8> TmpVec(Ops);
653 assert(TmpVec.size() > 1);
654 return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
655 }
656
657 MachineInstrBuilder
buildMergeLikeInstr(const DstOp & Res,ArrayRef<Register> Ops)658 MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
659 ArrayRef<Register> Ops) {
660 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
661 // we need some temporary storage for the DstOp objects. Here we use a
662 // sufficiently large SmallVector to not go through the heap.
663 SmallVector<SrcOp, 8> TmpVec(Ops);
664 assert(TmpVec.size() > 1);
665 return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
666 }
667
668 MachineInstrBuilder
buildMergeLikeInstr(const DstOp & Res,std::initializer_list<SrcOp> Ops)669 MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
670 std::initializer_list<SrcOp> Ops) {
671 assert(Ops.size() > 1);
672 return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
673 }
674
getOpcodeForMerge(const DstOp & DstOp,ArrayRef<SrcOp> SrcOps) const675 unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
676 ArrayRef<SrcOp> SrcOps) const {
677 if (DstOp.getLLTTy(*getMRI()).isVector()) {
678 if (SrcOps[0].getLLTTy(*getMRI()).isVector())
679 return TargetOpcode::G_CONCAT_VECTORS;
680 return TargetOpcode::G_BUILD_VECTOR;
681 }
682
683 return TargetOpcode::G_MERGE_VALUES;
684 }
685
buildUnmerge(ArrayRef<LLT> Res,const SrcOp & Op)686 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
687 const SrcOp &Op) {
688 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
689 // we need some temporary storage for the DstOp objects. Here we use a
690 // sufficiently large SmallVector to not go through the heap.
691 SmallVector<DstOp, 8> TmpVec(Res);
692 assert(TmpVec.size() > 1);
693 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
694 }
695
buildUnmerge(LLT Res,const SrcOp & Op)696 MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
697 const SrcOp &Op) {
698 unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
699 SmallVector<DstOp, 8> TmpVec(NumReg, Res);
700 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
701 }
702
703 MachineInstrBuilder
buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,const SrcOp & Op)704 MachineIRBuilder::buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,
705 const SrcOp &Op) {
706 LLT OpTy = Op.getLLTTy(*getMRI());
707 unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
708 SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
709 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
710 }
711
buildUnmerge(ArrayRef<Register> Res,const SrcOp & Op)712 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
713 const SrcOp &Op) {
714 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
715 // we need some temporary storage for the DstOp objects. Here we use a
716 // sufficiently large SmallVector to not go through the heap.
717 SmallVector<DstOp, 8> TmpVec(Res);
718 assert(TmpVec.size() > 1);
719 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
720 }
721
buildBuildVector(const DstOp & Res,ArrayRef<Register> Ops)722 MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
723 ArrayRef<Register> Ops) {
724 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
725 // we need some temporary storage for the DstOp objects. Here we use a
726 // sufficiently large SmallVector to not go through the heap.
727 SmallVector<SrcOp, 8> TmpVec(Ops);
728 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
729 }
730
731 MachineInstrBuilder
buildBuildVectorConstant(const DstOp & Res,ArrayRef<APInt> Ops)732 MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
733 ArrayRef<APInt> Ops) {
734 SmallVector<SrcOp> TmpVec;
735 TmpVec.reserve(Ops.size());
736 LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
737 for (const auto &Op : Ops)
738 TmpVec.push_back(buildConstant(EltTy, Op));
739 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
740 }
741
buildSplatBuildVector(const DstOp & Res,const SrcOp & Src)742 MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
743 const SrcOp &Src) {
744 SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
745 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
746 }
747
748 MachineInstrBuilder
buildBuildVectorTrunc(const DstOp & Res,ArrayRef<Register> Ops)749 MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
750 ArrayRef<Register> Ops) {
751 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
752 // we need some temporary storage for the DstOp objects. Here we use a
753 // sufficiently large SmallVector to not go through the heap.
754 SmallVector<SrcOp, 8> TmpVec(Ops);
755 if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
756 Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
757 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
758 return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
759 }
760
buildShuffleSplat(const DstOp & Res,const SrcOp & Src)761 MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
762 const SrcOp &Src) {
763 LLT DstTy = Res.getLLTTy(*getMRI());
764 assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
765 "Expected Src to match Dst elt ty");
766 auto UndefVec = buildUndef(DstTy);
767 auto Zero = buildConstant(LLT::scalar(64), 0);
768 auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
769 SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
770 return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
771 }
772
buildSplatVector(const DstOp & Res,const SrcOp & Src)773 MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
774 const SrcOp &Src) {
775 assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
776 "Expected Src to match Dst elt ty");
777 return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
778 }
779
buildShuffleVector(const DstOp & Res,const SrcOp & Src1,const SrcOp & Src2,ArrayRef<int> Mask)780 MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
781 const SrcOp &Src1,
782 const SrcOp &Src2,
783 ArrayRef<int> Mask) {
784 LLT DstTy = Res.getLLTTy(*getMRI());
785 LLT Src1Ty = Src1.getLLTTy(*getMRI());
786 LLT Src2Ty = Src2.getLLTTy(*getMRI());
787 const LLT DstElemTy = DstTy.isVector() ? DstTy.getElementType() : DstTy;
788 const LLT ElemTy1 = Src1Ty.isVector() ? Src1Ty.getElementType() : Src1Ty;
789 const LLT ElemTy2 = Src2Ty.isVector() ? Src2Ty.getElementType() : Src2Ty;
790 assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
791 (void)DstElemTy;
792 (void)ElemTy1;
793 (void)ElemTy2;
794 ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
795 return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
796 .addShuffleMask(MaskAlloc);
797 }
798
799 MachineInstrBuilder
buildConcatVectors(const DstOp & Res,ArrayRef<Register> Ops)800 MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
801 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
802 // we need some temporary storage for the DstOp objects. Here we use a
803 // sufficiently large SmallVector to not go through the heap.
804 SmallVector<SrcOp, 8> TmpVec(Ops);
805 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
806 }
807
buildInsert(const DstOp & Res,const SrcOp & Src,const SrcOp & Op,unsigned Index)808 MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
809 const SrcOp &Src,
810 const SrcOp &Op,
811 unsigned Index) {
812 assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
813 Res.getLLTTy(*getMRI()).getSizeInBits() &&
814 "insertion past the end of a register");
815
816 if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
817 Op.getLLTTy(*getMRI()).getSizeInBits()) {
818 return buildCast(Res, Op);
819 }
820
821 return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
822 }
823
buildStepVector(const DstOp & Res,unsigned Step)824 MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
825 unsigned Step) {
826 unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
827 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
828 APInt(Bitwidth, Step));
829 auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
830 StepVector->setDebugLoc(DebugLoc());
831 Res.addDefToMIB(*getMRI(), StepVector);
832 StepVector.addCImm(CI);
833 return StepVector;
834 }
835
buildVScale(const DstOp & Res,unsigned MinElts)836 MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
837 unsigned MinElts) {
838
839 auto IntN = IntegerType::get(getMF().getFunction().getContext(),
840 Res.getLLTTy(*getMRI()).getScalarSizeInBits());
841 ConstantInt *CI = ConstantInt::get(IntN, MinElts);
842 return buildVScale(Res, *CI);
843 }
844
buildVScale(const DstOp & Res,const ConstantInt & MinElts)845 MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
846 const ConstantInt &MinElts) {
847 auto VScale = buildInstr(TargetOpcode::G_VSCALE);
848 VScale->setDebugLoc(DebugLoc());
849 Res.addDefToMIB(*getMRI(), VScale);
850 VScale.addCImm(&MinElts);
851 return VScale;
852 }
853
buildVScale(const DstOp & Res,const APInt & MinElts)854 MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
855 const APInt &MinElts) {
856 ConstantInt *CI =
857 ConstantInt::get(getMF().getFunction().getContext(), MinElts);
858 return buildVScale(Res, *CI);
859 }
860
getIntrinsicOpcode(bool HasSideEffects,bool IsConvergent)861 static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
862 if (HasSideEffects && IsConvergent)
863 return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
864 if (HasSideEffects)
865 return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
866 if (IsConvergent)
867 return TargetOpcode::G_INTRINSIC_CONVERGENT;
868 return TargetOpcode::G_INTRINSIC;
869 }
870
871 MachineInstrBuilder
buildIntrinsic(Intrinsic::ID ID,ArrayRef<Register> ResultRegs,bool HasSideEffects,bool isConvergent)872 MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
873 ArrayRef<Register> ResultRegs,
874 bool HasSideEffects, bool isConvergent) {
875 auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
876 for (Register ResultReg : ResultRegs)
877 MIB.addDef(ResultReg);
878 MIB.addIntrinsicID(ID);
879 return MIB;
880 }
881
882 MachineInstrBuilder
buildIntrinsic(Intrinsic::ID ID,ArrayRef<Register> ResultRegs)883 MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
884 ArrayRef<Register> ResultRegs) {
885 AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
886 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
887 bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
888 return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
889 }
890
buildIntrinsic(Intrinsic::ID ID,ArrayRef<DstOp> Results,bool HasSideEffects,bool isConvergent)891 MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
892 ArrayRef<DstOp> Results,
893 bool HasSideEffects,
894 bool isConvergent) {
895 auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
896 for (DstOp Result : Results)
897 Result.addDefToMIB(*getMRI(), MIB);
898 MIB.addIntrinsicID(ID);
899 return MIB;
900 }
901
buildIntrinsic(Intrinsic::ID ID,ArrayRef<DstOp> Results)902 MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
903 ArrayRef<DstOp> Results) {
904 AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
905 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
906 bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
907 return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
908 }
909
910 MachineInstrBuilder
buildTrunc(const DstOp & Res,const SrcOp & Op,std::optional<unsigned> Flags)911 MachineIRBuilder::buildTrunc(const DstOp &Res, const SrcOp &Op,
912 std::optional<unsigned> Flags) {
913 return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
914 }
915
916 MachineInstrBuilder
buildFPTrunc(const DstOp & Res,const SrcOp & Op,std::optional<unsigned> Flags)917 MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
918 std::optional<unsigned> Flags) {
919 return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
920 }
921
buildICmp(CmpInst::Predicate Pred,const DstOp & Res,const SrcOp & Op0,const SrcOp & Op1,std::optional<unsigned> Flags)922 MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
923 const DstOp &Res,
924 const SrcOp &Op0,
925 const SrcOp &Op1,
926 std::optional<unsigned> Flags) {
927 return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
928 }
929
buildFCmp(CmpInst::Predicate Pred,const DstOp & Res,const SrcOp & Op0,const SrcOp & Op1,std::optional<unsigned> Flags)930 MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
931 const DstOp &Res,
932 const SrcOp &Op0,
933 const SrcOp &Op1,
934 std::optional<unsigned> Flags) {
935
936 return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
937 }
938
buildSCmp(const DstOp & Res,const SrcOp & Op0,const SrcOp & Op1)939 MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
940 const SrcOp &Op0,
941 const SrcOp &Op1) {
942 return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
943 }
944
buildUCmp(const DstOp & Res,const SrcOp & Op0,const SrcOp & Op1)945 MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
946 const SrcOp &Op0,
947 const SrcOp &Op1) {
948 return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
949 }
950
951 MachineInstrBuilder
buildSelect(const DstOp & Res,const SrcOp & Tst,const SrcOp & Op0,const SrcOp & Op1,std::optional<unsigned> Flags)952 MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
953 const SrcOp &Op0, const SrcOp &Op1,
954 std::optional<unsigned> Flags) {
955
956 return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
957 }
958
buildInsertSubvector(const DstOp & Res,const SrcOp & Src0,const SrcOp & Src1,unsigned Idx)959 MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
960 const SrcOp &Src0,
961 const SrcOp &Src1,
962 unsigned Idx) {
963 return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
964 {Src0, Src1, uint64_t(Idx)});
965 }
966
buildExtractSubvector(const DstOp & Res,const SrcOp & Src,unsigned Idx)967 MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
968 const SrcOp &Src,
969 unsigned Idx) {
970 return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
971 {Src, uint64_t(Idx)});
972 }
973
974 MachineInstrBuilder
buildInsertVectorElement(const DstOp & Res,const SrcOp & Val,const SrcOp & Elt,const SrcOp & Idx)975 MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
976 const SrcOp &Elt, const SrcOp &Idx) {
977 return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
978 }
979
980 MachineInstrBuilder
buildExtractVectorElement(const DstOp & Res,const SrcOp & Val,const SrcOp & Idx)981 MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
982 const SrcOp &Idx) {
983 return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
984 }
985
// Build `OldValRes, SuccessRes = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal,
// NewVal` and attach the memory operand \p MMO.
MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  // Type sanity checks. The locals exist only to feed the asserts, so the
  // whole region is compiled out in release builds.
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  // Operand order matters: defs first (old value, then success flag),
  // followed by address, compare value, new value, and the memory operand.
  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
1013
// Build `OldValRes = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal` with memory
// operand \p MMO (no separate success flag; see the *_WITH_SUCCESS variant).
MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  // Debug-only type sanity checks; compiled out in release builds.
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  // Operand order: def, then address, compare value, new value, mem operand.
  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
1039
// Shared worker for all G_ATOMICRMW_* builders: emits
// `OldValRes = <Opcode> Addr, Val` with memory operand \p MMO.
MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  // Debug-only type sanity checks; compiled out in release builds.
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  // Operand order: def, then address, value, memory operand.
  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
1062
1063 MachineInstrBuilder
buildAtomicRMWXchg(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1064 MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
1065 Register Val, MachineMemOperand &MMO) {
1066 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
1067 MMO);
1068 }
1069 MachineInstrBuilder
buildAtomicRMWAdd(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1070 MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
1071 Register Val, MachineMemOperand &MMO) {
1072 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
1073 MMO);
1074 }
1075 MachineInstrBuilder
buildAtomicRMWSub(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1076 MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
1077 Register Val, MachineMemOperand &MMO) {
1078 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
1079 MMO);
1080 }
1081 MachineInstrBuilder
buildAtomicRMWAnd(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1082 MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
1083 Register Val, MachineMemOperand &MMO) {
1084 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
1085 MMO);
1086 }
1087 MachineInstrBuilder
buildAtomicRMWNand(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1088 MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
1089 Register Val, MachineMemOperand &MMO) {
1090 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
1091 MMO);
1092 }
buildAtomicRMWOr(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1093 MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
1094 Register Addr,
1095 Register Val,
1096 MachineMemOperand &MMO) {
1097 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
1098 MMO);
1099 }
1100 MachineInstrBuilder
buildAtomicRMWXor(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1101 MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
1102 Register Val, MachineMemOperand &MMO) {
1103 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
1104 MMO);
1105 }
1106 MachineInstrBuilder
buildAtomicRMWMax(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1107 MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
1108 Register Val, MachineMemOperand &MMO) {
1109 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
1110 MMO);
1111 }
1112 MachineInstrBuilder
buildAtomicRMWMin(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1113 MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
1114 Register Val, MachineMemOperand &MMO) {
1115 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
1116 MMO);
1117 }
1118 MachineInstrBuilder
buildAtomicRMWUmax(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1119 MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
1120 Register Val, MachineMemOperand &MMO) {
1121 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
1122 MMO);
1123 }
1124 MachineInstrBuilder
buildAtomicRMWUmin(Register OldValRes,Register Addr,Register Val,MachineMemOperand & MMO)1125 MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
1126 Register Val, MachineMemOperand &MMO) {
1127 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
1128 MMO);
1129 }
1130
1131 MachineInstrBuilder
buildAtomicRMWFAdd(const DstOp & OldValRes,const SrcOp & Addr,const SrcOp & Val,MachineMemOperand & MMO)1132 MachineIRBuilder::buildAtomicRMWFAdd(
1133 const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1134 MachineMemOperand &MMO) {
1135 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
1136 MMO);
1137 }
1138
1139 MachineInstrBuilder
buildAtomicRMWFSub(const DstOp & OldValRes,const SrcOp & Addr,const SrcOp & Val,MachineMemOperand & MMO)1140 MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1141 MachineMemOperand &MMO) {
1142 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
1143 MMO);
1144 }
1145
1146 MachineInstrBuilder
buildAtomicRMWFMax(const DstOp & OldValRes,const SrcOp & Addr,const SrcOp & Val,MachineMemOperand & MMO)1147 MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
1148 const SrcOp &Val, MachineMemOperand &MMO) {
1149 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
1150 MMO);
1151 }
1152
1153 MachineInstrBuilder
buildAtomicRMWFMin(const DstOp & OldValRes,const SrcOp & Addr,const SrcOp & Val,MachineMemOperand & MMO)1154 MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
1155 const SrcOp &Val, MachineMemOperand &MMO) {
1156 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
1157 MMO);
1158 }
1159
1160 MachineInstrBuilder
buildAtomicRMWFMaximum(const DstOp & OldValRes,const SrcOp & Addr,const SrcOp & Val,MachineMemOperand & MMO)1161 MachineIRBuilder::buildAtomicRMWFMaximum(const DstOp &OldValRes,
1162 const SrcOp &Addr, const SrcOp &Val,
1163 MachineMemOperand &MMO) {
1164 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAXIMUM, OldValRes, Addr,
1165 Val, MMO);
1166 }
1167
1168 MachineInstrBuilder
buildAtomicRMWFMinimum(const DstOp & OldValRes,const SrcOp & Addr,const SrcOp & Val,MachineMemOperand & MMO)1169 MachineIRBuilder::buildAtomicRMWFMinimum(const DstOp &OldValRes,
1170 const SrcOp &Addr, const SrcOp &Val,
1171 MachineMemOperand &MMO) {
1172 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMINIMUM, OldValRes, Addr,
1173 Val, MMO);
1174 }
1175
1176 MachineInstrBuilder
buildFence(unsigned Ordering,unsigned Scope)1177 MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
1178 return buildInstr(TargetOpcode::G_FENCE)
1179 .addImm(Ordering)
1180 .addImm(Scope);
1181 }
1182
buildPrefetch(const SrcOp & Addr,unsigned RW,unsigned Locality,unsigned CacheType,MachineMemOperand & MMO)1183 MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
1184 unsigned RW,
1185 unsigned Locality,
1186 unsigned CacheType,
1187 MachineMemOperand &MMO) {
1188 auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
1189 Addr.addSrcToMIB(MIB);
1190 MIB.addImm(RW).addImm(Locality).addImm(CacheType);
1191 MIB.addMemOperand(&MMO);
1192 return MIB;
1193 }
1194
1195 MachineInstrBuilder
buildBlockAddress(Register Res,const BlockAddress * BA)1196 MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
1197 #ifndef NDEBUG
1198 assert(getMRI()->getType(Res).isPointer() && "invalid res type");
1199 #endif
1200
1201 return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
1202 }
1203
// Debug-only validation shared by the ext/trunc builders: checks the
// dst/src type pair is a legal widening (IsExtend) or narrowing (!IsExtend).
void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  // Vector <-> vector casts must keep the element count; otherwise both
  // sides must be plain scalars.
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  // An extend must strictly grow the bit width; a trunc must strictly
  // shrink it.
  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}
1222
// Debug-only validation for G_SELECT: result and both value operands must
// share a type, and the test operand's shape must suit that type.
void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  // Scalar/pointer selects take a scalar test; vector selects accept either
  // a scalar test or a vector test with matching element count.
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}
1238
1239 MachineInstrBuilder
buildInstr(unsigned Opc,ArrayRef<DstOp> DstOps,ArrayRef<SrcOp> SrcOps,std::optional<unsigned> Flags)1240 MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
1241 ArrayRef<SrcOp> SrcOps,
1242 std::optional<unsigned> Flags) {
1243 switch (Opc) {
1244 default:
1245 break;
1246 case TargetOpcode::G_SELECT: {
1247 assert(DstOps.size() == 1 && "Invalid select");
1248 assert(SrcOps.size() == 3 && "Invalid select");
1249 validateSelectOp(
1250 DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
1251 SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
1252 break;
1253 }
1254 case TargetOpcode::G_FNEG:
1255 case TargetOpcode::G_ABS:
1256 // All these are unary ops.
1257 assert(DstOps.size() == 1 && "Invalid Dst");
1258 assert(SrcOps.size() == 1 && "Invalid Srcs");
1259 validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
1260 SrcOps[0].getLLTTy(*getMRI()));
1261 break;
1262 case TargetOpcode::G_ADD:
1263 case TargetOpcode::G_AND:
1264 case TargetOpcode::G_MUL:
1265 case TargetOpcode::G_OR:
1266 case TargetOpcode::G_SUB:
1267 case TargetOpcode::G_XOR:
1268 case TargetOpcode::G_UDIV:
1269 case TargetOpcode::G_SDIV:
1270 case TargetOpcode::G_UREM:
1271 case TargetOpcode::G_SREM:
1272 case TargetOpcode::G_SMIN:
1273 case TargetOpcode::G_SMAX:
1274 case TargetOpcode::G_UMIN:
1275 case TargetOpcode::G_UMAX:
1276 case TargetOpcode::G_UADDSAT:
1277 case TargetOpcode::G_SADDSAT:
1278 case TargetOpcode::G_USUBSAT:
1279 case TargetOpcode::G_SSUBSAT: {
1280 // All these are binary ops.
1281 assert(DstOps.size() == 1 && "Invalid Dst");
1282 assert(SrcOps.size() == 2 && "Invalid Srcs");
1283 validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
1284 SrcOps[0].getLLTTy(*getMRI()),
1285 SrcOps[1].getLLTTy(*getMRI()));
1286 break;
1287 }
1288 case TargetOpcode::G_SHL:
1289 case TargetOpcode::G_ASHR:
1290 case TargetOpcode::G_LSHR:
1291 case TargetOpcode::G_USHLSAT:
1292 case TargetOpcode::G_SSHLSAT: {
1293 assert(DstOps.size() == 1 && "Invalid Dst");
1294 assert(SrcOps.size() == 2 && "Invalid Srcs");
1295 validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
1296 SrcOps[0].getLLTTy(*getMRI()),
1297 SrcOps[1].getLLTTy(*getMRI()));
1298 break;
1299 }
1300 case TargetOpcode::G_SEXT:
1301 case TargetOpcode::G_ZEXT:
1302 case TargetOpcode::G_ANYEXT:
1303 assert(DstOps.size() == 1 && "Invalid Dst");
1304 assert(SrcOps.size() == 1 && "Invalid Srcs");
1305 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1306 SrcOps[0].getLLTTy(*getMRI()), true);
1307 break;
1308 case TargetOpcode::G_TRUNC:
1309 case TargetOpcode::G_FPTRUNC: {
1310 assert(DstOps.size() == 1 && "Invalid Dst");
1311 assert(SrcOps.size() == 1 && "Invalid Srcs");
1312 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1313 SrcOps[0].getLLTTy(*getMRI()), false);
1314 break;
1315 }
1316 case TargetOpcode::G_BITCAST: {
1317 assert(DstOps.size() == 1 && "Invalid Dst");
1318 assert(SrcOps.size() == 1 && "Invalid Srcs");
1319 assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1320 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
1321 break;
1322 }
1323 case TargetOpcode::COPY:
1324 assert(DstOps.size() == 1 && "Invalid Dst");
1325 // If the caller wants to add a subreg source it has to be done separately
1326 // so we may not have any SrcOps at this point yet.
1327 break;
1328 case TargetOpcode::G_FCMP:
1329 case TargetOpcode::G_ICMP: {
1330 assert(DstOps.size() == 1 && "Invalid Dst Operands");
1331 assert(SrcOps.size() == 3 && "Invalid Src Operands");
1332 // For F/ICMP, the first src operand is the predicate, followed by
1333 // the two comparands.
1334 assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
1335 "Expecting predicate");
1336 assert([&]() -> bool {
1337 CmpInst::Predicate Pred = SrcOps[0].getPredicate();
1338 return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
1339 : CmpInst::isFPPredicate(Pred);
1340 }() && "Invalid predicate");
1341 assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1342 "Type mismatch");
1343 assert([&]() -> bool {
1344 LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
1345 LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1346 if (Op0Ty.isScalar() || Op0Ty.isPointer())
1347 return DstTy.isScalar();
1348 else
1349 return DstTy.isVector() &&
1350 DstTy.getElementCount() == Op0Ty.getElementCount();
1351 }() && "Type Mismatch");
1352 break;
1353 }
1354 case TargetOpcode::G_UNMERGE_VALUES: {
1355 assert(!DstOps.empty() && "Invalid trivial sequence");
1356 assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
1357 assert(llvm::all_of(DstOps,
1358 [&, this](const DstOp &Op) {
1359 return Op.getLLTTy(*getMRI()) ==
1360 DstOps[0].getLLTTy(*getMRI());
1361 }) &&
1362 "type mismatch in output list");
1363 assert((TypeSize::ScalarTy)DstOps.size() *
1364 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1365 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1366 "input operands do not cover output register");
1367 break;
1368 }
1369 case TargetOpcode::G_MERGE_VALUES: {
1370 assert(SrcOps.size() >= 2 && "invalid trivial sequence");
1371 assert(DstOps.size() == 1 && "Invalid Dst");
1372 assert(llvm::all_of(SrcOps,
1373 [&, this](const SrcOp &Op) {
1374 return Op.getLLTTy(*getMRI()) ==
1375 SrcOps[0].getLLTTy(*getMRI());
1376 }) &&
1377 "type mismatch in input list");
1378 assert((TypeSize::ScalarTy)SrcOps.size() *
1379 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1380 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1381 "input operands do not cover output register");
1382 assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
1383 "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
1384 break;
1385 }
1386 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1387 assert(DstOps.size() == 1 && "Invalid Dst size");
1388 assert(SrcOps.size() == 2 && "Invalid Src size");
1389 assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1390 assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1391 DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1392 "Invalid operand type");
1393 assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1394 assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1395 DstOps[0].getLLTTy(*getMRI()) &&
1396 "Type mismatch");
1397 break;
1398 }
1399 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1400 assert(DstOps.size() == 1 && "Invalid dst size");
1401 assert(SrcOps.size() == 3 && "Invalid src size");
1402 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1403 SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1404 assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1405 SrcOps[1].getLLTTy(*getMRI()) &&
1406 "Type mismatch");
1407 assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1408 assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
1409 SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
1410 "Type mismatch");
1411 break;
1412 }
1413 case TargetOpcode::G_BUILD_VECTOR: {
1414 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1415 "Must have at least 2 operands");
1416 assert(DstOps.size() == 1 && "Invalid DstOps");
1417 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1418 "Res type must be a vector");
1419 assert(llvm::all_of(SrcOps,
1420 [&, this](const SrcOp &Op) {
1421 return Op.getLLTTy(*getMRI()) ==
1422 SrcOps[0].getLLTTy(*getMRI());
1423 }) &&
1424 "type mismatch in input list");
1425 assert((TypeSize::ScalarTy)SrcOps.size() *
1426 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1427 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1428 "input scalars do not exactly cover the output vector register");
1429 break;
1430 }
1431 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1432 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1433 "Must have at least 2 operands");
1434 assert(DstOps.size() == 1 && "Invalid DstOps");
1435 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1436 "Res type must be a vector");
1437 assert(llvm::all_of(SrcOps,
1438 [&, this](const SrcOp &Op) {
1439 return Op.getLLTTy(*getMRI()) ==
1440 SrcOps[0].getLLTTy(*getMRI());
1441 }) &&
1442 "type mismatch in input list");
1443 break;
1444 }
1445 case TargetOpcode::G_CONCAT_VECTORS: {
1446 assert(DstOps.size() == 1 && "Invalid DstOps");
1447 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1448 "Must have at least 2 operands");
1449 assert(llvm::all_of(SrcOps,
1450 [&, this](const SrcOp &Op) {
1451 return (Op.getLLTTy(*getMRI()).isVector() &&
1452 Op.getLLTTy(*getMRI()) ==
1453 SrcOps[0].getLLTTy(*getMRI()));
1454 }) &&
1455 "type mismatch in input list");
1456 assert((TypeSize::ScalarTy)SrcOps.size() *
1457 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1458 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1459 "input vectors do not exactly cover the output vector register");
1460 break;
1461 }
1462 case TargetOpcode::G_UADDE: {
1463 assert(DstOps.size() == 2 && "Invalid no of dst operands");
1464 assert(SrcOps.size() == 3 && "Invalid no of src operands");
1465 assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1466 assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1467 (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1468 "Invalid operand");
1469 assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1470 assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1471 "type mismatch");
1472 break;
1473 }
1474 }
1475
1476 auto MIB = buildInstr(Opc);
1477 for (const DstOp &Op : DstOps)
1478 Op.addDefToMIB(*getMRI(), MIB);
1479 for (const SrcOp &Op : SrcOps)
1480 Op.addSrcToMIB(MIB);
1481 if (Flags)
1482 MIB->setFlags(*Flags);
1483 return MIB;
1484 }
1485