//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

void MachineIRBuilder::setMBB(MachineBasicBlock &MBB) {
  State.MBB = &MBB;
  State.II = MBB.end();
  assert(&getMF() == MBB.getParent() &&
         "Basic block is in a different function");
}

void MachineIRBuilder::setInstr(MachineInstr &MI) {
  assert(MI.getParent() && "Instruction is not part of a basic block");
  setMBB(*MI.getParent());
  State.II = MI.getIterator();
}

void MachineIRBuilder::setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; }

void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator II) {
  assert(MBB.getParent() == &getMF() &&
         "Basic block is in a different function");
  State.MBB = &MBB;
  State.II = II;
}

void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const {
  if (State.Observer)
    State.Observer->createdInstr(*InsertedInstr);
}

void MachineIRBuilder::setChangeObserver(GISelChangeObserver &Observer) {
  State.Observer = &Observer;
}

void MachineIRBuilder::stopObservingChanges() { State.Observer = nullptr; }
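
// A minimal usage sketch (`MF` and `MBB` are assumed to come from the
// caller): bind the builder to a function, then to an insertion point,
// before emitting anything.
//
//   MachineIRBuilder MIRBuilder;
//   MIRBuilder.setMF(MF);   // binds MRI/TII and resets the DebugLoc
//   MIRBuilder.setMBB(MBB); // new instructions are appended at MBB.end()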

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) {
  return insertInstr(buildInstrNoInsert(Opcode));
}

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}
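
// Note for the constant DBG_VALUE builder below: plain MachineOperand
// immediates are limited to 64 bits, so wider integers are attached as a
// ConstantInt operand (addCImm) rather than via addImm.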
MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert %noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(0U);
  }

  return MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
                                        const LLT &Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
                                       const LLT &Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildGEP(const DstOp &Res,
                                               const SrcOp &Op0,
                                               const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type");

  auto MIB = buildInstr(TargetOpcode::G_GEP);
  Res.addDefToMIB(*getMRI(), MIB);
  Op0.addSrcToMIB(MIB);
  Op1.addSrcToMIB(MIB);
  return MIB;
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
                                 const LLT &ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildGEP(Res, Op0, Cst.getReg(0));
}
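
// Example use of materializeGEP above (a sketch; `Base` and `Offset` come
// from the caller): a zero offset is folded away instead of emitting a dead
// G_CONSTANT + G_GEP pair.
//
//   Register Addr;
//   if (MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offset))
//     ...; // a G_CONSTANT and a G_GEP were built; Addr holds the result
//   else
//     ...; // Offset was 0; Addr aliases Base and nothing was built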
"invalid branch destination"); 252 return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt); 253 } 254 255 MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr, 256 unsigned JTI, 257 Register IndexReg) { 258 assert(getMRI()->getType(TablePtr).isPointer() && 259 "Table reg must be a pointer"); 260 return buildInstr(TargetOpcode::G_BRJT) 261 .addUse(TablePtr) 262 .addJumpTableIndex(JTI) 263 .addUse(IndexReg); 264 } 265 266 MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res, 267 const SrcOp &Op) { 268 return buildInstr(TargetOpcode::COPY, Res, Op); 269 } 270 271 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, 272 const ConstantInt &Val) { 273 LLT Ty = Res.getLLTTy(*getMRI()); 274 LLT EltTy = Ty.getScalarType(); 275 assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() && 276 "creating constant with the wrong size"); 277 278 if (Ty.isVector()) { 279 auto Const = buildInstr(TargetOpcode::G_CONSTANT) 280 .addDef(getMRI()->createGenericVirtualRegister(EltTy)) 281 .addCImm(&Val); 282 return buildSplatVector(Res, Const); 283 } 284 285 auto Const = buildInstr(TargetOpcode::G_CONSTANT); 286 Res.addDefToMIB(*getMRI(), Const); 287 Const.addCImm(&Val); 288 return Const; 289 } 290 291 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, 292 int64_t Val) { 293 auto IntN = IntegerType::get(getMF().getFunction().getContext(), 294 Res.getLLTTy(*getMRI()).getScalarSizeInBits()); 295 ConstantInt *CI = ConstantInt::get(IntN, Val, true); 296 return buildConstant(Res, *CI); 297 } 298 299 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, 300 const ConstantFP &Val) { 301 LLT Ty = Res.getLLTTy(*getMRI()); 302 LLT EltTy = Ty.getScalarType(); 303 304 assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) 305 == EltTy.getSizeInBits() && 306 "creating fconstant with the wrong size"); 307 308 assert(!Ty.isPointer() && "invalid operand type"); 309 310 if (Ty.isVector()) { 311 auto Const = buildInstr(TargetOpcode::G_FCONSTANT) 312 .addDef(getMRI()->createGenericVirtualRegister(EltTy)) 313 .addFPImm(&Val); 314 315 return buildSplatVector(Res, Const); 316 } 317 318 auto Const = buildInstr(TargetOpcode::G_FCONSTANT); 319 Res.addDefToMIB(*getMRI(), Const); 320 Const.addFPImm(&Val); 321 return Const; 322 } 323 324 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, 325 const APInt &Val) { 326 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val); 327 return buildConstant(Res, *CI); 328 } 329 330 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, 331 double Val) { 332 LLT DstTy = Res.getLLTTy(*getMRI()); 333 auto &Ctx = getMF().getFunction().getContext(); 334 auto *CFP = 335 ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits())); 336 return buildFConstant(Res, *CFP); 337 } 338 339 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, 340 const APFloat &Val) { 341 auto &Ctx = getMF().getFunction().getContext(); 342 auto *CFP = ConstantFP::get(Ctx, Val); 343 return buildFConstant(Res, *CFP); 344 } 345 346 MachineInstrBuilder MachineIRBuilder::buildBrCond(Register Tst, 347 MachineBasicBlock &Dest) { 348 assert(getMRI()->getType(Tst).isScalar() && "invalid operand type"); 349 350 return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest); 351 } 352 353 MachineInstrBuilder MachineIRBuilder::buildLoad(const DstOp &Res, 354 const SrcOp &Addr, 355 MachineMemOperand &MMO) { 356 return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, 

MachineInstrBuilder MachineIRBuilder::buildBrCond(Register Tst,
                                                  MachineBasicBlock &Dest) {
  assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");

  return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildLoad(const DstOp &Res,
                                                const SrcOp &Addr,
                                                MachineMemOperand &MMO) {
  return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildUAddo(const DstOp &Res,
                                                 const DstOp &CarryOut,
                                                 const SrcOp &Op0,
                                                 const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUAdde(const DstOp &Res,
                                                 const DstOp &CarryOut,
                                                 const SrcOp &Op0,
                                                 const SrcOp &Op1,
                                                 const SrcOp &CarryIn) {
  return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
                    {Op0, Op1, CarryIn});
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
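
// The wrappers below pick the final opcode through buildExtOrTrunc. For
// example (a sketch, assuming an s32 source): buildSExtOrTrunc to s64 emits
// G_SEXT, to s16 emits G_TRUNC, and to s32 degenerates to a plain COPY.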
MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(std::is_sorted(Indices.begin(), Indices.end()) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}
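
// E.g. (a sketch): sequencing two s32 registers at offsets 0 and 32 into an
// s64 result collapses to a single G_MERGE_VALUES; irregular offsets or
// mixed types instead produce a G_IMPLICIT_DEF followed by a chain of
// G_INSERTs, one per operand.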

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}
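
// Example for the overload above (a sketch): splitting an s64 value into
// two s32 pieces; this overload creates the result registers itself.
//
//   auto Unmerge = MIRBuilder.buildUnmerge(LLT::scalar(32), Src64);
//   Register Lo = Unmerge.getReg(0); // low bits come first
//   Register Hi = Unmerge.getReg(1);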

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src,
                                                  Register Op, unsigned Index) {
  assert(Index + getMRI()->getType(Op).getSizeInBits() <=
             getMRI()->getType(Res).getSizeInBits() &&
         "insertion past the end of a register");

  if (getMRI()->getType(Res).getSizeInBits() ==
      getMRI()->getType(Op).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT)
      .addDef(Res)
      .addUse(Src)
      .addUse(Op)
      .addImm(Index);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}
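
// Example for buildIntrinsic above (a sketch): argument operands are
// appended by the caller after the intrinsic ID operand.
//
//   MIRBuilder.buildIntrinsic(Intrinsic::sqrt, {DstReg},
//                             /*HasSideEffects=*/false)
//       .addUse(SrcReg);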
&& "invalid operand type"); 738 assert(NewValTy.isValid() && "invalid operand type"); 739 assert(OldValResTy == CmpValTy && "type mismatch"); 740 assert(OldValResTy == NewValTy && "type mismatch"); 741 #endif 742 743 return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS) 744 .addDef(OldValRes) 745 .addDef(SuccessRes) 746 .addUse(Addr) 747 .addUse(CmpVal) 748 .addUse(NewVal) 749 .addMemOperand(&MMO); 750 } 751 752 MachineInstrBuilder 753 MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr, 754 Register CmpVal, Register NewVal, 755 MachineMemOperand &MMO) { 756 #ifndef NDEBUG 757 LLT OldValResTy = getMRI()->getType(OldValRes); 758 LLT AddrTy = getMRI()->getType(Addr); 759 LLT CmpValTy = getMRI()->getType(CmpVal); 760 LLT NewValTy = getMRI()->getType(NewVal); 761 assert(OldValResTy.isScalar() && "invalid operand type"); 762 assert(AddrTy.isPointer() && "invalid operand type"); 763 assert(CmpValTy.isValid() && "invalid operand type"); 764 assert(NewValTy.isValid() && "invalid operand type"); 765 assert(OldValResTy == CmpValTy && "type mismatch"); 766 assert(OldValResTy == NewValTy && "type mismatch"); 767 #endif 768 769 return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG) 770 .addDef(OldValRes) 771 .addUse(Addr) 772 .addUse(CmpVal) 773 .addUse(NewVal) 774 .addMemOperand(&MMO); 775 } 776 777 MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(unsigned Opcode, 778 Register OldValRes, 779 Register Addr, 780 Register Val, 781 MachineMemOperand &MMO) { 782 #ifndef NDEBUG 783 LLT OldValResTy = getMRI()->getType(OldValRes); 784 LLT AddrTy = getMRI()->getType(Addr); 785 LLT ValTy = getMRI()->getType(Val); 786 assert(OldValResTy.isScalar() && "invalid operand type"); 787 assert(AddrTy.isPointer() && "invalid operand type"); 788 assert(ValTy.isValid() && "invalid operand type"); 789 assert(OldValResTy == ValTy && "type mismatch"); 790 #endif 791 792 return buildInstr(Opcode) 793 .addDef(OldValRes) 794 .addUse(Addr) 795 .addUse(Val) 796 .addMemOperand(&MMO); 797 } 798 799 MachineInstrBuilder 800 MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr, 801 Register Val, MachineMemOperand &MMO) { 802 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val, 803 MMO); 804 } 805 MachineInstrBuilder 806 MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr, 807 Register Val, MachineMemOperand &MMO) { 808 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val, 809 MMO); 810 } 811 MachineInstrBuilder 812 MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr, 813 Register Val, MachineMemOperand &MMO) { 814 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val, 815 MMO); 816 } 817 MachineInstrBuilder 818 MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr, 819 Register Val, MachineMemOperand &MMO) { 820 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val, 821 MMO); 822 } 823 MachineInstrBuilder 824 MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr, 825 Register Val, MachineMemOperand &MMO) { 826 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val, 827 MMO); 828 } 829 MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes, 830 Register Addr, 831 Register Val, 832 MachineMemOperand &MMO) { 833 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val, 834 MMO); 835 } 836 MachineInstrBuilder 837 MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr, 838 
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy,
                                        const LLT &Op0Ty, const LLT &Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}
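
// Central entry point: validates per-opcode invariants (in asserts builds)
// and then materializes the instruction. A few cases fold on the fly: a
// one-operand G_MERGE_VALUES becomes a cast, a vector-typed G_MERGE_VALUES
// becomes G_CONCAT_VECTORS, and a G_BUILD_VECTOR_TRUNC whose sources are
// already element-sized becomes a plain G_BUILD_VECTOR. Optional
// MachineInstr flags can be attached, e.g. (a sketch):
//
//   MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {LHS, RHS},
//                         MachineInstr::FmNoNans);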
MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(std::all_of(DstOps.begin(), DstOps.end(),
                       [&, this](const DstOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                DstOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in output list");
    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector())
      return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return (Op.getLLTTy(*getMRI()).isVector() &&
                                 Op.getLLTTy(*getMRI()) ==
                                     SrcOps[0].getLLTTy(*getMRI()));
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}