1 //===- Instructions.cpp - Implement the LLVM instructions -----------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements all of the non-inline methods for the LLVM instruction 10 // classes. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/IR/Instructions.h" 15 #include "LLVMContextImpl.h" 16 #include "llvm/ADT/None.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/ADT/Twine.h" 19 #include "llvm/IR/Attributes.h" 20 #include "llvm/IR/BasicBlock.h" 21 #include "llvm/IR/Constant.h" 22 #include "llvm/IR/Constants.h" 23 #include "llvm/IR/DataLayout.h" 24 #include "llvm/IR/DerivedTypes.h" 25 #include "llvm/IR/Function.h" 26 #include "llvm/IR/InstrTypes.h" 27 #include "llvm/IR/Instruction.h" 28 #include "llvm/IR/Intrinsics.h" 29 #include "llvm/IR/LLVMContext.h" 30 #include "llvm/IR/MDBuilder.h" 31 #include "llvm/IR/Metadata.h" 32 #include "llvm/IR/Module.h" 33 #include "llvm/IR/Operator.h" 34 #include "llvm/IR/Type.h" 35 #include "llvm/IR/Value.h" 36 #include "llvm/Support/AtomicOrdering.h" 37 #include "llvm/Support/Casting.h" 38 #include "llvm/Support/ErrorHandling.h" 39 #include "llvm/Support/MathExtras.h" 40 #include "llvm/Support/TypeSize.h" 41 #include <algorithm> 42 #include <cassert> 43 #include <cstdint> 44 #include <vector> 45 46 using namespace llvm; 47 48 //===----------------------------------------------------------------------===// 49 // AllocaInst Class 50 //===----------------------------------------------------------------------===// 51 52 Optional<uint64_t> 53 AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const { 54 uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType()); 55 if 
(isArrayAllocation()) { 56 auto *C = dyn_cast<ConstantInt>(getArraySize()); 57 if (!C) 58 return None; 59 Size *= C->getZExtValue(); 60 } 61 return Size; 62 } 63 64 //===----------------------------------------------------------------------===// 65 // SelectInst Class 66 //===----------------------------------------------------------------------===// 67 68 /// areInvalidOperands - Return a string if the specified operands are invalid 69 /// for a select operation, otherwise return null. 70 const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) { 71 if (Op1->getType() != Op2->getType()) 72 return "both values to select must have same type"; 73 74 if (Op1->getType()->isTokenTy()) 75 return "select values cannot have token type"; 76 77 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) { 78 // Vector select. 79 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext())) 80 return "vector select condition element type must be i1"; 81 VectorType *ET = dyn_cast<VectorType>(Op1->getType()); 82 if (!ET) 83 return "selected values for vector select must be vectors"; 84 if (ET->getElementCount() != VT->getElementCount()) 85 return "vector select requires selected vectors to have " 86 "the same vector length as select condition"; 87 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) { 88 return "select condition must be i1 or <n x i1>"; 89 } 90 return nullptr; 91 } 92 93 //===----------------------------------------------------------------------===// 94 // PHINode Class 95 //===----------------------------------------------------------------------===// 96 97 PHINode::PHINode(const PHINode &PN) 98 : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()), 99 ReservedSpace(PN.getNumOperands()) { 100 allocHungoffUses(PN.getNumOperands()); 101 std::copy(PN.op_begin(), PN.op_end(), op_begin()); 102 std::copy(PN.block_begin(), PN.block_end(), block_begin()); 103 SubclassOptionalData = PN.SubclassOptionalData; 104 } 

// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value (now a duplicate after the shift-down above).
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the
/// same value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  // All incoming values were this PHI itself (self-loop): pick undef.
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    // Self-references and undefs never disqualify; any two distinct other
    // values do.
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                       LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

// Copy constructor: clause operands live in hung-off storage, so copy them
// use-by-use after allocating.
LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

// Shared constructor body: reserve clause storage but start with zero
// operands; clauses are appended later via addClause().
void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation.  This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  // Only reallocate when the reserved hung-off storage cannot absorb Size
  // more operands.
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

// Append one clause (catch or filter constant) to the landing pad.
void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
//                        CallBase Implementation
//===----------------------------------------------------------------------===//

// Clone CB (call/invoke/callbr) with a new set of operand bundles, inserting
// the clone before InsertPt.
CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           Instruction *InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

// The caller is the function containing this call site.
Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  // CallBr carries its indirect destinations plus the default destination as
  // extra operands.
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  // Direct callees (functions and other constants, e.g. bitcasts of
  // functions) are not indirect; inline asm is treated as direct too.
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
280 bool CallBase::isMustTailCall() const { 281 if (auto *CI = dyn_cast<CallInst>(this)) 282 return CI->isMustTailCall(); 283 return false; 284 } 285 286 /// Tests if this call site is marked as a tail call. 287 bool CallBase::isTailCall() const { 288 if (auto *CI = dyn_cast<CallInst>(this)) 289 return CI->isTailCall(); 290 return false; 291 } 292 293 Intrinsic::ID CallBase::getIntrinsicID() const { 294 if (auto *F = getCalledFunction()) 295 return F->getIntrinsicID(); 296 return Intrinsic::not_intrinsic; 297 } 298 299 bool CallBase::isReturnNonNull() const { 300 if (hasRetAttr(Attribute::NonNull)) 301 return true; 302 303 if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 && 304 !NullPointerIsDefined(getCaller(), 305 getType()->getPointerAddressSpace())) 306 return true; 307 308 return false; 309 } 310 311 Value *CallBase::getReturnedArgOperand() const { 312 unsigned Index; 313 314 if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index) 315 return getArgOperand(Index - AttributeList::FirstArgIndex); 316 if (const Function *F = getCalledFunction()) 317 if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) && 318 Index) 319 return getArgOperand(Index - AttributeList::FirstArgIndex); 320 321 return nullptr; 322 } 323 324 bool CallBase::hasRetAttr(Attribute::AttrKind Kind) const { 325 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind)) 326 return true; 327 328 // Look at the callee, if available. 329 if (const Function *F = getCalledFunction()) 330 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind); 331 return false; 332 } 333 334 /// Determine whether the argument or parameter has the given attribute. 
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");

  // Call-site attributes take precedence; otherwise consult the callee's
  // declared parameter attributes.
  if (Attrs.hasParamAttribute(ArgNo, Kind))
    return true;
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasParamAttribute(ArgNo, Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasFnAttribute(Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasFnAttribute(Kind);
  return false;
}

// Export every operand bundle on this call site as an OperandBundleDef.
void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}

// Copy the bundle inputs into the operand list starting at BeginIndex and
// fill in one BundleOpInfo (tag + half-open [Begin, End) operand range) per
// bundle.  Returns the iterator one past the last operand written.
CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}

CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Else fall back to a binary-search that uses the fact that bundles
  /// usually have a similar number of arguments to get faster convergence.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a decimal number below and to prevent using floating point
  /// numbers we use an integral value multiplied by this constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current;

  // Interpolation-style search: guess a bundle assuming operands are spread
  // roughly evenly over the remaining bundles, then narrow like a binary
  // search on the mismatch direction.
  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}

//===----------------------------------------------------------------------===//
//                        CallInst Implementation
//===----------------------------------------------------------------------===//

// Full init: wires up the callee, copies the explicit arguments, then lays
// out the bundle operands after them.
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  setCalledOperand(Func);

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  // The single trailing operand is the callee itself.
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

// Minimal init for zero-argument calls.
void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}

// Copy constructor: clones operands and the bundle descriptors verbatim.
CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

// Clone CI with a replacement set of operand bundles, preserving tail-call
// kind, calling convention, attributes and debug location.
CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update profile weight for call instruction by scaling it using the ratio
// of S/T. The meaning of "branch_weights" meta data for call instruction is
// transferred to represent call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }

  // Scale each weight by S/T using 128-bit intermediates to avoid overflow.
  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
    // NOTE(review): mdconst::dyn_extract returns null if operand 1 is not a
    // ConstantInt; the unchecked dereference below assumes well-formed
    // branch_weights metadata -- confirm against the IR verifier's rules.
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(ConstantInt::get(
        Type::getInt64Ty(getContext()), Val.udiv(APT).getLimitedValue())));
  } else if (ProfDataName->getString().equals("VP"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not
      // change.
      Vals.push_back(ProfileData->getOperand(i));
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128,
                mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
                    ->getValue()
                    .getZExtValue());
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}

/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

// Shared worker for the CreateMalloc overloads.  Exactly one of
// InsertBefore/InsertAtEnd must be non-null; in the InsertAtEnd mode the
// result bitcast (if any) is NOT inserted -- that is the caller's job.
static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block, that is
/// the responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

// Shared worker for CallInst::CreateFree: emits "free(i8* <Source>)",
// bitcasting Source to i8* first when needed.  Exactly one of
// InsertBefore/InsertAtEnd must be non-null.  In the InsertAtEnd mode the
// bitcast (if any) is appended to the block but the free call itself is
// left uninserted -- the caller must insert it (see the CreateFree notes).
static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, None, InsertBefore, nullptr);
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block, that is the
/// responsibility of the caller.
754 Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) { 755 Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd); 756 assert(FreeCall && "CreateFree did not create a CallInst"); 757 return FreeCall; 758 } 759 Instruction *CallInst::CreateFree(Value *Source, 760 ArrayRef<OperandBundleDef> Bundles, 761 BasicBlock *InsertAtEnd) { 762 Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd); 763 assert(FreeCall && "CreateFree did not create a CallInst"); 764 return FreeCall; 765 } 766 767 //===----------------------------------------------------------------------===// 768 // InvokeInst Implementation 769 //===----------------------------------------------------------------------===// 770 771 void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal, 772 BasicBlock *IfException, ArrayRef<Value *> Args, 773 ArrayRef<OperandBundleDef> Bundles, 774 const Twine &NameStr) { 775 this->FTy = FTy; 776 777 assert((int)getNumOperands() == 778 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) && 779 "NumOperands not set up?"); 780 setNormalDest(IfNormal); 781 setUnwindDest(IfException); 782 setCalledOperand(Fn); 783 784 #ifndef NDEBUG 785 assert(((Args.size() == FTy->getNumParams()) || 786 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) && 787 "Invoking a function with bad signature"); 788 789 for (unsigned i = 0, e = Args.size(); i != e; i++) 790 assert((i >= FTy->getNumParams() || 791 FTy->getParamType(i) == Args[i]->getType()) && 792 "Invoking a function with a bad signature!"); 793 #endif 794 795 llvm::copy(Args, op_begin()); 796 797 auto It = populateBundleOperandInfos(Bundles, Args.size()); 798 (void)It; 799 assert(It + 3 == op_end() && "Should add up!"); 800 801 setName(NameStr); 802 } 803 804 InvokeInst::InvokeInst(const InvokeInst &II) 805 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, 806 OperandTraits<CallBase>::op_end(this) - II.getNumOperands(), 807 
II.getNumOperands()) {
  // (Tail of the InvokeInst copy constructor begun above this chunk.)
  // Clone calling convention, all operands, and the bundle bookkeeping; the
  // operand Uses were already allocated by the CallBase base constructor.
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

/// Create a clone of \p II with a possibly different set of operand bundles
/// \p OpB, inserted before \p InsertPt.  Everything else (callee, args,
/// destinations, attributes, debug location) is copied from \p II.
InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}


/// Return the landingpad instruction of this invoke's unwind destination.
/// The cast is justified by IR invariants: an invoke's unwind edge must lead
/// to an EH pad block whose first non-PHI is a landingpad.
LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

//===----------------------------------------------------------------------===//
//                        CallBrInst Implementation
//===----------------------------------------------------------------------===//

/// Set up a freshly-allocated callbr: record the function type, wire up the
/// fallthrough and indirect destinations, the callee, the argument operands
/// and any operand bundles.  The operand space itself was sized by Create().
void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

#ifndef NDEBUG
  // Each fixed (non-vararg) argument must match the parameter type exactly.
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  // Bundle inputs end right where the callee + default dest (the "+ 2") and
  // the indirect destinations begin.
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

/// Indirect destination #i is being replaced by \p B.  Any argument operand
/// that is the blockaddress of the old destination is rewritten to the
/// blockaddress of the new one, keeping the args consistent with the edges.
void CallBrInst::updateArgBlockAddresses(unsigned i, BasicBlock *B) {
  assert(getNumIndirectDests() > i && "IndirectDest # out of range for callbr");
  if (BasicBlock *OldBB = getIndirectDest(i)) {
    BlockAddress *Old = BlockAddress::get(OldBB);
    BlockAddress *New = BlockAddress::get(B);
    for (unsigned ArgNo = 0, e = getNumArgOperands(); ArgNo != e; ++ArgNo)
      // dyn_cast may yield null for non-blockaddress args; null != Old, so
      // the comparison safely skips them.
      if (dyn_cast<BlockAddress>(getArgOperand(ArgNo)) == Old)
        setArgOperand(ArgNo, New);
  }
}

CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

/// Create a clone of \p CBI with a possibly different set of operand bundles
/// \p OpB; mirrors the InvokeInst::Create overload above.
CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
//                        ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  // A ret has either 0 operands (void) or 1 (the returned value).
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  // !!retVal folds "null retVal" into a 0-operand ret.
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

/// Construct a `ret void` appended to \p InsertAtEnd.
ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
//                        ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

/// `resume` always has exactly one operand: the exception value \p Exn.
ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
//                      CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  // Copy the packed subclass bits (incl. the unwind-dest flag) wholesale.
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

/// Operand 0 is the cleanuppad; operand 1 (present only when \p UnwindBB is
/// non-null, recorded via UnwindDestField) is the unwind destination.
void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
/// Operand 0 is the catchpad being exited, operand 1 the successor block.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
//                       CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

/// CatchSwitch uses hung-off uses (operand list grows as handlers are
/// added), so the base Instruction is constructed with no inline operands.
/// \p NumReservedValues counts the expected handlers; the unwind dest (if
/// any) and the parent pad ("+ 1" below) need extra slots.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  // Slot 0 (parent pad) was already set by init(); copy the rest verbatim.
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

/// Allocate the hung-off use array and install the fixed operands: slot 0 is
/// the parent pad; slot 1 is the unwind destination when present (also
/// recorded in the UnwindDestField subclass bit).
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

/// Append \p Handler to the operand list, growing the hung-off storage first
/// if needed.
void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
//                        FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
/// Shared set-up for catchpad/cleanuppad: the funclet args occupy the leading
/// operand slots and the parent pad takes the final one (via setParentPad).
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
//                      UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
//                        BranchInst Implementation
//===----------------------------------------------------------------------===//

/// Debug-only sanity check: a conditional branch must test an i1.
void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

/// Branch operands are laid out from the END of the operand array (hence the
/// negative Op<> indices): Op<-1> is the taken/true destination, Op<-2> the
/// false destination, Op<-3> the condition.
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

//===----------------------------------------------------------------------===//
//                        AllocaInst Implementation
//===----------------------------------------------------------------------===//

/// Normalize an alloca's array-size operand: a null \p Amt means a single
/// element, represented as the i32 constant 1.
static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

/// Default alloca alignment is the datalayout's *preferred* alignment for the
/// allocated type, which requires the insertion point to be inside a Module.
static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeAllocaDefaultAlign(Ty, I->getParent());
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}

/// The fully-explicit constructor the overloads above delegate to.  The
/// alloca's value type is a pointer-to-Ty in \p AddrSpace; its single operand
/// is the (defaulted) array size.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}


/// Return true unless the array-size operand is the constant 1; a non-constant
/// size is conservatively treated as an array allocation.
bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  // inalloca allocas are materialized at call sites and are never "static".
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}

//===----------------------------------------------------------------------===//
//                           LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

/// Default load/store alignment is the datalayout's *ABI* alignment for the
/// accessed type (contrast with the preferred alignment used for allocas).
static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeLoadStoreDefaultAlign(Ty, I->getParent());
}

// The following constructors form a delegation chain that fills in defaults:
// non-volatile -> ABI alignment -> non-atomic, culminating in the two
// fully-explicit constructors below.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine
&Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

/// Fully-explicit load constructor (insert-before form).  \p Ty must match
/// the pointee type of \p Ptr (checked below; pre-opaque-pointer invariant).
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

/// Fully-explicit load constructor (insert-at-end form).
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

//===----------------------------------------------------------------------===//
//                        StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(0)->getType() ==
         cast<PointerType>(getOperand(1)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}

// As with LoadInst, these constructors form a defaulting delegation chain:
// non-volatile -> ABI alignment -> non-atomic.
StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

/// Fully-explicit store constructor: operand 0 is the stored value, operand 1
/// the address; a store itself produces void.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this),
InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}


//===----------------------------------------------------------------------===//
//                       AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

/// Install cmpxchg operands and attributes, then verify the IR invariants:
/// both orderings must be atomic, the failure ordering may be no stronger
/// than the success ordering, and it may not carry release semantics.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Cmp type!");
  assert(getOperand(2)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to NewVal type!");
  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
         "AtomicCmpXchg failure argument shall be no stronger than the success "
         "argument");
  assert(FailureOrdering != AtomicOrdering::Release &&
         FailureOrdering != AtomicOrdering::AcquireRelease &&
         "AtomicCmpXchg failure ordering cannot include release semantics");
}

/// cmpxchg produces a { ValueType, i1 } pair: the loaded value plus a
/// success flag — hence the StructType result below.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

//===----------------------------------------------------------------------===//
//                        AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

/// Install atomicrmw operands/attributes and verify that the pointer operand
/// points at the value type and that the ordering is genuinely atomic.
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, 1575 Align Alignment, AtomicOrdering Ordering, 1576 SyncScope::ID SSID, Instruction *InsertBefore) 1577 : Instruction(Val->getType(), AtomicRMW, 1578 OperandTraits<AtomicRMWInst>::op_begin(this), 1579 OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) { 1580 Init(Operation, Ptr, Val, Alignment, Ordering, SSID); 1581 } 1582 1583 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, 1584 Align Alignment, AtomicOrdering Ordering, 1585 SyncScope::ID SSID, BasicBlock *InsertAtEnd) 1586 : Instruction(Val->getType(), AtomicRMW, 1587 OperandTraits<AtomicRMWInst>::op_begin(this), 1588 OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) { 1589 Init(Operation, Ptr, Val, Alignment, Ordering, SSID); 1590 } 1591 1592 StringRef AtomicRMWInst::getOperationName(BinOp Op) { 1593 switch (Op) { 1594 case AtomicRMWInst::Xchg: 1595 return "xchg"; 1596 case AtomicRMWInst::Add: 1597 return "add"; 1598 case AtomicRMWInst::Sub: 1599 return "sub"; 1600 case AtomicRMWInst::And: 1601 return "and"; 1602 case AtomicRMWInst::Nand: 1603 return "nand"; 1604 case AtomicRMWInst::Or: 1605 return "or"; 1606 case AtomicRMWInst::Xor: 1607 return "xor"; 1608 case AtomicRMWInst::Max: 1609 return "max"; 1610 case AtomicRMWInst::Min: 1611 return "min"; 1612 case AtomicRMWInst::UMax: 1613 return "umax"; 1614 case AtomicRMWInst::UMin: 1615 return "umin"; 1616 case AtomicRMWInst::FAdd: 1617 return "fadd"; 1618 case AtomicRMWInst::FSub: 1619 return "fsub"; 1620 case AtomicRMWInst::BAD_BINOP: 1621 return "<invalid operation>"; 1622 } 1623 1624 llvm_unreachable("invalid atomicrmw operation"); 1625 } 1626 1627 //===----------------------------------------------------------------------===// 1628 // FenceInst Implementation 1629 //===----------------------------------------------------------------------===// 1630 1631 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, 1632 SyncScope::ID SSID, 1633 
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

//===----------------------------------------------------------------------===//
//                     GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

/// Operand 0 is the base pointer; the indices occupy the remaining slots.
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}

/// Step \p Ty by one GEP index \p Idx: struct indices select a field (and
/// must be valid constants), array/vector indices select the element type.
/// Returns null when the index cannot be applied to \p Ty.
Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

/// Constant-index overload of the above.
Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

/// Walk a full GEP index list.  The first index steps through the base
/// pointer without changing the pointee type, so only IdxList[1..] are
/// applied; returns null as soon as an index is invalid for the current type.
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
    if (!Ty)
      return Ty;
  }
  return Ty;
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros.  If so, the result pointer and the first operand have the same
/// value, just potentially different types.
bool GetElementPtrInst::hasAllZeroIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero()) return false;
    } else {
      return false;
    }
  }
  return true;
}

/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers.  If so, the result pointer and the first operand have
/// a constant offset between them.
1741 bool GetElementPtrInst::hasAllConstantIndices() const { 1742 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1743 if (!isa<ConstantInt>(getOperand(i))) 1744 return false; 1745 } 1746 return true; 1747 } 1748 1749 void GetElementPtrInst::setIsInBounds(bool B) { 1750 cast<GEPOperator>(this)->setIsInBounds(B); 1751 } 1752 1753 bool GetElementPtrInst::isInBounds() const { 1754 return cast<GEPOperator>(this)->isInBounds(); 1755 } 1756 1757 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL, 1758 APInt &Offset) const { 1759 // Delegate to the generic GEPOperator implementation. 1760 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset); 1761 } 1762 1763 //===----------------------------------------------------------------------===// 1764 // ExtractElementInst Implementation 1765 //===----------------------------------------------------------------------===// 1766 1767 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1768 const Twine &Name, 1769 Instruction *InsertBef) 1770 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1771 ExtractElement, 1772 OperandTraits<ExtractElementInst>::op_begin(this), 1773 2, InsertBef) { 1774 assert(isValidOperands(Val, Index) && 1775 "Invalid extractelement instruction operands!"); 1776 Op<0>() = Val; 1777 Op<1>() = Index; 1778 setName(Name); 1779 } 1780 1781 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1782 const Twine &Name, 1783 BasicBlock *InsertAE) 1784 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1785 ExtractElement, 1786 OperandTraits<ExtractElementInst>::op_begin(this), 1787 2, InsertAE) { 1788 assert(isValidOperands(Val, Index) && 1789 "Invalid extractelement instruction operands!"); 1790 1791 Op<0>() = Val; 1792 Op<1>() = Index; 1793 setName(Name); 1794 } 1795 1796 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) { 1797 if (!Val->getType()->isVectorTy() || 
!Index->getType()->isIntegerTy()) 1798 return false; 1799 return true; 1800 } 1801 1802 //===----------------------------------------------------------------------===// 1803 // InsertElementInst Implementation 1804 //===----------------------------------------------------------------------===// 1805 1806 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1807 const Twine &Name, 1808 Instruction *InsertBef) 1809 : Instruction(Vec->getType(), InsertElement, 1810 OperandTraits<InsertElementInst>::op_begin(this), 1811 3, InsertBef) { 1812 assert(isValidOperands(Vec, Elt, Index) && 1813 "Invalid insertelement instruction operands!"); 1814 Op<0>() = Vec; 1815 Op<1>() = Elt; 1816 Op<2>() = Index; 1817 setName(Name); 1818 } 1819 1820 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1821 const Twine &Name, 1822 BasicBlock *InsertAE) 1823 : Instruction(Vec->getType(), InsertElement, 1824 OperandTraits<InsertElementInst>::op_begin(this), 1825 3, InsertAE) { 1826 assert(isValidOperands(Vec, Elt, Index) && 1827 "Invalid insertelement instruction operands!"); 1828 1829 Op<0>() = Vec; 1830 Op<1>() = Elt; 1831 Op<2>() = Index; 1832 setName(Name); 1833 } 1834 1835 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt, 1836 const Value *Index) { 1837 if (!Vec->getType()->isVectorTy()) 1838 return false; // First operand of insertelement must be vector type. 1839 1840 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType()) 1841 return false;// Second operand of insertelement must be vector element type. 1842 1843 if (!Index->getType()->isIntegerTy()) 1844 return false; // Third operand of insertelement must be i32. 
1845 return true; 1846 } 1847 1848 //===----------------------------------------------------------------------===// 1849 // ShuffleVectorInst Implementation 1850 //===----------------------------------------------------------------------===// 1851 1852 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1853 const Twine &Name, 1854 Instruction *InsertBefore) 1855 : Instruction( 1856 VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1857 cast<VectorType>(Mask->getType())->getElementCount()), 1858 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this), 1859 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) { 1860 assert(isValidOperands(V1, V2, Mask) && 1861 "Invalid shuffle vector instruction operands!"); 1862 1863 Op<0>() = V1; 1864 Op<1>() = V2; 1865 SmallVector<int, 16> MaskArr; 1866 getShuffleMask(cast<Constant>(Mask), MaskArr); 1867 setShuffleMask(MaskArr); 1868 setName(Name); 1869 } 1870 1871 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1872 const Twine &Name, BasicBlock *InsertAtEnd) 1873 : Instruction( 1874 VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1875 cast<VectorType>(Mask->getType())->getElementCount()), 1876 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this), 1877 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) { 1878 assert(isValidOperands(V1, V2, Mask) && 1879 "Invalid shuffle vector instruction operands!"); 1880 1881 Op<0>() = V1; 1882 Op<1>() = V2; 1883 SmallVector<int, 16> MaskArr; 1884 getShuffleMask(cast<Constant>(Mask), MaskArr); 1885 setShuffleMask(MaskArr); 1886 setName(Name); 1887 } 1888 1889 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 1890 const Twine &Name, 1891 Instruction *InsertBefore) 1892 : Instruction( 1893 VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1894 Mask.size(), isa<ScalableVectorType>(V1->getType())), 1895 ShuffleVector, 
OperandTraits<ShuffleVectorInst>::op_begin(this), 1896 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) { 1897 assert(isValidOperands(V1, V2, Mask) && 1898 "Invalid shuffle vector instruction operands!"); 1899 Op<0>() = V1; 1900 Op<1>() = V2; 1901 setShuffleMask(Mask); 1902 setName(Name); 1903 } 1904 1905 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 1906 const Twine &Name, BasicBlock *InsertAtEnd) 1907 : Instruction( 1908 VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1909 Mask.size(), isa<ScalableVectorType>(V1->getType())), 1910 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this), 1911 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) { 1912 assert(isValidOperands(V1, V2, Mask) && 1913 "Invalid shuffle vector instruction operands!"); 1914 1915 Op<0>() = V1; 1916 Op<1>() = V2; 1917 setShuffleMask(Mask); 1918 setName(Name); 1919 } 1920 1921 void ShuffleVectorInst::commute() { 1922 int NumOpElts = cast<VectorType>(Op<0>()->getType())->getNumElements(); 1923 int NumMaskElts = ShuffleMask.size(); 1924 SmallVector<int, 16> NewMask(NumMaskElts); 1925 for (int i = 0; i != NumMaskElts; ++i) { 1926 int MaskElt = getMaskValue(i); 1927 if (MaskElt == UndefMaskElem) { 1928 NewMask[i] = UndefMaskElem; 1929 continue; 1930 } 1931 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask"); 1932 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts; 1933 NewMask[i] = MaskElt; 1934 } 1935 setShuffleMask(NewMask); 1936 Op<0>().swap(Op<1>()); 1937 } 1938 1939 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, 1940 ArrayRef<int> Mask) { 1941 // V1 and V2 must be vectors of the same type. 1942 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType()) 1943 return false; 1944 1945 // Make sure the mask elements make sense. 
1946 int V1Size = cast<VectorType>(V1->getType())->getElementCount().Min; 1947 for (int Elem : Mask) 1948 if (Elem != UndefMaskElem && Elem >= V1Size * 2) 1949 return false; 1950 1951 if (isa<ScalableVectorType>(V1->getType())) 1952 if ((Mask[0] != 0 && Mask[0] != UndefMaskElem) || !is_splat(Mask)) 1953 return false; 1954 1955 return true; 1956 } 1957 1958 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, 1959 const Value *Mask) { 1960 // V1 and V2 must be vectors of the same type. 1961 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType()) 1962 return false; 1963 1964 // Mask must be vector of i32, and must be the same kind of vector as the 1965 // input vectors 1966 auto *MaskTy = dyn_cast<VectorType>(Mask->getType()); 1967 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) || 1968 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType())) 1969 return false; 1970 1971 // Check to see if Mask is valid. 1972 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask)) 1973 return true; 1974 1975 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) { 1976 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1977 for (Value *Op : MV->operands()) { 1978 if (auto *CI = dyn_cast<ConstantInt>(Op)) { 1979 if (CI->uge(V1Size*2)) 1980 return false; 1981 } else if (!isa<UndefValue>(Op)) { 1982 return false; 1983 } 1984 } 1985 return true; 1986 } 1987 1988 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1989 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1990 for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i) 1991 if (CDS->getElementAsInteger(i) >= V1Size*2) 1992 return false; 1993 return true; 1994 } 1995 1996 return false; 1997 } 1998 1999 void ShuffleVectorInst::getShuffleMask(const Constant *Mask, 2000 SmallVectorImpl<int> &Result) { 2001 unsigned NumElts = cast<VectorType>(Mask->getType())->getElementCount().Min; 2002 if 
(isa<ConstantAggregateZero>(Mask)) { 2003 Result.resize(NumElts, 0); 2004 return; 2005 } 2006 Result.reserve(NumElts); 2007 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 2008 for (unsigned i = 0; i != NumElts; ++i) 2009 Result.push_back(CDS->getElementAsInteger(i)); 2010 return; 2011 } 2012 for (unsigned i = 0; i != NumElts; ++i) { 2013 Constant *C = Mask->getAggregateElement(i); 2014 Result.push_back(isa<UndefValue>(C) ? -1 : 2015 cast<ConstantInt>(C)->getZExtValue()); 2016 } 2017 } 2018 2019 void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) { 2020 ShuffleMask.assign(Mask.begin(), Mask.end()); 2021 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType()); 2022 } 2023 Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask, 2024 Type *ResultTy) { 2025 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext()); 2026 if (isa<ScalableVectorType>(ResultTy)) { 2027 assert(is_splat(Mask) && "Unexpected shuffle"); 2028 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true); 2029 if (Mask[0] == 0) 2030 return Constant::getNullValue(VecTy); 2031 return UndefValue::get(VecTy); 2032 } 2033 SmallVector<Constant *, 16> MaskConst; 2034 for (int Elem : Mask) { 2035 if (Elem == UndefMaskElem) 2036 MaskConst.push_back(UndefValue::get(Int32Ty)); 2037 else 2038 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem)); 2039 } 2040 return ConstantVector::get(MaskConst); 2041 } 2042 2043 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 2044 assert(!Mask.empty() && "Shuffle mask must contain elements"); 2045 bool UsesLHS = false; 2046 bool UsesRHS = false; 2047 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 2048 if (Mask[i] == -1) 2049 continue; 2050 assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) && 2051 "Out-of-bounds shuffle mask element"); 2052 UsesLHS |= (Mask[i] < NumOpElts); 2053 UsesRHS |= (Mask[i] >= NumOpElts); 2054 if (UsesLHS && UsesRHS) 2055 return false; 2056 } 2057 // Allow for 
degenerate case: completely undef mask means neither source is used. 2058 return UsesLHS || UsesRHS; 2059 } 2060 2061 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) { 2062 // We don't have vector operand size information, so assume operands are the 2063 // same size as the mask. 2064 return isSingleSourceMaskImpl(Mask, Mask.size()); 2065 } 2066 2067 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 2068 if (!isSingleSourceMaskImpl(Mask, NumOpElts)) 2069 return false; 2070 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 2071 if (Mask[i] == -1) 2072 continue; 2073 if (Mask[i] != i && Mask[i] != (NumOpElts + i)) 2074 return false; 2075 } 2076 return true; 2077 } 2078 2079 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) { 2080 // We don't have vector operand size information, so assume operands are the 2081 // same size as the mask. 2082 return isIdentityMaskImpl(Mask, Mask.size()); 2083 } 2084 2085 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) { 2086 if (!isSingleSourceMask(Mask)) 2087 return false; 2088 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 2089 if (Mask[i] == -1) 2090 continue; 2091 if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i)) 2092 return false; 2093 } 2094 return true; 2095 } 2096 2097 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) { 2098 if (!isSingleSourceMask(Mask)) 2099 return false; 2100 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 2101 if (Mask[i] == -1) 2102 continue; 2103 if (Mask[i] != 0 && Mask[i] != NumElts) 2104 return false; 2105 } 2106 return true; 2107 } 2108 2109 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) { 2110 // Select is differentiated from identity. It requires using both sources. 
2111 if (isSingleSourceMask(Mask)) 2112 return false; 2113 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 2114 if (Mask[i] == -1) 2115 continue; 2116 if (Mask[i] != i && Mask[i] != (NumElts + i)) 2117 return false; 2118 } 2119 return true; 2120 } 2121 2122 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) { 2123 // Example masks that will return true: 2124 // v1 = <a, b, c, d> 2125 // v2 = <e, f, g, h> 2126 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g> 2127 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h> 2128 2129 // 1. The number of elements in the mask must be a power-of-2 and at least 2. 2130 int NumElts = Mask.size(); 2131 if (NumElts < 2 || !isPowerOf2_32(NumElts)) 2132 return false; 2133 2134 // 2. The first element of the mask must be either a 0 or a 1. 2135 if (Mask[0] != 0 && Mask[0] != 1) 2136 return false; 2137 2138 // 3. The difference between the first 2 elements must be equal to the 2139 // number of elements in the mask. 2140 if ((Mask[1] - Mask[0]) != NumElts) 2141 return false; 2142 2143 // 4. The difference between consecutive even-numbered and odd-numbered 2144 // elements must be equal to 2. 2145 for (int i = 2; i < NumElts; ++i) { 2146 int MaskEltVal = Mask[i]; 2147 if (MaskEltVal == -1) 2148 return false; 2149 int MaskEltPrevVal = Mask[i - 2]; 2150 if (MaskEltVal - MaskEltPrevVal != 2) 2151 return false; 2152 } 2153 return true; 2154 } 2155 2156 bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask, 2157 int NumSrcElts, int &Index) { 2158 // Must extract from a single source. 2159 if (!isSingleSourceMaskImpl(Mask, NumSrcElts)) 2160 return false; 2161 2162 // Must be smaller (else this is an Identity shuffle). 2163 if (NumSrcElts <= (int)Mask.size()) 2164 return false; 2165 2166 // Find start of extraction, accounting that we may start with an UNDEF. 
2167 int SubIndex = -1; 2168 for (int i = 0, e = Mask.size(); i != e; ++i) { 2169 int M = Mask[i]; 2170 if (M < 0) 2171 continue; 2172 int Offset = (M % NumSrcElts) - i; 2173 if (0 <= SubIndex && SubIndex != Offset) 2174 return false; 2175 SubIndex = Offset; 2176 } 2177 2178 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) { 2179 Index = SubIndex; 2180 return true; 2181 } 2182 return false; 2183 } 2184 2185 bool ShuffleVectorInst::isIdentityWithPadding() const { 2186 if (isa<UndefValue>(Op<2>())) 2187 return false; 2188 int NumOpElts = cast<VectorType>(Op<0>()->getType())->getNumElements(); 2189 int NumMaskElts = cast<VectorType>(getType())->getNumElements(); 2190 if (NumMaskElts <= NumOpElts) 2191 return false; 2192 2193 // The first part of the mask must choose elements from exactly 1 source op. 2194 ArrayRef<int> Mask = getShuffleMask(); 2195 if (!isIdentityMaskImpl(Mask, NumOpElts)) 2196 return false; 2197 2198 // All extending must be with undef elements. 2199 for (int i = NumOpElts; i < NumMaskElts; ++i) 2200 if (Mask[i] != -1) 2201 return false; 2202 2203 return true; 2204 } 2205 2206 bool ShuffleVectorInst::isIdentityWithExtract() const { 2207 if (isa<UndefValue>(Op<2>())) 2208 return false; 2209 2210 // FIXME: Not currently possible to express a shuffle mask for a scalable 2211 // vector for this case 2212 if (isa<ScalableVectorType>(getType())) 2213 return false; 2214 2215 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); 2216 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements(); 2217 if (NumMaskElts >= NumOpElts) 2218 return false; 2219 2220 return isIdentityMaskImpl(getShuffleMask(), NumOpElts); 2221 } 2222 2223 bool ShuffleVectorInst::isConcat() const { 2224 // Vector concatenation is differentiated from identity with padding. 
2225 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) || 2226 isa<UndefValue>(Op<2>())) 2227 return false; 2228 2229 int NumOpElts = cast<VectorType>(Op<0>()->getType())->getNumElements(); 2230 int NumMaskElts = getType()->getNumElements(); 2231 if (NumMaskElts != NumOpElts * 2) 2232 return false; 2233 2234 // Use the mask length rather than the operands' vector lengths here. We 2235 // already know that the shuffle returns a vector twice as long as the inputs, 2236 // and neither of the inputs are undef vectors. If the mask picks consecutive 2237 // elements from both inputs, then this is a concatenation of the inputs. 2238 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts); 2239 } 2240 2241 //===----------------------------------------------------------------------===// 2242 // InsertValueInst Class 2243 //===----------------------------------------------------------------------===// 2244 2245 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 2246 const Twine &Name) { 2247 assert(getNumOperands() == 2 && "NumOperands not initialized?"); 2248 2249 // There's no fundamental reason why we require at least one index 2250 // (other than weirdness with &*IdxBegin being invalid; see 2251 // getelementptr's init routine for example). But there's no 2252 // present need to support it. 
2253 assert(!Idxs.empty() && "InsertValueInst must have at least one index"); 2254 2255 assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) == 2256 Val->getType() && "Inserted value must match indexed type!"); 2257 Op<0>() = Agg; 2258 Op<1>() = Val; 2259 2260 Indices.append(Idxs.begin(), Idxs.end()); 2261 setName(Name); 2262 } 2263 2264 InsertValueInst::InsertValueInst(const InsertValueInst &IVI) 2265 : Instruction(IVI.getType(), InsertValue, 2266 OperandTraits<InsertValueInst>::op_begin(this), 2), 2267 Indices(IVI.Indices) { 2268 Op<0>() = IVI.getOperand(0); 2269 Op<1>() = IVI.getOperand(1); 2270 SubclassOptionalData = IVI.SubclassOptionalData; 2271 } 2272 2273 //===----------------------------------------------------------------------===// 2274 // ExtractValueInst Class 2275 //===----------------------------------------------------------------------===// 2276 2277 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) { 2278 assert(getNumOperands() == 1 && "NumOperands not initialized?"); 2279 2280 // There's no fundamental reason why we require at least one index. 2281 // But there's no present need to support it. 2282 assert(!Idxs.empty() && "ExtractValueInst must have at least one index"); 2283 2284 Indices.append(Idxs.begin(), Idxs.end()); 2285 setName(Name); 2286 } 2287 2288 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI) 2289 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)), 2290 Indices(EVI.Indices) { 2291 SubclassOptionalData = EVI.SubclassOptionalData; 2292 } 2293 2294 // getIndexedType - Returns the type of the element that would be extracted 2295 // with an extractvalue instruction with the specified parameters. 2296 // 2297 // A null type is returned if the indices are invalid for the specified 2298 // pointer type. 
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
                                       ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
      Agg = ST->getElementType(Index);
    } else {
      // Not a valid type to index into.
      return nullptr;
    }
  }
  return const_cast<Type*>(Agg);
}

//===----------------------------------------------------------------------===//
//                             UnaryOperator Class
//===----------------------------------------------------------------------===//

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

// The result type of a unary operator is always its operand's type.
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     Instruction *InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd) {
  UnaryOperator *Res = Create(Op, S, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

// Debug-build sanity checks: currently FNeg is the only unary opcode, and
// it requires a floating-point (or FP vector) operand.
void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

//===----------------------------------------------------------------------===//
//                             BinaryOperator Class
//===----------------------------------------------------------------------===//

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

// Debug-build sanity checks: operand types must match each other and the
// result type, and the opcode must agree with integer vs floating point.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

// Negation is materialized as "sub 0, Op". getZeroValueForNegation returns
// the appropriate zero for both integer and FP types.
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
}

// Bitwise NOT is materialized as "xor Op, -1".
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
                            Op->getType(), Name, InsertAtEnd);
}

// Exchange the two operands to this instruction. This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
// is changed.
2550 bool BinaryOperator::swapOperands() { 2551 if (!isCommutative()) 2552 return true; // Can't commute operands 2553 Op<0>().swap(Op<1>()); 2554 return false; 2555 } 2556 2557 //===----------------------------------------------------------------------===// 2558 // FPMathOperator Class 2559 //===----------------------------------------------------------------------===// 2560 2561 float FPMathOperator::getFPAccuracy() const { 2562 const MDNode *MD = 2563 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath); 2564 if (!MD) 2565 return 0.0; 2566 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0)); 2567 return Accuracy->getValueAPF().convertToFloat(); 2568 } 2569 2570 //===----------------------------------------------------------------------===// 2571 // CastInst Class 2572 //===----------------------------------------------------------------------===// 2573 2574 // Just determine if this cast only deals with integral->integral conversion. 2575 bool CastInst::isIntegerCast() const { 2576 switch (getOpcode()) { 2577 default: return false; 2578 case Instruction::ZExt: 2579 case Instruction::SExt: 2580 case Instruction::Trunc: 2581 return true; 2582 case Instruction::BitCast: 2583 return getOperand(0)->getType()->isIntegerTy() && 2584 getType()->isIntegerTy(); 2585 } 2586 } 2587 2588 bool CastInst::isLosslessCast() const { 2589 // Only BitCast can be lossless, exit fast if we're not BitCast 2590 if (getOpcode() != Instruction::BitCast) 2591 return false; 2592 2593 // Identity cast is always lossless 2594 Type *SrcTy = getOperand(0)->getType(); 2595 Type *DstTy = getType(); 2596 if (SrcTy == DstTy) 2597 return true; 2598 2599 // Pointer to pointer is always lossless. 2600 if (SrcTy->isPointerTy()) 2601 return DstTy->isPointerTy(); 2602 return false; // Other types have no identity values 2603 } 2604 2605 /// This function determines if the CastInst does not require any bits to be 2606 /// changed in order to effect the cast. 
Essentially, it identifies cases where 2607 /// no code gen is necessary for the cast, hence the name no-op cast. For 2608 /// example, the following are all no-op casts: 2609 /// # bitcast i32* %x to i8* 2610 /// # bitcast <2 x i32> %x to <4 x i16> 2611 /// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only 2612 /// Determine if the described cast is a no-op. 2613 bool CastInst::isNoopCast(Instruction::CastOps Opcode, 2614 Type *SrcTy, 2615 Type *DestTy, 2616 const DataLayout &DL) { 2617 switch (Opcode) { 2618 default: llvm_unreachable("Invalid CastOp"); 2619 case Instruction::Trunc: 2620 case Instruction::ZExt: 2621 case Instruction::SExt: 2622 case Instruction::FPTrunc: 2623 case Instruction::FPExt: 2624 case Instruction::UIToFP: 2625 case Instruction::SIToFP: 2626 case Instruction::FPToUI: 2627 case Instruction::FPToSI: 2628 case Instruction::AddrSpaceCast: 2629 // TODO: Target informations may give a more accurate answer here. 2630 return false; 2631 case Instruction::BitCast: 2632 return true; // BitCast never modifies bits. 2633 case Instruction::PtrToInt: 2634 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() == 2635 DestTy->getScalarSizeInBits(); 2636 case Instruction::IntToPtr: 2637 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() == 2638 SrcTy->getScalarSizeInBits(); 2639 } 2640 } 2641 2642 bool CastInst::isNoopCast(const DataLayout &DL) const { 2643 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL); 2644 } 2645 2646 /// This function determines if a pair of casts can be eliminated and what 2647 /// opcode should be used in the elimination. This assumes that there are two 2648 /// instructions like this: 2649 /// * %F = firstOpcode SrcTy %x to MidTy 2650 /// * %S = secondOpcode MidTy %F to DstTy 2651 /// The function returns a resultOpcode so these two casts can be replaced with: 2652 /// * %Replacement = resultOpcode %SrcTy %x to DstTy 2653 /// If no such cast is permitted, the function returns 0. 
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below. The rows correspond to firstOp, the columns
  // correspond to secondOp. In looking at the table below, keep in mind
  // the following cast properties:
  //
  //           Size Compare      Source            Destination
  // Operator  Src ? Size        Type      Sign    Type       Sign
  // -------- ------------   -------------------  ---------------------
  // TRUNC          >        Integer      Any     Integral    Any
  // ZEXT           <        Integral   Unsigned  Integer     Any
  // SEXT           <        Integral    Signed   Integer     Any
  // FPTOUI        n/a       FloatPt      n/a     Integral  Unsigned
  // FPTOSI        n/a       FloatPt      n/a     Integral   Signed
  // UITOFP        n/a       Integral   Unsigned  FloatPt     n/a
  // SITOFP        n/a       Integral    Signed   FloatPt     n/a
  // FPTRUNC        >        FloatPt      n/a     FloatPt     n/a
  // FPEXT          <        FloatPt      n/a     FloatPt     n/a
  // PTRTOINT      n/a       Pointer      n/a     Integral  Unsigned
  // INTTOPTR      n/a       Integral   Unsigned  Pointer     n/a
  // BITCAST        =        FirstClass   n/a     FirstClass  n/a
  // ADDRSPCST     n/a       Pointer      n/a     Pointer     n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc. We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 6:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
        return secondOp;
      return 0;
    case 7: {
      // Disable inttoptr/ptrtoint optimization if enabled.
      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same size
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize == DstSize)
        return Instruction::BitCast;
      else if (SrcSize < DstSize)
        return firstOp;
      return secondOp;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast if the element type of
      // bitcast's source is the same as that of addrspacecast's destination.
      if (SrcTy->getScalarType()->getPointerElementType() ==
          DstTy->getScalarType()->getPointerElementType())
        return Instruction::AddrSpaceCast;
      return 0;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}

/// Construct the CastInst subclass that implements \p op, inserted before
/// \p InsertBefore.
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

/// Construct the CastInst subclass that implements \p op, appended to
/// \p InsertAtEnd.
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertAtEnd);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertAtEnd);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertAtEnd);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertAtEnd);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertAtEnd);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertAtEnd);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertAtEnd);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

/// Create a ZExt when the source is narrower than \p Ty, otherwise a BitCast
/// (same scalar size), inserted before \p InsertBefore.
CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}

/// Create a ZExt when the source is narrower than \p Ty, otherwise a BitCast
/// (same scalar size), appended to \p InsertAtEnd.
CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
}

/// Like CreateZExtOrBitCast, but sign-extends when widening.
CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}

/// Like CreateZExtOrBitCast, but sign-extends when widening.
CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
}

/// Create a Trunc when the source is wider than \p Ty, otherwise a BitCast
/// (same scalar size), inserted before \p InsertBefore.
CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         const Twine &Name,
                                         Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
}

/// Create a Trunc when the source is wider than \p Ty, otherwise a BitCast
/// (same scalar size), appended to \p InsertAtEnd.
CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         const Twine &Name,
                                         BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
}

/// Create a PtrToInt (integer destination) or a pointer bitcast /
/// addrspacecast (pointer destination), appended to \p InsertAtEnd.
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getNumElements() ==
              cast<VectorType>(S->getType())->getNumElements()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
}

/// Create a BitCast or a PtrToInt cast instruction
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getNumElements() ==
              cast<VectorType>(S->getType())->getNumElements()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
}

/// Create an AddrSpaceCast when the address spaces differ, otherwise a
/// BitCast, appended to \p InsertAtEnd.
CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  Value *S, Type *Ty,
  const Twine &Name,
  BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);

  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
}

/// Create an AddrSpaceCast when the address spaces differ, otherwise a
/// BitCast, inserted before \p InsertBefore.
CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  Value *S, Type *Ty,
  const Twine &Name,
  Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}

/// Create a PtrToInt/IntToPtr for mixed pointer/integer operands, otherwise a
/// BitCast.
CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
                                           const Twine &Name,
                                           Instruction *InsertBefore) {
  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
    return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}

/// Create a Trunc/SExt/ZExt/BitCast as appropriate for an integer-to-integer
/// cast, inserted before \p InsertBefore.
CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
                                      bool isSigned, const Twine &Name,
                                      Instruction *InsertBefore) {
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
         "Invalid integer cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::Trunc :
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
  return Create(opcode, C, Ty, Name, InsertBefore);
}

/// Create a Trunc/SExt/ZExt/BitCast as appropriate for an integer-to-integer
/// cast, appended to \p InsertAtEnd.
CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
                                      bool isSigned, const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  // NOTE(review): assert message differs from the InsertBefore overload
  // ("Invalid integer cast") — looks unintentional; confirm upstream.
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::Trunc :
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
  return Create(opcode, C, Ty, Name, InsertAtEnd);
}

/// Create an FPTrunc/FPExt/BitCast as appropriate for a float-to-float cast,
/// inserted before \p InsertBefore.
CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
                                 const Twine &Name,
                                 Instruction *InsertBefore) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertBefore);
}

/// Create an FPTrunc/FPExt/BitCast as appropriate for a float-to-float cast,
/// appended to \p InsertAtEnd.
CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
                                 const Twine &Name,
                                 BasicBlock *InsertAtEnd) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ?
      Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertAtEnd);
}

// Check whether it is valid to call getCastOpcode for these types.
// This routine must be kept in sync with getCastOpcode.
bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
  // Only first-class (SSA-capable) types participate in casts at all.
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  // Same-length vector casts reduce to a cast of the element types.
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {               // Casting to integral
    if (SrcTy->isIntegerTy())                // Casting from integral
      return true;
    if (SrcTy->isFloatingPointTy())          // Casting from floating pt
      return true;
    if (SrcTy->isVectorTy())                 // Casting from vector
      return DestBits == SrcBits;
    // Casting from something else
    return SrcTy->isPointerTy();
  }
  if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy())                // Casting from integral
      return true;
    if (SrcTy->isFloatingPointTy())          // Casting from floating pt
      return true;
    if (SrcTy->isVectorTy())                 // Casting from vector
      return DestBits == SrcBits;
    // Casting from something else
    return false;
  }
  if (DestTy->isVectorTy())                  // Casting to vector
    return DestBits == SrcBits;
  if (DestTy->isPointerTy()) {               // Casting to pointer
    if (SrcTy->isPointerTy())                // Casting from pointer
      return true;
    return SrcTy->isIntegerTy();             // Casting from integral
  }
  if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy())
      return DestBits == SrcBits;            // 64-bit vector to MMX
    return false;
  }                                          // Casting to something else
  return false;
}

bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  // Same-length vector casts reduce to a cast of the element types.
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }
    }
  }

  // Pointers are bitcastable iff they live in the same address space.
  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
    if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
      return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
    }
  }

  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Could still have vectors of pointers if the number of elements doesn't
  // match
  if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0)
    return false;

  if (SrcBits != DestBits)
    return false;

  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
    return false;

  return true;
}

/// Returns true when the value can be converted between these types by a
/// bitcast or a no-op pointer<->integer cast under the given data layout.
bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
                                          const DataLayout &DL) {
  // ptrtoint and inttoptr are not allowed on non-integral pointers
  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
    if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));
  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
    if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));

  return isBitCastable(SrcTy, DestTy);
}

// Provide a way to get a "cast" where the cast opcode is inferred from the
// types and size of the operand. This, basically, is a parallel of the
// logic in the castIsValid function below.  This axiom should hold:
//   castIsValid( getCastOpcode(Val, Ty), Val, Ty)
// should not assert in castIsValid. In other words, this produces a "correct"
// casting opcode for the arguments passed to it.
// This routine must be kept in sync with isCastable.
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {                      // Casting to integral
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                               // int -> smaller int
      else if (DestBits > SrcBits) {                // its an extension
        if (SrcIsSigned)
          return SExt;                              // signed -> SEXT
        else
          return ZExt;                              // unsigned -> ZEXT
      } else {
        return BitCast;                             // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                              // FP -> sint
      else
        return FPToUI;                              // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                               // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                              // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                              // sint -> FP
      else
        return UIToFP;                              // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                             // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                               // FP -> larger FP
      } else {
        return BitCast;                             // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                               // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                               // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                              // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                               // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}

//===----------------------------------------------------------------------===//
// CastInst SubClass Constructors
//===----------------------------------------------------------------------===//

/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
  // Check for type sanity on the arguments
  Type *SrcTy = S->getType();

  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits, and whether we are dealing
  // with vector types, we'll need this later.
  bool SrcIsVec = isa<VectorType>(SrcTy);
  bool DstIsVec = isa<VectorType>(DstTy);
  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks that
  // scalars are not being converted to vectors or vectors to scalars).
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
                                : ElementCount(0, false);
  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
                                : ElementCount(0, false);

  // Switch on the opcode provided
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC;
  case Instruction::PtrToInt:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. No bits change.
    // However, you can't cast pointers to anything but pointers.
    if (!SrcPtrTy != !DstPtrTy)
      return false;

    // For non-pointer cases, the cast is okay if the source and destination bit
    // widths are identical.
    if (!SrcPtrTy)
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();

    // If both are pointers then the address spaces must match.
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
      return false;

    // A vector of pointers must have the same number of elements.
    if (SrcIsVec && DstIsVec)
      return SrcEC == DstEC;
    if (SrcIsVec)
      return SrcEC == ElementCount(1, false);
    if (DstIsVec)
      return DstEC == ElementCount(1, false);

    return true;
  }
  case Instruction::AddrSpaceCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    if (!SrcPtrTy)
      return false;

    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
    if (!DstPtrTy)
      return false;

    // An addrspacecast must actually change the address space.
    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
      return false;

    return SrcEC == DstEC;
  }
  }
}

TruncInst::TruncInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}

TruncInst::TruncInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}

ZExtInst::ZExtInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}

ZExtInst::ZExtInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) :
CastInst(Ty, ZExt, S, Name, InsertAtEnd) { 3437 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3438 } 3439 SExtInst::SExtInst( 3440 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3441 ) : CastInst(Ty, SExt, S, Name, InsertBefore) { 3442 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3443 } 3444 3445 SExtInst::SExtInst( 3446 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3447 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) { 3448 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3449 } 3450 3451 FPTruncInst::FPTruncInst( 3452 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3453 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { 3454 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3455 } 3456 3457 FPTruncInst::FPTruncInst( 3458 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3459 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { 3460 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3461 } 3462 3463 FPExtInst::FPExtInst( 3464 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3465 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) { 3466 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3467 } 3468 3469 FPExtInst::FPExtInst( 3470 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3471 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { 3472 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3473 } 3474 3475 UIToFPInst::UIToFPInst( 3476 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3477 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) { 3478 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3479 } 3480 3481 UIToFPInst::UIToFPInst( 3482 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3483 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) { 3484 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3485 } 3486 3487 SIToFPInst::SIToFPInst( 3488 Value *S, Type *Ty, 
const Twine &Name, Instruction *InsertBefore 3489 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) { 3490 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3491 } 3492 3493 SIToFPInst::SIToFPInst( 3494 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3495 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) { 3496 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3497 } 3498 3499 FPToUIInst::FPToUIInst( 3500 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3501 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) { 3502 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3503 } 3504 3505 FPToUIInst::FPToUIInst( 3506 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3507 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) { 3508 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3509 } 3510 3511 FPToSIInst::FPToSIInst( 3512 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3513 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) { 3514 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3515 } 3516 3517 FPToSIInst::FPToSIInst( 3518 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3519 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) { 3520 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3521 } 3522 3523 PtrToIntInst::PtrToIntInst( 3524 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3525 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) { 3526 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3527 } 3528 3529 PtrToIntInst::PtrToIntInst( 3530 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3531 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) { 3532 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3533 } 3534 3535 IntToPtrInst::IntToPtrInst( 3536 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3537 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) { 3538 
assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3539 } 3540 3541 IntToPtrInst::IntToPtrInst( 3542 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3543 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) { 3544 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3545 } 3546 3547 BitCastInst::BitCastInst( 3548 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3549 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) { 3550 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3551 } 3552 3553 BitCastInst::BitCastInst( 3554 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3555 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) { 3556 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3557 } 3558 3559 AddrSpaceCastInst::AddrSpaceCastInst( 3560 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3561 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) { 3562 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3563 } 3564 3565 AddrSpaceCastInst::AddrSpaceCastInst( 3566 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3567 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) { 3568 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3569 } 3570 3571 //===----------------------------------------------------------------------===// 3572 // CmpInst Classes 3573 //===----------------------------------------------------------------------===// 3574 3575 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3576 Value *RHS, const Twine &Name, Instruction *InsertBefore, 3577 Instruction *FlagsSource) 3578 : Instruction(ty, op, 3579 OperandTraits<CmpInst>::op_begin(this), 3580 OperandTraits<CmpInst>::operands(this), 3581 InsertBefore) { 3582 Op<0>() = LHS; 3583 Op<1>() = RHS; 3584 setPredicate((Predicate)predicate); 3585 setName(Name); 3586 if (FlagsSource) 3587 copyIRFlags(FlagsSource); 3588 } 3589 3590 
// Construct a compare instruction, appending to the end of InsertAtEnd.
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertAtEnd) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
}

// Factory: dispatch on the opcode to build either an ICmpInst or an FCmpInst,
// optionally inserting it before InsertBefore.
CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, Instruction *InsertBefore) {
  if (Op == Instruction::ICmp) {
    if (InsertBefore)
      return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
                          S1, S2, Name);
    else
      return new ICmpInst(CmpInst::Predicate(predicate),
                          S1, S2, Name);
  }

  // Any opcode other than ICmp is treated as a floating-point compare.
  if (InsertBefore)
    return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  else
    return new FCmpInst(CmpInst::Predicate(predicate),
                        S1, S2, Name);
}

// Factory: as above, but always appends to InsertAtEnd (must be non-null).
CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, BasicBlock *InsertAtEnd) {
  if (Op == Instruction::ICmp) {
    return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  }
  return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                      S1, S2, Name);
}

// Forward to the subclass implementation (which also swaps the predicate).
void CmpInst::swapOperands() {
  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
    IC->swapOperands();
  else
    cast<FCmpInst>(this)->swapOperands();
}

// Forward to the subclass implementation.
bool CmpInst::isCommutative() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isCommutative();
  return cast<FCmpInst>(this)->isCommutative();
}

// Forward to the subclass implementation.
bool CmpInst::isEquality() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isEquality();
  return cast<FCmpInst>(this)->isEquality();
}

// Return the logical negation of the given predicate: the predicate that
// yields the opposite truth value for every operand pair.  For FP predicates
// this flips ordered <-> unordered as well as the relation.
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown cmp predicate!");
    case ICMP_EQ: return ICMP_NE;
    case ICMP_NE: return ICMP_EQ;
    case ICMP_UGT: return ICMP_ULE;
    case ICMP_ULT: return ICMP_UGE;
    case ICMP_UGE: return ICMP_ULT;
    case ICMP_ULE: return ICMP_UGT;
    case ICMP_SGT: return ICMP_SLE;
    case ICMP_SLT: return ICMP_SGE;
    case ICMP_SGE: return ICMP_SLT;
    case ICMP_SLE: return ICMP_SGT;

    case FCMP_OEQ: return FCMP_UNE;
    case FCMP_ONE: return FCMP_UEQ;
    case FCMP_OGT: return FCMP_ULE;
    case FCMP_OLT: return FCMP_UGE;
    case FCMP_OGE: return FCMP_ULT;
    case FCMP_OLE: return FCMP_UGT;
    case FCMP_UEQ: return FCMP_ONE;
    case FCMP_UNE: return FCMP_OEQ;
    case FCMP_UGT: return FCMP_OLE;
    case FCMP_ULT: return FCMP_OGE;
    case FCMP_UGE: return FCMP_OLT;
    case FCMP_ULE: return FCMP_OGT;
    case FCMP_ORD: return FCMP_UNO;
    case FCMP_UNO: return FCMP_ORD;
    case FCMP_TRUE: return FCMP_FALSE;
    case FCMP_FALSE: return FCMP_TRUE;
  }
}

// Return the textual (IR assembly) name of the predicate, or "unknown".
StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default: return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ: return "oeq";
  case FCmpInst::FCMP_OGT: return "ogt";
  case FCmpInst::FCMP_OGE: return "oge";
  case FCmpInst::FCMP_OLT: return "olt";
  case FCmpInst::FCMP_OLE: return "ole";
  case FCmpInst::FCMP_ONE: return "one";
  case FCmpInst::FCMP_ORD: return "ord";
  case FCmpInst::FCMP_UNO: return "uno";
  case FCmpInst::FCMP_UEQ: return "ueq";
  case FCmpInst::FCMP_UGT: return "ugt";
  case FCmpInst::FCMP_UGE: return "uge";
  case FCmpInst::FCMP_ULT: return "ult";
  case FCmpInst::FCMP_ULE: return "ule";
  case FCmpInst::FCMP_UNE: return "une";
  case FCmpInst::FCMP_TRUE: return "true";
  case ICmpInst::ICMP_EQ: return "eq";
  case ICmpInst::ICMP_NE: return "ne";
  case ICmpInst::ICMP_SGT: return "sgt";
  case ICmpInst::ICMP_SGE: return "sge";
  case ICmpInst::ICMP_SLT: return "slt";
  case ICmpInst::ICMP_SLE: return "sle";
  case ICmpInst::ICMP_UGT: return "ugt";
  case ICmpInst::ICMP_UGE: return "uge";
  case ICmpInst::ICMP_ULT: return "ult";
  case ICmpInst::ICMP_ULE: return "ule";
  }
}

// Map an unsigned icmp predicate to its signed equivalent; predicates that
// have no signedness (eq/ne) or are already signed pass through unchanged.
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown icmp predicate!");
    case ICMP_EQ: case ICMP_NE:
    case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
      return pred;
    case ICMP_UGT: return ICMP_SGT;
    case ICMP_ULT: return ICMP_SLT;
    case ICMP_UGE: return ICMP_SGE;
    case ICMP_ULE: return ICMP_SLE;
  }
}

// Map a signed icmp predicate to its unsigned equivalent; eq/ne and already
// unsigned predicates pass through unchanged.
ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown icmp predicate!");
    case ICMP_EQ: case ICMP_NE:
    case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
      return pred;
    case ICMP_SGT: return ICMP_UGT;
    case ICMP_SLT: return ICMP_ULT;
    case ICMP_SGE: return ICMP_UGE;
    case ICMP_SLE: return ICMP_ULE;
  }
}

// Toggle strictness: > <-> >=, < <-> <=, keeping sign/ordering intact.
// Only relational predicates are supported; anything else is unreachable.
CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown or unsupported cmp predicate!");
    case ICMP_SGT: return ICMP_SGE;
    case ICMP_SLT: return ICMP_SLE;
    case ICMP_SGE: return ICMP_SGT;
    case ICMP_SLE: return ICMP_SLT;
    case ICMP_UGT: return ICMP_UGE;
    case ICMP_ULT: return ICMP_ULE;
    case ICMP_UGE: return ICMP_UGT;
    case ICMP_ULE: return ICMP_ULT;

    case FCMP_OGT: return FCMP_OGE;
    case FCMP_OLT: return FCMP_OLE;
    case FCMP_OGE: return FCMP_OGT;
    case FCMP_OLE: return FCMP_OLT;
    case FCMP_UGT: return FCMP_UGE;
    case FCMP_ULT: return FCMP_ULE;
    case FCMP_UGE: return FCMP_UGT;
    case FCMP_ULE: return FCMP_ULT;
  }
}
3765 3766 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) { 3767 switch (pred) { 3768 default: llvm_unreachable("Unknown cmp predicate!"); 3769 case ICMP_EQ: case ICMP_NE: 3770 return pred; 3771 case ICMP_SGT: return ICMP_SLT; 3772 case ICMP_SLT: return ICMP_SGT; 3773 case ICMP_SGE: return ICMP_SLE; 3774 case ICMP_SLE: return ICMP_SGE; 3775 case ICMP_UGT: return ICMP_ULT; 3776 case ICMP_ULT: return ICMP_UGT; 3777 case ICMP_UGE: return ICMP_ULE; 3778 case ICMP_ULE: return ICMP_UGE; 3779 3780 case FCMP_FALSE: case FCMP_TRUE: 3781 case FCMP_OEQ: case FCMP_ONE: 3782 case FCMP_UEQ: case FCMP_UNE: 3783 case FCMP_ORD: case FCMP_UNO: 3784 return pred; 3785 case FCMP_OGT: return FCMP_OLT; 3786 case FCMP_OLT: return FCMP_OGT; 3787 case FCMP_OGE: return FCMP_OLE; 3788 case FCMP_OLE: return FCMP_OGE; 3789 case FCMP_UGT: return FCMP_ULT; 3790 case FCMP_ULT: return FCMP_UGT; 3791 case FCMP_UGE: return FCMP_ULE; 3792 case FCMP_ULE: return FCMP_UGE; 3793 } 3794 } 3795 3796 CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) { 3797 switch (pred) { 3798 case ICMP_SGT: return ICMP_SGE; 3799 case ICMP_SLT: return ICMP_SLE; 3800 case ICMP_UGT: return ICMP_UGE; 3801 case ICMP_ULT: return ICMP_ULE; 3802 case FCMP_OGT: return FCMP_OGE; 3803 case FCMP_OLT: return FCMP_OLE; 3804 case FCMP_UGT: return FCMP_UGE; 3805 case FCMP_ULT: return FCMP_ULE; 3806 default: return pred; 3807 } 3808 } 3809 3810 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) { 3811 assert(CmpInst::isUnsigned(pred) && "Call only with signed predicates!"); 3812 3813 switch (pred) { 3814 default: 3815 llvm_unreachable("Unknown predicate!"); 3816 case CmpInst::ICMP_ULT: 3817 return CmpInst::ICMP_SLT; 3818 case CmpInst::ICMP_ULE: 3819 return CmpInst::ICMP_SLE; 3820 case CmpInst::ICMP_UGT: 3821 return CmpInst::ICMP_SGT; 3822 case CmpInst::ICMP_UGE: 3823 return CmpInst::ICMP_SGE; 3824 } 3825 } 3826 3827 bool CmpInst::isUnsigned(Predicate predicate) { 3828 switch (predicate) { 3829 
default: return false; 3830 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT: 3831 case ICmpInst::ICMP_UGE: return true; 3832 } 3833 } 3834 3835 bool CmpInst::isSigned(Predicate predicate) { 3836 switch (predicate) { 3837 default: return false; 3838 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT: 3839 case ICmpInst::ICMP_SGE: return true; 3840 } 3841 } 3842 3843 bool CmpInst::isOrdered(Predicate predicate) { 3844 switch (predicate) { 3845 default: return false; 3846 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT: 3847 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE: 3848 case FCmpInst::FCMP_ORD: return true; 3849 } 3850 } 3851 3852 bool CmpInst::isUnordered(Predicate predicate) { 3853 switch (predicate) { 3854 default: return false; 3855 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT: 3856 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE: 3857 case FCmpInst::FCMP_UNO: return true; 3858 } 3859 } 3860 3861 bool CmpInst::isTrueWhenEqual(Predicate predicate) { 3862 switch(predicate) { 3863 default: return false; 3864 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE: 3865 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true; 3866 } 3867 } 3868 3869 bool CmpInst::isFalseWhenEqual(Predicate predicate) { 3870 switch(predicate) { 3871 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT: 3872 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true; 3873 default: return false; 3874 } 3875 } 3876 3877 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3878 // If the predicates match, then we know the first condition implies the 3879 // second is true. 
3880 if (Pred1 == Pred2) 3881 return true; 3882 3883 switch (Pred1) { 3884 default: 3885 break; 3886 case ICMP_EQ: 3887 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true. 3888 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE || 3889 Pred2 == ICMP_SLE; 3890 case ICMP_UGT: // A >u B implies A != B and A >=u B are true. 3891 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE; 3892 case ICMP_ULT: // A <u B implies A != B and A <=u B are true. 3893 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE; 3894 case ICMP_SGT: // A >s B implies A != B and A >=s B are true. 3895 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE; 3896 case ICMP_SLT: // A <s B implies A != B and A <=s B are true. 3897 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE; 3898 } 3899 return false; 3900 } 3901 3902 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3903 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2)); 3904 } 3905 3906 //===----------------------------------------------------------------------===// 3907 // SwitchInst Implementation 3908 //===----------------------------------------------------------------------===// 3909 3910 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) { 3911 assert(Value && Default && NumReserved); 3912 ReservedSpace = NumReserved; 3913 setNumHungOffUseOperands(2); 3914 allocHungoffUses(ReservedSpace); 3915 3916 Op<0>() = Value; 3917 Op<1>() = Default; 3918 } 3919 3920 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3921 /// switch on and a default destination. The number of additional cases can 3922 /// be specified here to make memory allocation more efficient. This 3923 /// constructor can also autoinsert before another instruction. 
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertBefore) {
  // Reserve 2 fixed operands (condition + default) plus 2 per expected case.
  init(Value, Default, 2+NumCases*2);
}

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination.  The number of additional cases can
/// be specified here to make memory allocation more efficient.  This
/// constructor also autoinserts at the end of the specified BasicBlock.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertAtEnd) {
  // Reserve 2 fixed operands (condition + default) plus 2 per expected case.
  init(Value, Default, 2+NumCases*2);
}

// Copy constructor: duplicates the operand list (condition, default, and all
// case pairs) without inserting the new instruction anywhere.
SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  // Copy the (value, successor) pairs; operands 0 and 1 were set by init().
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];
    OL[i+1] = InOL[i+1];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}

/// addCase - Add an entry to the switch instruction...
///
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  // Each case occupies two operand slots: the value and the successor.
  if (OpNo+2 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}

/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.  Note this swap-with-last
  // strategy does not preserve case ordering.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  // Return an iterator at the removed slot, which now holds the moved case
  // (or is the new end if the last case was removed).
  return CaseIt(this, idx);
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 3 times.
3996 /// 3997 void SwitchInst::growOperands() { 3998 unsigned e = getNumOperands(); 3999 unsigned NumOps = e*3; 4000 4001 ReservedSpace = NumOps; 4002 growHungoffUses(ReservedSpace); 4003 } 4004 4005 MDNode * 4006 SwitchInstProfUpdateWrapper::getProfBranchWeightsMD(const SwitchInst &SI) { 4007 if (MDNode *ProfileData = SI.getMetadata(LLVMContext::MD_prof)) 4008 if (auto *MDName = dyn_cast<MDString>(ProfileData->getOperand(0))) 4009 if (MDName->getString() == "branch_weights") 4010 return ProfileData; 4011 return nullptr; 4012 } 4013 4014 MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() { 4015 assert(Changed && "called only if metadata has changed"); 4016 4017 if (!Weights) 4018 return nullptr; 4019 4020 assert(SI.getNumSuccessors() == Weights->size() && 4021 "num of prof branch_weights must accord with num of successors"); 4022 4023 bool AllZeroes = 4024 all_of(Weights.getValue(), [](uint32_t W) { return W == 0; }); 4025 4026 if (AllZeroes || Weights.getValue().size() < 2) 4027 return nullptr; 4028 4029 return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights); 4030 } 4031 4032 void SwitchInstProfUpdateWrapper::init() { 4033 MDNode *ProfileData = getProfBranchWeightsMD(SI); 4034 if (!ProfileData) 4035 return; 4036 4037 if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) { 4038 llvm_unreachable("number of prof branch_weights metadata operands does " 4039 "not correspond to number of succesors"); 4040 } 4041 4042 SmallVector<uint32_t, 8> Weights; 4043 for (unsigned CI = 1, CE = SI.getNumSuccessors(); CI <= CE; ++CI) { 4044 ConstantInt *C = mdconst::extract<ConstantInt>(ProfileData->getOperand(CI)); 4045 uint32_t CW = C->getValue().getZExtValue(); 4046 Weights.push_back(CW); 4047 } 4048 this->Weights = std::move(Weights); 4049 } 4050 4051 SwitchInst::CaseIt 4052 SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) { 4053 if (Weights) { 4054 assert(SI.getNumSuccessors() == Weights->size() && 4055 "num of prof 
branch_weights must accord with num of successors"); 4056 Changed = true; 4057 // Copy the last case to the place of the removed one and shrink. 4058 // This is tightly coupled with the way SwitchInst::removeCase() removes 4059 // the cases in SwitchInst::removeCase(CaseIt). 4060 Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back(); 4061 Weights.getValue().pop_back(); 4062 } 4063 return SI.removeCase(I); 4064 } 4065 4066 void SwitchInstProfUpdateWrapper::addCase( 4067 ConstantInt *OnVal, BasicBlock *Dest, 4068 SwitchInstProfUpdateWrapper::CaseWeightOpt W) { 4069 SI.addCase(OnVal, Dest); 4070 4071 if (!Weights && W && *W) { 4072 Changed = true; 4073 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0); 4074 Weights.getValue()[SI.getNumSuccessors() - 1] = *W; 4075 } else if (Weights) { 4076 Changed = true; 4077 Weights.getValue().push_back(W ? *W : 0); 4078 } 4079 if (Weights) 4080 assert(SI.getNumSuccessors() == Weights->size() && 4081 "num of prof branch_weights must accord with num of successors"); 4082 } 4083 4084 SymbolTableList<Instruction>::iterator 4085 SwitchInstProfUpdateWrapper::eraseFromParent() { 4086 // Instruction is erased. Mark as unchanged to not touch it in the destructor. 
4087 Changed = false; 4088 if (Weights) 4089 Weights->resize(0); 4090 return SI.eraseFromParent(); 4091 } 4092 4093 SwitchInstProfUpdateWrapper::CaseWeightOpt 4094 SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) { 4095 if (!Weights) 4096 return None; 4097 return Weights.getValue()[idx]; 4098 } 4099 4100 void SwitchInstProfUpdateWrapper::setSuccessorWeight( 4101 unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) { 4102 if (!W) 4103 return; 4104 4105 if (!Weights && *W) 4106 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0); 4107 4108 if (Weights) { 4109 auto &OldW = Weights.getValue()[idx]; 4110 if (*W != OldW) { 4111 Changed = true; 4112 OldW = *W; 4113 } 4114 } 4115 } 4116 4117 SwitchInstProfUpdateWrapper::CaseWeightOpt 4118 SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI, 4119 unsigned idx) { 4120 if (MDNode *ProfileData = getProfBranchWeightsMD(SI)) 4121 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1) 4122 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1)) 4123 ->getValue() 4124 .getZExtValue(); 4125 4126 return None; 4127 } 4128 4129 //===----------------------------------------------------------------------===// 4130 // IndirectBrInst Implementation 4131 //===----------------------------------------------------------------------===// 4132 4133 void IndirectBrInst::init(Value *Address, unsigned NumDests) { 4134 assert(Address && Address->getType()->isPointerTy() && 4135 "Address of indirectbr must be a pointer"); 4136 ReservedSpace = 1+NumDests; 4137 setNumHungOffUseOperands(1); 4138 allocHungoffUses(ReservedSpace); 4139 4140 Op<0>() = Address; 4141 } 4142 4143 4144 /// growOperands - grow operands - This grows the operand list in response 4145 /// to a push_back style of operation. This grows the number of ops by 2 times. 
///
void IndirectBrInst::growOperands() {
  // Double the operand capacity and reallocate the hung-off use list.
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

// Construct an indirectbr on Address with room for NumCases destinations,
// inserting before InsertBefore if non-null.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumCases);
}

// Construct an indirectbr on Address with room for NumCases destinations,
// appending to the end of InsertAtEnd.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
  init(Address, NumCases);
}

// Copy constructor: duplicates the address operand and every destination
// without inserting the new instruction anywhere.
IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                  nullptr, IBI.getNumOperands()) {
  allocHungoffUses(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}

/// addDestination - Add a destination.
///
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
  unsigned OpNo = getNumOperands();
  if (OpNo+1 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+1);
  getOperandList()[OpNo] = DestBB;
}

/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
void IndirectBrInst::removeDestination(unsigned idx) {
  // Destinations start at operand 1 (operand 0 is the address).
  assert(idx < getNumOperands()-1 && "Successor index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Replace this value with the last one.  Note this swap-with-last strategy
  // does not preserve destination ordering.
  OL[idx+1] = OL[NumOps-1];

  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}

//===----------------------------------------------------------------------===//
//                           FreezeInst Implementation
//===----------------------------------------------------------------------===//

// freeze has the same type as its operand; only the name is set here.
FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, Instruction *InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}

FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
  setName(Name);
}

//===----------------------------------------------------------------------===//
//                        cloneImpl() implementations
//===----------------------------------------------------------------------===//

// Define these methods here so vtables don't get emitted into every translation
// unit that uses these classes.

// Each cloneImpl below builds an uninserted copy of the instruction.  Most
// simply re-invoke the constructor/factory with the original's operands;
// instructions with non-operand state (flags, ordering, alignment, ...)
// copy that state explicitly.

GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  // Placement new: GEPs allocate their operand uses co-located.
  return new (getNumOperands()) GetElementPtrInst(*this);
}

UnaryOperator *UnaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>());
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}

AllocaInst *AllocaInst::cloneImpl() const {
  // getOperand(0) is the array-size operand; inalloca/swifterror bits are
  // not constructor parameters, so copy them afterwards.
  AllocaInst *Result =
      new AllocaInst(getAllocatedType(), getType()->getAddressSpace(),
                     getOperand(0), getAlign());
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

LoadInst *LoadInst::cloneImpl() const {
  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
                      getAlign(), getOrdering(), getSyncScopeID());
}

StoreInst *StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
                       getOrdering(), getSyncScopeID());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  // volatile/weak flags are not constructor parameters; copy them afterwards.
  AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
      getOperand(0), getOperand(1), getOperand(2), getAlign(),
      getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
      new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                        getAlign(), getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}

CallInst *CallInst::cloneImpl() const {
  // Calls carrying operand bundles need extra trailing storage for the
  // bundle descriptors.
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return new(getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
}

PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

ReturnInst *ReturnInst::cloneImpl() const {
  return new(getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new(getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

InvokeInst *InvokeInst::cloneImpl() const {
  // Same operand-bundle storage consideration as CallInst::cloneImpl above.
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new(getNumOperands()) InvokeInst(*this);
}

CallBrInst *CallBrInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
  }
  return new (getNumOperands())
CallBrInst(*this); 4413 } 4414 4415 ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); } 4416 4417 CleanupReturnInst *CleanupReturnInst::cloneImpl() const { 4418 return new (getNumOperands()) CleanupReturnInst(*this); 4419 } 4420 4421 CatchReturnInst *CatchReturnInst::cloneImpl() const { 4422 return new (getNumOperands()) CatchReturnInst(*this); 4423 } 4424 4425 CatchSwitchInst *CatchSwitchInst::cloneImpl() const { 4426 return new CatchSwitchInst(*this); 4427 } 4428 4429 FuncletPadInst *FuncletPadInst::cloneImpl() const { 4430 return new (getNumOperands()) FuncletPadInst(*this); 4431 } 4432 4433 UnreachableInst *UnreachableInst::cloneImpl() const { 4434 LLVMContext &Context = getContext(); 4435 return new UnreachableInst(Context); 4436 } 4437 4438 FreezeInst *FreezeInst::cloneImpl() const { 4439 return new FreezeInst(getOperand(0)); 4440 } 4441