//===- Instructions.cpp - Implement the LLVM instructions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

Optional<uint64_t>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType());
  if (isArrayAllocation()) {
    auto C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return None;
    Size *= C->getZExtValue();
  }
  return Size;
}

//===----------------------------------------------------------------------===//
// CallSite Class
//===----------------------------------------------------------------------===//

User::op_iterator CallSite::getCallee() const {
  return cast<CallBase>(getInstruction())->op_end() - 1;
}

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getNumElements() != VT->getNumElements())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
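// Editorial example (illustrative sketch, not part of the original source):
// callers are expected to run the check above before building a select.
// `Cond`, `TVal`, `FVal` and `InsertPt` are assumed to exist in the caller.
//
//   if (const char *Reason = SelectInst::areInvalidOperands(Cond, TVal, FVal))
//     report_fatal_error(Reason);
//   SelectInst *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);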
//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  std::copy(PN.block_begin(), PN.block_end(), block_begin());
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - This grows the operand list in response to a push_back style
/// of operation.  This grows the number of ops by 1.5 times.
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}
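// Editorial example (illustrative sketch, not part of the original source):
// typical incoming-value bookkeeping around the methods above.  `Ty`, `BB`,
// `V0`, `V1`, `Pred0` and `Pred1` are assumed to exist in the caller.
//
//   PHINode *PN = PHINode::Create(Ty, /*NumReservedValues=*/2, "merge",
//                                 &BB->front());
//   PN->addIncoming(V0, Pred0);
//   PN->addIncoming(V1, Pred1);
//   // After deleting Pred1, drop its entry; the PHI is erased if it becomes
//   // empty and DeletePHIIfEmpty is set.
//   PN->removeIncomingValue(Pred1, /*DeletePHIIfEmpty=*/true);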
/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}
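// Editorial example (illustrative sketch, not part of the original source):
// building a cleanup landing pad with one catch clause.  `ExnTy`, `LPadBB` and
// `TypeInfo` (a Constant* describing the caught type) are assumed to exist in
// the caller.
//
//   LandingPadInst *LP =
//       LandingPadInst::Create(ExnTy, /*NumReservedClauses=*/1, "lpad", LPadBB);
//   LP->setCleanup(true);
//   LP->addClause(TypeInfo);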
/// growOperands - This grows the operand list in response to a push_back style
/// of operation.  This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledValue();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    if (CI->isInlineAsm())
      return false;
  return true;
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
      !NullPointerIsDefined(getCaller(),
                            getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getReturnedArgOperand() const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
        Index)
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

bool CallBase::hasRetAttr(Attribute::AttrKind Kind) const {
  if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
    return true;

  // Look at the callee, if available.
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
  return false;
}
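// Editorial example (illustrative sketch, not part of the original source):
// querying call-site and callee attributes through the accessors above and
// below.  `CB` is an assumed CallBase*.
//
//   bool NonNull = CB->isReturnNonNull();              // attribute or dereferenceable bytes
//   bool NoAlias = CB->hasRetAttr(Attribute::NoAlias); // call site or callee
//   bool ByVal0  = CB->paramHasAttr(0, Attribute::ByVal);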
/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");

  if (Attrs.hasParamAttribute(ArgNo, Kind))
    return true;
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasParamAttribute(ArgNo, Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}

//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  setCalledOperand(Func);

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}
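// Editorial example (illustrative sketch, not part of the original source):
// creating a call through the public factories rather than these constructors.
// `M`, `Ctx`, `Arg` and `InsertPt` are assumed to exist in the caller.
//
//   FunctionCallee Callee =
//       M->getOrInsertFunction("callee", Type::getVoidTy(Ctx), Arg->getType());
//   CallInst *Call = CallInst::Create(Callee, {Arg}, "", InsertPt);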
CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledValue(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update the profile weight for this call instruction by scaling it using the
// ratio S/T.  The meaning of "branch_weights" metadata for a call instruction
// is transferred to represent the call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }

  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(ConstantInt::get(
        Type::getInt64Ty(getContext()), Val.udiv(APT).getLimitedValue())));
  } else if (ProfDataName->getString().equals("VP"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not change.
      Vals.push_back(ProfileData->getOperand(i));
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128,
                mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
                    ->getValue()
                    .getZExtValue());
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}
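// Editorial example (illustrative sketch, not part of the original source):
// attaching an extra operand bundle to an existing call by rebuilding it with
// the factory above.  `CI` (a CallInst*) and `DeoptArgs` (an ArrayRef<Value*>)
// are assumed to exist in the caller.
//
//   SmallVector<OperandBundleDef, 1> Bundles;
//   CI->getOperandBundlesAsDefs(Bundles);
//   Bundles.emplace_back("deopt", DeoptArgs);
//   CallInst *NewCI = CallInst::Create(CI, Bundles, /*InsertPt=*/CI);
//   NewCI->takeName(CI);
//   CI->replaceAllUsesWith(NewCI);
//   CI->eraseFromParent();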
/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}
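// Editorial note (not part of the original source): for a non-constant array
// size, the helper above conceptually emits the following, using the value
// names it picks:
//
//   %mallocsize = mul iN %arraysize, %typesize
//   %malloccall = tail call i8* @malloc(iN %mallocsize)
//   %result     = bitcast i8* %malloccall to <AllocTy>*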
/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block; that is
/// the responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}
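// Editorial example (illustrative sketch, not part of the original source):
// emitting a typed malloc for a single object.  `DL`, `Ctx`, `AllocTy` and
// `InsertPt` are assumed to exist in the caller.
//
//   Type *IntPtrTy = DL.getIntPtrType(Ctx);
//   Value *AllocSize =
//       ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(AllocTy));
//   Instruction *Obj = CallInst::CreateMalloc(InsertPt, IntPtrTy, AllocTy,
//                                             AllocSize, /*ArraySize=*/nullptr,
//                                             /*MallocF=*/nullptr, "obj");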
static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, None, InsertBefore, nullptr);
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block; that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
//===----------------------------------------------------------------------===//
// InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(II->getFunctionType(), II->getCalledValue(),
                                   II->getNormalDest(), II->getUnwindDest(),
                                   Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}
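// Editorial example (illustrative sketch, not part of the original source):
// creating an invoke that unwinds to a landing-pad block.  `Callee` (a
// FunctionCallee), `Args`, `NormalBB`, `LPadBB` and `CurBB` are assumed to
// exist in the caller.
//
//   InvokeInst *Inv =
//       InvokeInst::Create(Callee, NormalBB, LPadBB, Args, "", CurBB);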
//===----------------------------------------------------------------------===//
// CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

void CallBrInst::updateArgBlockAddresses(unsigned i, BasicBlock *B) {
  assert(getNumIndirectDests() > i && "IndirectDest # out of range for callbr");
  if (BasicBlock *OldBB = getIndirectDest(i)) {
    BlockAddress *Old = BlockAddress::get(OldBB);
    BlockAddress *New = BlockAddress::get(B);
    for (unsigned ArgNo = 0, e = getNumArgOperands(); ArgNo != e; ++ArgNo)
      if (dyn_cast<BlockAddress>(getArgOperand(ArgNo)) == Old)
        setArgOperand(ArgNo, New);
  }
}

CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(CBI->getFunctionType(),
                                    CBI->getCalledValue(),
                                    CBI->getDefaultDest(),
                                    CBI->getIndirectDests(),
                                    Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}
//===----------------------------------------------------------------------===//
// ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
// CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}
//===----------------------------------------------------------------------===//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - This grows the operand list in response to a push_back style
/// of operation.  This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}
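// Editorial example (illustrative sketch, not part of the original source):
// a catchswitch with one handler.  `ParentPad` (e.g. a ConstantTokenNone),
// `UnwindBB`, `HandlerBB` and `DispatchBB` are assumed to exist in the caller.
//
//   auto *CS = CatchSwitchInst::Create(ParentPad, UnwindBB,
//                                      /*NumHandlers=*/1, "dispatch",
//                                      DispatchBB);
//   CS->addHandler(HandlerBB);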
//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
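// Editorial example (illustrative sketch, not part of the original source):
// the two branch forms.  `Cond` (an i1 Value*), `ThenBB`, `ElseBB`, `EntryBB`
// and `CheckBB` are assumed to exist in the caller.
//
//   BranchInst::Create(CheckBB, EntryBB);               // br label %check
//   BranchInst::Create(ThenBB, ElseBB, Cond, CheckBB);  // br i1 %cond, ...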
Use other ctor"); 1204 assert(Amt->getType()->isIntegerTy() && 1205 "Allocation array size is not an integer!"); 1206 } 1207 return Amt; 1208 } 1209 1210 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 1211 Instruction *InsertBefore) 1212 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {} 1213 1214 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 1215 BasicBlock *InsertAtEnd) 1216 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {} 1217 1218 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1219 const Twine &Name, Instruction *InsertBefore) 1220 : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/None, Name, InsertBefore) { 1221 } 1222 1223 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1224 const Twine &Name, BasicBlock *InsertAtEnd) 1225 : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/None, Name, InsertAtEnd) {} 1226 1227 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1228 MaybeAlign Align, const Twine &Name, 1229 Instruction *InsertBefore) 1230 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca, 1231 getAISize(Ty->getContext(), ArraySize), InsertBefore), 1232 AllocatedType(Ty) { 1233 setAlignment(MaybeAlign(Align)); 1234 assert(!Ty->isVoidTy() && "Cannot allocate void!"); 1235 setName(Name); 1236 } 1237 1238 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1239 MaybeAlign Align, const Twine &Name, 1240 BasicBlock *InsertAtEnd) 1241 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca, 1242 getAISize(Ty->getContext(), ArraySize), InsertAtEnd), 1243 AllocatedType(Ty) { 1244 setAlignment(Align); 1245 assert(!Ty->isVoidTy() && "Cannot allocate void!"); 1246 setName(Name); 1247 } 1248 1249 void AllocaInst::setAlignment(MaybeAlign Align) { 1250 assert((!Align || *Align <= MaximumAlignment) && 1251 "Alignment is greater than MaximumAlignment!"); 1252 setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) | 1253 encode(Align)); 1254 if (Align) 1255 assert(getAlignment() == Align->value() && 1256 "Alignment representation error!"); 1257 else 1258 assert(getAlignment() == 0 && "Alignment representation error!"); 1259 } 1260 1261 bool AllocaInst::isArrayAllocation() const { 1262 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0))) 1263 return !CI->isOne(); 1264 return true; 1265 } 1266 1267 /// isStaticAlloca - Return true if this alloca is in the entry block of the 1268 /// function and is a constant size. If so, the code generator will fold it 1269 /// into the prolog/epilog code, so it is basically free. 1270 bool AllocaInst::isStaticAlloca() const { 1271 // Must be constant size. 1272 if (!isa<ConstantInt>(getArraySize())) return false; 1273 1274 // Must be in the entry block. 
//===----------------------------------------------------------------------===//
// LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(MaybeAlign(Align));
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

void LoadInst::setAlignment(MaybeAlign Align) {
  assert((!Align || *Align <= MaximumAlignment) &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             (encode(Align) << 1));
  assert(getAlign() == Align && "Alignment representation error!");
}
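// Editorial example (illustrative sketch, not part of the original source):
// an aligned atomic load.  `Ptr` (an i64*), `Ctx` and `InsertPt` are assumed
// to exist in the caller.
//
//   auto *LI = new LoadInst(Type::getInt64Ty(Ctx), Ptr, "val",
//                           /*isVolatile=*/false, MaybeAlign(8),
//                           AtomicOrdering::Acquire, SyncScope::System,
//                           InsertPt);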
pointer to Val type!"); 1359 assert(!(isAtomic() && getAlignment() == 0) && 1360 "Alignment required for atomic store"); 1361 } 1362 1363 StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore) 1364 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {} 1365 1366 StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd) 1367 : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {} 1368 1369 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, 1370 Instruction *InsertBefore) 1371 : StoreInst(val, addr, isVolatile, /*Align=*/None, InsertBefore) {} 1372 1373 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, 1374 BasicBlock *InsertAtEnd) 1375 : StoreInst(val, addr, isVolatile, /*Align=*/None, InsertAtEnd) {} 1376 1377 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align, 1378 Instruction *InsertBefore) 1379 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic, 1380 SyncScope::System, InsertBefore) {} 1381 1382 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align, 1383 BasicBlock *InsertAtEnd) 1384 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic, 1385 SyncScope::System, InsertAtEnd) {} 1386 1387 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align, 1388 AtomicOrdering Order, SyncScope::ID SSID, 1389 Instruction *InsertBefore) 1390 : Instruction(Type::getVoidTy(val->getContext()), Store, 1391 OperandTraits<StoreInst>::op_begin(this), 1392 OperandTraits<StoreInst>::operands(this), InsertBefore) { 1393 Op<0>() = val; 1394 Op<1>() = addr; 1395 setVolatile(isVolatile); 1396 setAlignment(Align); 1397 setAtomic(Order, SSID); 1398 AssertOK(); 1399 } 1400 1401 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align, 1402 AtomicOrdering Order, SyncScope::ID SSID, 1403 BasicBlock *InsertAtEnd) 1404 : Instruction(Type::getVoidTy(val->getContext()), Store, 1405 OperandTraits<StoreInst>::op_begin(this), 1406 OperandTraits<StoreInst>::operands(this), InsertAtEnd) { 1407 Op<0>() = val; 1408 Op<1>() = addr; 1409 setVolatile(isVolatile); 1410 setAlignment(Align); 1411 setAtomic(Order, SSID); 1412 AssertOK(); 1413 } 1414 1415 void StoreInst::setAlignment(MaybeAlign Alignment) { 1416 assert((!Alignment || *Alignment <= MaximumAlignment) && 1417 "Alignment is greater than MaximumAlignment!"); 1418 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) | 1419 (encode(Alignment) << 1)); 1420 assert(getAlign() == Alignment && "Alignment representation error!"); 1421 } 1422 1423 //===----------------------------------------------------------------------===// 1424 // AtomicCmpXchgInst Implementation 1425 //===----------------------------------------------------------------------===// 1426 1427 void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal, 1428 AtomicOrdering SuccessOrdering, 1429 AtomicOrdering FailureOrdering, 1430 SyncScope::ID SSID) { 1431 Op<0>() = Ptr; 1432 Op<1>() = Cmp; 1433 Op<2>() = NewVal; 1434 setSuccessOrdering(SuccessOrdering); 1435 setFailureOrdering(FailureOrdering); 1436 setSyncScopeID(SSID); 1437 1438 assert(getOperand(0) && getOperand(1) && getOperand(2) && 1439 "All operands must be non-null!"); 1440 assert(getOperand(0)->getType()->isPointerTy() && 1441 "Ptr must have pointer type!"); 1442 assert(getOperand(1)->getType() == 1443 cast<PointerType>(getOperand(0)->getType())->getElementType() 1444 && "Ptr must be a pointer to Cmp type!"); 1445 
//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
             cast<PointerType>(getOperand(0)->getType())->getElementType() &&
         "Ptr must be a pointer to Cmp type!");
  assert(getOperand(2)->getType() ==
             cast<PointerType>(getOperand(0)->getType())->getElementType() &&
         "Ptr must be a pointer to NewVal type!");
  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
         "AtomicCmpXchg failure argument shall be no stronger than the success "
         "argument");
  assert(FailureOrdering != AtomicOrdering::Release &&
         FailureOrdering != AtomicOrdering::AcquireRelease &&
         "AtomicCmpXchg failure ordering cannot include release semantics");
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

//===----------------------------------------------------------------------===//
// AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
             cast<PointerType>(getOperand(0)->getType())->getElementType() &&
         "Ptr must be a pointer to Val type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this),
                  InsertBefore) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this),
                  InsertAtEnd) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}
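// Editorial example (illustrative sketch, not part of the original source):
// a sequentially consistent compare-and-swap followed by an atomic increment.
// `Ptr` (an i64*), `Expected`, `Desired`, `One` and `InsertPt` are assumed to
// exist in the caller.
//
//   auto *CAS = new AtomicCmpXchgInst(Ptr, Expected, Desired,
//                                     AtomicOrdering::SequentiallyConsistent,
//                                     AtomicOrdering::SequentiallyConsistent,
//                                     SyncScope::System, InsertPt);
//   auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, One,
//                                 AtomicOrdering::Monotonic,
//                                 SyncScope::System, InsertPt);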
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
  case AtomicRMWInst::Nand:
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
  case AtomicRMWInst::UMax:
    return "umax";
  case AtomicRMWInst::UMin:
    return "umin";
  case AtomicRMWInst::FAdd:
    return "fadd";
  case AtomicRMWInst::FSub:
    return "fsub";
  case AtomicRMWInst::BAD_BINOP:
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}

//===----------------------------------------------------------------------===//
// FenceInst Implementation
//===----------------------------------------------------------------------===//

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

//===----------------------------------------------------------------------===//
// GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}

/// getIndexedType - Returns the type of the element that would be accessed with
/// a gep instruction with the specified parameters.
///
/// The Idxs pointer should point to a contiguous piece of memory containing the
/// indices, either as Value* or uint64_t.
///
/// A null type is returned if the indices are invalid for the specified
/// pointer type.
///
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) {
  // Handle the special case of the empty index set, which is always valid.
  if (IdxList.empty())
    return Agg;

  // If there is at least one index, the top level type must be sized, otherwise
  // it cannot be 'stepped over'.
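  // (For example, an opaque struct or a function type has no size, so no
  // index can step over it and the GEP is rejected.)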
1626 if (!Agg->isSized()) 1627 return nullptr; 1628 1629 unsigned CurIdx = 1; 1630 for (; CurIdx != IdxList.size(); ++CurIdx) { 1631 CompositeType *CT = dyn_cast<CompositeType>(Agg); 1632 if (!CT || CT->isPointerTy()) return nullptr; 1633 IndexTy Index = IdxList[CurIdx]; 1634 if (!CT->indexValid(Index)) return nullptr; 1635 Agg = CT->getTypeAtIndex(Index); 1636 } 1637 return CurIdx == IdxList.size() ? Agg : nullptr; 1638 } 1639 1640 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) { 1641 return getIndexedTypeInternal(Ty, IdxList); 1642 } 1643 1644 Type *GetElementPtrInst::getIndexedType(Type *Ty, 1645 ArrayRef<Constant *> IdxList) { 1646 return getIndexedTypeInternal(Ty, IdxList); 1647 } 1648 1649 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) { 1650 return getIndexedTypeInternal(Ty, IdxList); 1651 } 1652 1653 /// hasAllZeroIndices - Return true if all of the indices of this GEP are 1654 /// zeros. If so, the result pointer and the first operand have the same 1655 /// value, just potentially different types. 1656 bool GetElementPtrInst::hasAllZeroIndices() const { 1657 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1658 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) { 1659 if (!CI->isZero()) return false; 1660 } else { 1661 return false; 1662 } 1663 } 1664 return true; 1665 } 1666 1667 /// hasAllConstantIndices - Return true if all of the indices of this GEP are 1668 /// constant integers. If so, the result pointer and the first operand have 1669 /// a constant offset between them. 1670 bool GetElementPtrInst::hasAllConstantIndices() const { 1671 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1672 if (!isa<ConstantInt>(getOperand(i))) 1673 return false; 1674 } 1675 return true; 1676 } 1677 1678 void GetElementPtrInst::setIsInBounds(bool B) { 1679 cast<GEPOperator>(this)->setIsInBounds(B); 1680 } 1681 1682 bool GetElementPtrInst::isInBounds() const { 1683 return cast<GEPOperator>(this)->isInBounds(); 1684 } 1685 1686 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL, 1687 APInt &Offset) const { 1688 // Delegate to the generic GEPOperator implementation. 
1689 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset); 1690 } 1691 1692 //===----------------------------------------------------------------------===// 1693 // ExtractElementInst Implementation 1694 //===----------------------------------------------------------------------===// 1695 1696 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1697 const Twine &Name, 1698 Instruction *InsertBef) 1699 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1700 ExtractElement, 1701 OperandTraits<ExtractElementInst>::op_begin(this), 1702 2, InsertBef) { 1703 assert(isValidOperands(Val, Index) && 1704 "Invalid extractelement instruction operands!"); 1705 Op<0>() = Val; 1706 Op<1>() = Index; 1707 setName(Name); 1708 } 1709 1710 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1711 const Twine &Name, 1712 BasicBlock *InsertAE) 1713 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1714 ExtractElement, 1715 OperandTraits<ExtractElementInst>::op_begin(this), 1716 2, InsertAE) { 1717 assert(isValidOperands(Val, Index) && 1718 "Invalid extractelement instruction operands!"); 1719 1720 Op<0>() = Val; 1721 Op<1>() = Index; 1722 setName(Name); 1723 } 1724 1725 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) { 1726 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy()) 1727 return false; 1728 return true; 1729 } 1730 1731 //===----------------------------------------------------------------------===// 1732 // InsertElementInst Implementation 1733 //===----------------------------------------------------------------------===// 1734 1735 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1736 const Twine &Name, 1737 Instruction *InsertBef) 1738 : Instruction(Vec->getType(), InsertElement, 1739 OperandTraits<InsertElementInst>::op_begin(this), 1740 3, InsertBef) { 1741 assert(isValidOperands(Vec, Elt, Index) && 1742 "Invalid insertelement instruction operands!"); 1743 Op<0>() = Vec; 1744 Op<1>() = Elt; 1745 Op<2>() = Index; 1746 setName(Name); 1747 } 1748 1749 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1750 const Twine &Name, 1751 BasicBlock *InsertAE) 1752 : Instruction(Vec->getType(), InsertElement, 1753 OperandTraits<InsertElementInst>::op_begin(this), 1754 3, InsertAE) { 1755 assert(isValidOperands(Vec, Elt, Index) && 1756 "Invalid insertelement instruction operands!"); 1757 1758 Op<0>() = Vec; 1759 Op<1>() = Elt; 1760 Op<2>() = Index; 1761 setName(Name); 1762 } 1763 1764 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt, 1765 const Value *Index) { 1766 if (!Vec->getType()->isVectorTy()) 1767 return false; // First operand of insertelement must be vector type. 1768 1769 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType()) 1770 return false;// Second operand of insertelement must be vector element type. 1771 1772 if (!Index->getType()->isIntegerTy()) 1773 return false; // Third operand of insertelement must be i32. 
1774 return true; 1775 } 1776 1777 //===----------------------------------------------------------------------===// 1778 // ShuffleVectorInst Implementation 1779 //===----------------------------------------------------------------------===// 1780 1781 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1782 const Twine &Name, 1783 Instruction *InsertBefore) 1784 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1785 cast<VectorType>(Mask->getType())->getElementCount()), 1786 ShuffleVector, 1787 OperandTraits<ShuffleVectorInst>::op_begin(this), 1788 OperandTraits<ShuffleVectorInst>::operands(this), 1789 InsertBefore) { 1790 assert(isValidOperands(V1, V2, Mask) && 1791 "Invalid shuffle vector instruction operands!"); 1792 Op<0>() = V1; 1793 Op<1>() = V2; 1794 Op<2>() = Mask; 1795 setName(Name); 1796 } 1797 1798 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1799 const Twine &Name, 1800 BasicBlock *InsertAtEnd) 1801 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1802 cast<VectorType>(Mask->getType())->getElementCount()), 1803 ShuffleVector, 1804 OperandTraits<ShuffleVectorInst>::op_begin(this), 1805 OperandTraits<ShuffleVectorInst>::operands(this), 1806 InsertAtEnd) { 1807 assert(isValidOperands(V1, V2, Mask) && 1808 "Invalid shuffle vector instruction operands!"); 1809 1810 Op<0>() = V1; 1811 Op<1>() = V2; 1812 Op<2>() = Mask; 1813 setName(Name); 1814 } 1815 1816 void ShuffleVectorInst::commute() { 1817 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 1818 int NumMaskElts = getMask()->getType()->getVectorNumElements(); 1819 SmallVector<Constant*, 16> NewMask(NumMaskElts); 1820 Type *Int32Ty = Type::getInt32Ty(getContext()); 1821 for (int i = 0; i != NumMaskElts; ++i) { 1822 int MaskElt = getMaskValue(i); 1823 if (MaskElt == -1) { 1824 NewMask[i] = UndefValue::get(Int32Ty); 1825 continue; 1826 } 1827 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask"); 1828 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts; 1829 NewMask[i] = ConstantInt::get(Int32Ty, MaskElt); 1830 } 1831 Op<2>() = ConstantVector::get(NewMask); 1832 Op<0>().swap(Op<1>()); 1833 } 1834 1835 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, 1836 const Value *Mask) { 1837 // V1 and V2 must be vectors of the same type. 1838 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType()) 1839 return false; 1840 1841 // Mask must be vector of i32. 1842 auto *MaskTy = dyn_cast<VectorType>(Mask->getType()); 1843 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32)) 1844 return false; 1845 1846 // Check to see if Mask is valid. 
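  // Every mask element must be either undef or a constant integer index that
  // is smaller than twice the number of elements in V1, so that it can name an
  // element of either input vector.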
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  // The bitcode reader can create a placeholder for a forward reference
  // used as the shuffle mask. When this occurs, the shuffle mask will
  // fall into this case and fail. To avoid this error, do this bit of
  // ugliness to allow such a mask to pass.
  if (const auto *CE = dyn_cast<ConstantExpr>(Mask))
    if (CE->getOpcode() == Instruction::UserOp1)
      return true;

  return false;
}

int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) {
  assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask))
    return CDS->getElementAsInteger(i);
  Constant *C = Mask->getAggregateElement(i);
  if (isa<UndefValue>(C))
    return -1;
  return cast<ConstantInt>(C)->getZExtValue();
}

void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  unsigned NumElts = Mask->getType()->getVectorNumElements();

  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}

static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  assert(!Mask.empty() && "Shuffle mask must contain elements");
  bool UsesLHS = false;
  bool UsesRHS = false;
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] == -1)
      continue;
    assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) &&
           "Out-of-bounds shuffle mask element");
    UsesLHS |= (Mask[i] < NumOpElts);
    UsesRHS |= (Mask[i] >= NumOpElts);
    if (UsesLHS && UsesRHS)
      return false;
  }
  assert((UsesLHS ^ UsesRHS) && "Should have selected from exactly 1 source");
  return true;
}

bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
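  // For example (illustrative values), the mask {0, 3, -1, 1} reads only the
  // first operand, while {0, 4, 1, 5} mixes elements from both operands.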
1929 return isSingleSourceMaskImpl(Mask, Mask.size()); 1930 } 1931 1932 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1933 if (!isSingleSourceMaskImpl(Mask, NumOpElts)) 1934 return false; 1935 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1936 if (Mask[i] == -1) 1937 continue; 1938 if (Mask[i] != i && Mask[i] != (NumOpElts + i)) 1939 return false; 1940 } 1941 return true; 1942 } 1943 1944 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) { 1945 // We don't have vector operand size information, so assume operands are the 1946 // same size as the mask. 1947 return isIdentityMaskImpl(Mask, Mask.size()); 1948 } 1949 1950 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) { 1951 if (!isSingleSourceMask(Mask)) 1952 return false; 1953 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1954 if (Mask[i] == -1) 1955 continue; 1956 if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i)) 1957 return false; 1958 } 1959 return true; 1960 } 1961 1962 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) { 1963 if (!isSingleSourceMask(Mask)) 1964 return false; 1965 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1966 if (Mask[i] == -1) 1967 continue; 1968 if (Mask[i] != 0 && Mask[i] != NumElts) 1969 return false; 1970 } 1971 return true; 1972 } 1973 1974 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) { 1975 // Select is differentiated from identity. It requires using both sources. 1976 if (isSingleSourceMask(Mask)) 1977 return false; 1978 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1979 if (Mask[i] == -1) 1980 continue; 1981 if (Mask[i] != i && Mask[i] != (NumElts + i)) 1982 return false; 1983 } 1984 return true; 1985 } 1986 1987 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) { 1988 // Example masks that will return true: 1989 // v1 = <a, b, c, d> 1990 // v2 = <e, f, g, h> 1991 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g> 1992 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h> 1993 1994 // 1. The number of elements in the mask must be a power-of-2 and at least 2. 1995 int NumElts = Mask.size(); 1996 if (NumElts < 2 || !isPowerOf2_32(NumElts)) 1997 return false; 1998 1999 // 2. The first element of the mask must be either a 0 or a 1. 2000 if (Mask[0] != 0 && Mask[0] != 1) 2001 return false; 2002 2003 // 3. The difference between the first 2 elements must be equal to the 2004 // number of elements in the mask. 2005 if ((Mask[1] - Mask[0]) != NumElts) 2006 return false; 2007 2008 // 4. The difference between consecutive even-numbered and odd-numbered 2009 // elements must be equal to 2. 2010 for (int i = 2; i < NumElts; ++i) { 2011 int MaskEltVal = Mask[i]; 2012 if (MaskEltVal == -1) 2013 return false; 2014 int MaskEltPrevVal = Mask[i - 2]; 2015 if (MaskEltVal - MaskEltPrevVal != 2) 2016 return false; 2017 } 2018 return true; 2019 } 2020 2021 bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask, 2022 int NumSrcElts, int &Index) { 2023 // Must extract from a single source. 2024 if (!isSingleSourceMaskImpl(Mask, NumSrcElts)) 2025 return false; 2026 2027 // Must be smaller (else this is an Identity shuffle). 2028 if (NumSrcElts <= (int)Mask.size()) 2029 return false; 2030 2031 // Find start of extraction, accounting that we may start with an UNDEF. 
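  // For example (illustrative values), with NumSrcElts == 8 the mask
  // {4, 5, 6, 7} (or {-1, 5, 6, 7}) extracts the high half and sets Index = 4.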
2032 int SubIndex = -1; 2033 for (int i = 0, e = Mask.size(); i != e; ++i) { 2034 int M = Mask[i]; 2035 if (M < 0) 2036 continue; 2037 int Offset = (M % NumSrcElts) - i; 2038 if (0 <= SubIndex && SubIndex != Offset) 2039 return false; 2040 SubIndex = Offset; 2041 } 2042 2043 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) { 2044 Index = SubIndex; 2045 return true; 2046 } 2047 return false; 2048 } 2049 2050 bool ShuffleVectorInst::isIdentityWithPadding() const { 2051 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 2052 int NumMaskElts = getType()->getVectorNumElements(); 2053 if (NumMaskElts <= NumOpElts) 2054 return false; 2055 2056 // The first part of the mask must choose elements from exactly 1 source op. 2057 SmallVector<int, 16> Mask = getShuffleMask(); 2058 if (!isIdentityMaskImpl(Mask, NumOpElts)) 2059 return false; 2060 2061 // All extending must be with undef elements. 2062 for (int i = NumOpElts; i < NumMaskElts; ++i) 2063 if (Mask[i] != -1) 2064 return false; 2065 2066 return true; 2067 } 2068 2069 bool ShuffleVectorInst::isIdentityWithExtract() const { 2070 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 2071 int NumMaskElts = getType()->getVectorNumElements(); 2072 if (NumMaskElts >= NumOpElts) 2073 return false; 2074 2075 return isIdentityMaskImpl(getShuffleMask(), NumOpElts); 2076 } 2077 2078 bool ShuffleVectorInst::isConcat() const { 2079 // Vector concatenation is differentiated from identity with padding. 2080 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>())) 2081 return false; 2082 2083 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 2084 int NumMaskElts = getType()->getVectorNumElements(); 2085 if (NumMaskElts != NumOpElts * 2) 2086 return false; 2087 2088 // Use the mask length rather than the operands' vector lengths here. We 2089 // already know that the shuffle returns a vector twice as long as the inputs, 2090 // and neither of the inputs are undef vectors. If the mask picks consecutive 2091 // elements from both inputs, then this is a concatenation of the inputs. 2092 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts); 2093 } 2094 2095 //===----------------------------------------------------------------------===// 2096 // InsertValueInst Class 2097 //===----------------------------------------------------------------------===// 2098 2099 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 2100 const Twine &Name) { 2101 assert(getNumOperands() == 2 && "NumOperands not initialized?"); 2102 2103 // There's no fundamental reason why we require at least one index 2104 // (other than weirdness with &*IdxBegin being invalid; see 2105 // getelementptr's init routine for example). But there's no 2106 // present need to support it. 
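  // For example (hypothetical types), inserting a float into an aggregate of
  // type {i32, {float, float}} at indices {1, 0} replaces the first float of
  // the nested struct.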
2107 assert(!Idxs.empty() && "InsertValueInst must have at least one index"); 2108 2109 assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) == 2110 Val->getType() && "Inserted value must match indexed type!"); 2111 Op<0>() = Agg; 2112 Op<1>() = Val; 2113 2114 Indices.append(Idxs.begin(), Idxs.end()); 2115 setName(Name); 2116 } 2117 2118 InsertValueInst::InsertValueInst(const InsertValueInst &IVI) 2119 : Instruction(IVI.getType(), InsertValue, 2120 OperandTraits<InsertValueInst>::op_begin(this), 2), 2121 Indices(IVI.Indices) { 2122 Op<0>() = IVI.getOperand(0); 2123 Op<1>() = IVI.getOperand(1); 2124 SubclassOptionalData = IVI.SubclassOptionalData; 2125 } 2126 2127 //===----------------------------------------------------------------------===// 2128 // ExtractValueInst Class 2129 //===----------------------------------------------------------------------===// 2130 2131 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) { 2132 assert(getNumOperands() == 1 && "NumOperands not initialized?"); 2133 2134 // There's no fundamental reason why we require at least one index. 2135 // But there's no present need to support it. 2136 assert(!Idxs.empty() && "ExtractValueInst must have at least one index"); 2137 2138 Indices.append(Idxs.begin(), Idxs.end()); 2139 setName(Name); 2140 } 2141 2142 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI) 2143 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)), 2144 Indices(EVI.Indices) { 2145 SubclassOptionalData = EVI.SubclassOptionalData; 2146 } 2147 2148 // getIndexedType - Returns the type of the element that would be extracted 2149 // with an extractvalue instruction with the specified parameters. 2150 // 2151 // A null type is returned if the indices are invalid for the specified 2152 // pointer type. 2153 // 2154 Type *ExtractValueInst::getIndexedType(Type *Agg, 2155 ArrayRef<unsigned> Idxs) { 2156 for (unsigned Index : Idxs) { 2157 // We can't use CompositeType::indexValid(Index) here. 2158 // indexValid() always returns true for arrays because getelementptr allows 2159 // out-of-bounds indices. Since we don't allow those for extractvalue and 2160 // insertvalue we need to check array indexing manually. 2161 // Since the only other types we can index into are struct types it's just 2162 // as easy to check those manually as well. 2163 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) { 2164 if (Index >= AT->getNumElements()) 2165 return nullptr; 2166 } else if (StructType *ST = dyn_cast<StructType>(Agg)) { 2167 if (Index >= ST->getNumElements()) 2168 return nullptr; 2169 } else { 2170 // Not a valid type to index into. 
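      // (For example, a scalar integer or a vector type; extractvalue and
      // insertvalue only index into arrays and structs.)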
2171 return nullptr; 2172 } 2173 2174 Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index); 2175 } 2176 return const_cast<Type*>(Agg); 2177 } 2178 2179 //===----------------------------------------------------------------------===// 2180 // UnaryOperator Class 2181 //===----------------------------------------------------------------------===// 2182 2183 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, 2184 Type *Ty, const Twine &Name, 2185 Instruction *InsertBefore) 2186 : UnaryInstruction(Ty, iType, S, InsertBefore) { 2187 Op<0>() = S; 2188 setName(Name); 2189 AssertOK(); 2190 } 2191 2192 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, 2193 Type *Ty, const Twine &Name, 2194 BasicBlock *InsertAtEnd) 2195 : UnaryInstruction(Ty, iType, S, InsertAtEnd) { 2196 Op<0>() = S; 2197 setName(Name); 2198 AssertOK(); 2199 } 2200 2201 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, 2202 const Twine &Name, 2203 Instruction *InsertBefore) { 2204 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore); 2205 } 2206 2207 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, 2208 const Twine &Name, 2209 BasicBlock *InsertAtEnd) { 2210 UnaryOperator *Res = Create(Op, S, Name); 2211 InsertAtEnd->getInstList().push_back(Res); 2212 return Res; 2213 } 2214 2215 void UnaryOperator::AssertOK() { 2216 Value *LHS = getOperand(0); 2217 (void)LHS; // Silence warnings. 2218 #ifndef NDEBUG 2219 switch (getOpcode()) { 2220 case FNeg: 2221 assert(getType() == LHS->getType() && 2222 "Unary operation should return same type as operand!"); 2223 assert(getType()->isFPOrFPVectorTy() && 2224 "Tried to create a floating-point operation on a " 2225 "non-floating-point type!"); 2226 break; 2227 default: llvm_unreachable("Invalid opcode provided"); 2228 } 2229 #endif 2230 } 2231 2232 //===----------------------------------------------------------------------===// 2233 // BinaryOperator Class 2234 //===----------------------------------------------------------------------===// 2235 2236 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 2237 Type *Ty, const Twine &Name, 2238 Instruction *InsertBefore) 2239 : Instruction(Ty, iType, 2240 OperandTraits<BinaryOperator>::op_begin(this), 2241 OperandTraits<BinaryOperator>::operands(this), 2242 InsertBefore) { 2243 Op<0>() = S1; 2244 Op<1>() = S2; 2245 setName(Name); 2246 AssertOK(); 2247 } 2248 2249 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 2250 Type *Ty, const Twine &Name, 2251 BasicBlock *InsertAtEnd) 2252 : Instruction(Ty, iType, 2253 OperandTraits<BinaryOperator>::op_begin(this), 2254 OperandTraits<BinaryOperator>::operands(this), 2255 InsertAtEnd) { 2256 Op<0>() = S1; 2257 Op<1>() = S2; 2258 setName(Name); 2259 AssertOK(); 2260 } 2261 2262 void BinaryOperator::AssertOK() { 2263 Value *LHS = getOperand(0), *RHS = getOperand(1); 2264 (void)LHS; (void)RHS; // Silence warnings. 
2265 assert(LHS->getType() == RHS->getType() && 2266 "Binary operator operand types must match!"); 2267 #ifndef NDEBUG 2268 switch (getOpcode()) { 2269 case Add: case Sub: 2270 case Mul: 2271 assert(getType() == LHS->getType() && 2272 "Arithmetic operation should return same type as operands!"); 2273 assert(getType()->isIntOrIntVectorTy() && 2274 "Tried to create an integer operation on a non-integer type!"); 2275 break; 2276 case FAdd: case FSub: 2277 case FMul: 2278 assert(getType() == LHS->getType() && 2279 "Arithmetic operation should return same type as operands!"); 2280 assert(getType()->isFPOrFPVectorTy() && 2281 "Tried to create a floating-point operation on a " 2282 "non-floating-point type!"); 2283 break; 2284 case UDiv: 2285 case SDiv: 2286 assert(getType() == LHS->getType() && 2287 "Arithmetic operation should return same type as operands!"); 2288 assert(getType()->isIntOrIntVectorTy() && 2289 "Incorrect operand type (not integer) for S/UDIV"); 2290 break; 2291 case FDiv: 2292 assert(getType() == LHS->getType() && 2293 "Arithmetic operation should return same type as operands!"); 2294 assert(getType()->isFPOrFPVectorTy() && 2295 "Incorrect operand type (not floating point) for FDIV"); 2296 break; 2297 case URem: 2298 case SRem: 2299 assert(getType() == LHS->getType() && 2300 "Arithmetic operation should return same type as operands!"); 2301 assert(getType()->isIntOrIntVectorTy() && 2302 "Incorrect operand type (not integer) for S/UREM"); 2303 break; 2304 case FRem: 2305 assert(getType() == LHS->getType() && 2306 "Arithmetic operation should return same type as operands!"); 2307 assert(getType()->isFPOrFPVectorTy() && 2308 "Incorrect operand type (not floating point) for FREM"); 2309 break; 2310 case Shl: 2311 case LShr: 2312 case AShr: 2313 assert(getType() == LHS->getType() && 2314 "Shift operation should return same type as operands!"); 2315 assert(getType()->isIntOrIntVectorTy() && 2316 "Tried to create a shift operation on a non-integral type!"); 2317 break; 2318 case And: case Or: 2319 case Xor: 2320 assert(getType() == LHS->getType() && 2321 "Logical operation should return same type as operands!"); 2322 assert(getType()->isIntOrIntVectorTy() && 2323 "Tried to create a logical operation on a non-integral type!"); 2324 break; 2325 default: llvm_unreachable("Invalid opcode provided"); 2326 } 2327 #endif 2328 } 2329 2330 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 2331 const Twine &Name, 2332 Instruction *InsertBefore) { 2333 assert(S1->getType() == S2->getType() && 2334 "Cannot create binary operator with two operands of differing type!"); 2335 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore); 2336 } 2337 2338 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 2339 const Twine &Name, 2340 BasicBlock *InsertAtEnd) { 2341 BinaryOperator *Res = Create(Op, S1, S2, Name); 2342 InsertAtEnd->getInstList().push_back(Res); 2343 return Res; 2344 } 2345 2346 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 2347 Instruction *InsertBefore) { 2348 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2349 return new BinaryOperator(Instruction::Sub, 2350 zero, Op, 2351 Op->getType(), Name, InsertBefore); 2352 } 2353 2354 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 2355 BasicBlock *InsertAtEnd) { 2356 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2357 return new BinaryOperator(Instruction::Sub, 2358 zero, Op, 2359 Op->getType(), 
Name, InsertAtEnd); 2360 } 2361 2362 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2363 Instruction *InsertBefore) { 2364 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2365 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore); 2366 } 2367 2368 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2369 BasicBlock *InsertAtEnd) { 2370 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2371 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd); 2372 } 2373 2374 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2375 Instruction *InsertBefore) { 2376 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2377 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore); 2378 } 2379 2380 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2381 BasicBlock *InsertAtEnd) { 2382 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2383 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd); 2384 } 2385 2386 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2387 Instruction *InsertBefore) { 2388 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2389 return new BinaryOperator(Instruction::FSub, zero, Op, 2390 Op->getType(), Name, InsertBefore); 2391 } 2392 2393 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2394 BasicBlock *InsertAtEnd) { 2395 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2396 return new BinaryOperator(Instruction::FSub, zero, Op, 2397 Op->getType(), Name, InsertAtEnd); 2398 } 2399 2400 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2401 Instruction *InsertBefore) { 2402 Constant *C = Constant::getAllOnesValue(Op->getType()); 2403 return new BinaryOperator(Instruction::Xor, Op, C, 2404 Op->getType(), Name, InsertBefore); 2405 } 2406 2407 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2408 BasicBlock *InsertAtEnd) { 2409 Constant *AllOnes = Constant::getAllOnesValue(Op->getType()); 2410 return new BinaryOperator(Instruction::Xor, Op, AllOnes, 2411 Op->getType(), Name, InsertAtEnd); 2412 } 2413 2414 // Exchange the two operands to this instruction. This instruction is safe to 2415 // use on any binary instruction and does not modify the semantics of the 2416 // instruction. If the instruction is order-dependent (SetLT f.e.), the opcode 2417 // is changed. 2418 bool BinaryOperator::swapOperands() { 2419 if (!isCommutative()) 2420 return true; // Can't commute operands 2421 Op<0>().swap(Op<1>()); 2422 return false; 2423 } 2424 2425 //===----------------------------------------------------------------------===// 2426 // FPMathOperator Class 2427 //===----------------------------------------------------------------------===// 2428 2429 float FPMathOperator::getFPAccuracy() const { 2430 const MDNode *MD = 2431 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath); 2432 if (!MD) 2433 return 0.0; 2434 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0)); 2435 return Accuracy->getValueAPF().convertToFloat(); 2436 } 2437 2438 //===----------------------------------------------------------------------===// 2439 // CastInst Class 2440 //===----------------------------------------------------------------------===// 2441 2442 // Just determine if this cast only deals with integral->integral conversion. 
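// For example, trunc, zext and sext always qualify, while a bitcast qualifies
// only when both its source and result types are integers.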
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
    default: return false;
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::Trunc:
      return true;
    case Instruction::BitCast:
      return getOperand(0)->getType()->isIntegerTy() &&
        getType()->isIntegerTy();
  }
}

bool CastInst::isLosslessCast() const {
  // Only BitCast can be lossless, exit fast if we're not BitCast
  if (getOpcode() != Instruction::BitCast)
    return false;

  // Identity cast is always lossless
  Type *SrcTy = getOperand(0)->getType();
  Type *DstTy = getType();
  if (SrcTy == DstTy)
    return true;

  // Pointer to pointer is always lossless.
  if (SrcTy->isPointerTy())
    return DstTy->isPointerTy();
  return false;  // Other types have no identity values
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  switch (Opcode) {
    default: llvm_unreachable("Invalid CastOp");
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::AddrSpaceCast:
      // TODO: Target information may give a more accurate answer here.
      return false;
    case Instruction::BitCast:
      return true;  // BitCast never modifies bits.
    case Instruction::PtrToInt:
      return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
             DestTy->getScalarSizeInBits();
    case Instruction::IntToPtr:
      return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
             SrcTy->getScalarSizeInBits();
  }
}

bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// * %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below. The rows correspond to firstOp, the columns
  // correspond to secondOp.
In looking at the table below, keep in mind 2530 // the following cast properties: 2531 // 2532 // Size Compare Source Destination 2533 // Operator Src ? Size Type Sign Type Sign 2534 // -------- ------------ ------------------- --------------------- 2535 // TRUNC > Integer Any Integral Any 2536 // ZEXT < Integral Unsigned Integer Any 2537 // SEXT < Integral Signed Integer Any 2538 // FPTOUI n/a FloatPt n/a Integral Unsigned 2539 // FPTOSI n/a FloatPt n/a Integral Signed 2540 // UITOFP n/a Integral Unsigned FloatPt n/a 2541 // SITOFP n/a Integral Signed FloatPt n/a 2542 // FPTRUNC > FloatPt n/a FloatPt n/a 2543 // FPEXT < FloatPt n/a FloatPt n/a 2544 // PTRTOINT n/a Pointer n/a Integral Unsigned 2545 // INTTOPTR n/a Integral Unsigned Pointer n/a 2546 // BITCAST = FirstClass n/a FirstClass n/a 2547 // ADDRSPCST n/a Pointer n/a Pointer n/a 2548 // 2549 // NOTE: some transforms are safe, but we consider them to be non-profitable. 2550 // For example, we could merge "fptoui double to i32" + "zext i32 to i64", 2551 // into "fptoui double to i64", but this loses information about the range 2552 // of the produced value (we no longer know the top-part is all zeros). 2553 // Further this conversion is often much more expensive for typical hardware, 2554 // and causes issues when building libgcc. We disallow fptosi+sext for the 2555 // same reason. 2556 const unsigned numCastOps = 2557 Instruction::CastOpsEnd - Instruction::CastOpsBegin; 2558 static const uint8_t CastResults[numCastOps][numCastOps] = { 2559 // T F F U S F F P I B A -+ 2560 // R Z S P P I I T P 2 N T S | 2561 // U E E 2 2 2 2 R E I T C C +- secondOp 2562 // N X X U S F F N X N 2 V V | 2563 // C T T I I P P C T T P T T -+ 2564 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+ 2565 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt | 2566 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt | 2567 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI | 2568 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI | 2569 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp 2570 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP | 2571 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc | 2572 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt | 2573 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt | 2574 { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr | 2575 { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast | 2576 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+ 2577 }; 2578 2579 // TODO: This logic could be encoded into the table above and handled in the 2580 // switch below. 2581 // If either of the casts are a bitcast from scalar to vector, disallow the 2582 // merging. However, any pair of bitcasts are allowed. 2583 bool IsFirstBitcast = (firstOp == Instruction::BitCast); 2584 bool IsSecondBitcast = (secondOp == Instruction::BitCast); 2585 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast; 2586 2587 // Check if any of the casts convert scalars <-> vectors. 2588 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) || 2589 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy))) 2590 if (!AreBothBitcasts) 2591 return 0; 2592 2593 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin] 2594 [secondOp-Instruction::CastOpsBegin]; 2595 switch (ElimCase) { 2596 case 0: 2597 // Categorically disallowed. 2598 return 0; 2599 case 1: 2600 // Allowed, use first cast's opcode. 2601 return firstOp; 2602 case 2: 2603 // Allowed, use second cast's opcode. 
    return secondOp;
  case 3:
    // No-op cast in second op implies firstOp as long as the DestTy
    // is integer and we are not converting between a vector and a
    // non-vector type.
    if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
      return firstOp;
    return 0;
  case 4:
    // No-op cast in second op implies firstOp as long as the DestTy
    // is floating point.
    if (DstTy->isFloatingPointTy())
      return firstOp;
    return 0;
  case 5:
    // No-op cast in first op implies secondOp as long as the SrcTy
    // is an integer.
    if (SrcTy->isIntegerTy())
      return secondOp;
    return 0;
  case 6:
    // No-op cast in first op implies secondOp as long as the SrcTy
    // is a floating point.
    if (SrcTy->isFloatingPointTy())
      return secondOp;
    return 0;
  case 7: {
    // Cannot simplify if address spaces are different!
    if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
      return 0;

    unsigned MidSize = MidTy->getScalarSizeInBits();
    // We can still fold this without knowing the actual sizes as long as we
    // know that the intermediate pointer is the largest possible
    // pointer size.
    // FIXME: Is this always true?
    if (MidSize == 64)
      return Instruction::BitCast;

    // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
    if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
      return 0;
    unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
    if (MidSize >= PtrSize)
      return Instruction::BitCast;
    return 0;
  }
  case 8: {
    // ext, trunc -> bitcast, if the SrcTy and DstTy are same size
    // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
    // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
    unsigned SrcSize = SrcTy->getScalarSizeInBits();
    unsigned DstSize = DstTy->getScalarSizeInBits();
    if (SrcSize == DstSize)
      return Instruction::BitCast;
    else if (SrcSize < DstSize)
      return firstOp;
    return secondOp;
  }
  case 9:
    // zext, sext -> zext, because sext can't sign extend after zext
    return Instruction::ZExt;
  case 11: {
    // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
    if (!MidIntPtrTy)
      return 0;
    unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
    unsigned SrcSize = SrcTy->getScalarSizeInBits();
    unsigned DstSize = DstTy->getScalarSizeInBits();
    if (SrcSize <= PtrSize && SrcSize == DstSize)
      return Instruction::BitCast;
    return 0;
  }
  case 12:
    // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
    // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
    if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
      return Instruction::AddrSpaceCast;
    return Instruction::BitCast;
  case 13:
    // FIXME: this state can be merged with (1), but the following assert
    // is useful to check the correctness of the sequence due to semantic
    // change of bitcast.
    assert(
      SrcTy->isPtrOrPtrVectorTy() &&
      MidTy->isPtrOrPtrVectorTy() &&
      DstTy->isPtrOrPtrVectorTy() &&
      SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
      MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
      "Illegal addrspacecast, bitcast sequence!");
    // Allowed, use first cast's opcode
    return firstOp;
  case 14:
    // bitcast, addrspacecast -> addrspacecast if the element type of
    // bitcast's source is the same as that of addrspacecast's destination.
    if (SrcTy->getScalarType()->getPointerElementType() ==
        DstTy->getScalarType()->getPointerElementType())
      return Instruction::AddrSpaceCast;
    return 0;
  case 15:
    // FIXME: this state can be merged with (1), but the following assert
    // is useful to check the correctness of the sequence due to semantic
    // change of bitcast.
    assert(
      SrcTy->isIntOrIntVectorTy() &&
      MidTy->isPtrOrPtrVectorTy() &&
      DstTy->isPtrOrPtrVectorTy() &&
      MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
      "Illegal inttoptr, bitcast sequence!");
    // Allowed, use first cast's opcode
    return firstOp;
  case 16:
    // FIXME: this state can be merged with (2), but the following assert
    // is useful to check the correctness of the sequence due to semantic
    // change of bitcast.
    assert(
      SrcTy->isPtrOrPtrVectorTy() &&
      MidTy->isPtrOrPtrVectorTy() &&
      DstTy->isIntOrIntVectorTy() &&
      SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
      "Illegal bitcast, ptrtoint sequence!");
    // Allowed, use second cast's opcode
    return secondOp;
  case 17:
    // (sitofp (zext x)) -> (uitofp x)
    return Instruction::UIToFP;
  case 99:
    // Cast combination can't happen (error in input). This is for all cases
    // where the MidTy is not the same for the two cast instructions.
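    // (For example, a trunc followed by an fptoui can never appear back to
    // back: trunc produces an integer, but fptoui consumes a floating-point
    // value.)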
2733 llvm_unreachable("Invalid Cast Combination"); 2734 default: 2735 llvm_unreachable("Error in CastResults table!!!"); 2736 } 2737 } 2738 2739 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2740 const Twine &Name, Instruction *InsertBefore) { 2741 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2742 // Construct and return the appropriate CastInst subclass 2743 switch (op) { 2744 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore); 2745 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore); 2746 case SExt: return new SExtInst (S, Ty, Name, InsertBefore); 2747 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore); 2748 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore); 2749 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore); 2750 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore); 2751 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore); 2752 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore); 2753 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore); 2754 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore); 2755 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore); 2756 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore); 2757 default: llvm_unreachable("Invalid opcode provided"); 2758 } 2759 } 2760 2761 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2762 const Twine &Name, BasicBlock *InsertAtEnd) { 2763 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2764 // Construct and return the appropriate CastInst subclass 2765 switch (op) { 2766 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd); 2767 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd); 2768 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd); 2769 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd); 2770 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd); 2771 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd); 2772 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd); 2773 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd); 2774 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd); 2775 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd); 2776 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd); 2777 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd); 2778 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd); 2779 default: llvm_unreachable("Invalid opcode provided"); 2780 } 2781 } 2782 2783 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2784 const Twine &Name, 2785 Instruction *InsertBefore) { 2786 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2787 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2788 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore); 2789 } 2790 2791 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2792 const Twine &Name, 2793 BasicBlock *InsertAtEnd) { 2794 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2795 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2796 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd); 2797 } 2798 2799 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2800 const Twine &Name, 2801 Instruction *InsertBefore) { 2802 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2803 return 
Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2804 return Create(Instruction::SExt, S, Ty, Name, InsertBefore); 2805 } 2806 2807 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2808 const Twine &Name, 2809 BasicBlock *InsertAtEnd) { 2810 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2811 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2812 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd); 2813 } 2814 2815 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2816 const Twine &Name, 2817 Instruction *InsertBefore) { 2818 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2819 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2820 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore); 2821 } 2822 2823 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2824 const Twine &Name, 2825 BasicBlock *InsertAtEnd) { 2826 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2827 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2828 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd); 2829 } 2830 2831 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2832 const Twine &Name, 2833 BasicBlock *InsertAtEnd) { 2834 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2835 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2836 "Invalid cast"); 2837 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2838 assert((!Ty->isVectorTy() || 2839 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2840 "Invalid cast"); 2841 2842 if (Ty->isIntOrIntVectorTy()) 2843 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd); 2844 2845 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd); 2846 } 2847 2848 /// Create a BitCast or a PtrToInt cast instruction 2849 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2850 const Twine &Name, 2851 Instruction *InsertBefore) { 2852 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2853 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2854 "Invalid cast"); 2855 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2856 assert((!Ty->isVectorTy() || 2857 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2858 "Invalid cast"); 2859 2860 if (Ty->isIntOrIntVectorTy()) 2861 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2862 2863 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore); 2864 } 2865 2866 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2867 Value *S, Type *Ty, 2868 const Twine &Name, 2869 BasicBlock *InsertAtEnd) { 2870 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2871 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2872 2873 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2874 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd); 2875 2876 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2877 } 2878 2879 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2880 Value *S, Type *Ty, 2881 const Twine &Name, 2882 Instruction *InsertBefore) { 2883 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2884 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2885 2886 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2887 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore); 2888 
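  // Same address space on both sides: a plain bitcast is sufficient.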
2889 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2890 } 2891 2892 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty, 2893 const Twine &Name, 2894 Instruction *InsertBefore) { 2895 if (S->getType()->isPointerTy() && Ty->isIntegerTy()) 2896 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2897 if (S->getType()->isIntegerTy() && Ty->isPointerTy()) 2898 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore); 2899 2900 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2901 } 2902 2903 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2904 bool isSigned, const Twine &Name, 2905 Instruction *InsertBefore) { 2906 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2907 "Invalid integer cast"); 2908 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2909 unsigned DstBits = Ty->getScalarSizeInBits(); 2910 Instruction::CastOps opcode = 2911 (SrcBits == DstBits ? Instruction::BitCast : 2912 (SrcBits > DstBits ? Instruction::Trunc : 2913 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2914 return Create(opcode, C, Ty, Name, InsertBefore); 2915 } 2916 2917 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2918 bool isSigned, const Twine &Name, 2919 BasicBlock *InsertAtEnd) { 2920 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2921 "Invalid cast"); 2922 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2923 unsigned DstBits = Ty->getScalarSizeInBits(); 2924 Instruction::CastOps opcode = 2925 (SrcBits == DstBits ? Instruction::BitCast : 2926 (SrcBits > DstBits ? Instruction::Trunc : 2927 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2928 return Create(opcode, C, Ty, Name, InsertAtEnd); 2929 } 2930 2931 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2932 const Twine &Name, 2933 Instruction *InsertBefore) { 2934 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2935 "Invalid cast"); 2936 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2937 unsigned DstBits = Ty->getScalarSizeInBits(); 2938 Instruction::CastOps opcode = 2939 (SrcBits == DstBits ? Instruction::BitCast : 2940 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2941 return Create(opcode, C, Ty, Name, InsertBefore); 2942 } 2943 2944 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2945 const Twine &Name, 2946 BasicBlock *InsertAtEnd) { 2947 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2948 "Invalid cast"); 2949 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2950 unsigned DstBits = Ty->getScalarSizeInBits(); 2951 Instruction::CastOps opcode = 2952 (SrcBits == DstBits ? Instruction::BitCast : 2953 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2954 return Create(opcode, C, Ty, Name, InsertAtEnd); 2955 } 2956 2957 // Check whether it is valid to call getCastOpcode for these types. 2958 // This routine must be kept in sync with getCastOpcode. 2959 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) { 2960 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2961 return false; 2962 2963 if (SrcTy == DestTy) 2964 return true; 2965 2966 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2967 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2968 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2969 // An element by element cast. Valid if casting the elements is valid. 
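        // For example, <4 x i32> to <4 x float> reduces to asking whether
        // i32 can be cast to float (it can, e.g. via sitofp, uitofp or
        // bitcast).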
2970 SrcTy = SrcVecTy->getElementType(); 2971 DestTy = DestVecTy->getElementType(); 2972 } 2973 2974 // Get the bit sizes, we'll need these 2975 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2976 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2977 2978 // Run through the possibilities ... 2979 if (DestTy->isIntegerTy()) { // Casting to integral 2980 if (SrcTy->isIntegerTy()) // Casting from integral 2981 return true; 2982 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2983 return true; 2984 if (SrcTy->isVectorTy()) // Casting from vector 2985 return DestBits == SrcBits; 2986 // Casting from something else 2987 return SrcTy->isPointerTy(); 2988 } 2989 if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2990 if (SrcTy->isIntegerTy()) // Casting from integral 2991 return true; 2992 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2993 return true; 2994 if (SrcTy->isVectorTy()) // Casting from vector 2995 return DestBits == SrcBits; 2996 // Casting from something else 2997 return false; 2998 } 2999 if (DestTy->isVectorTy()) // Casting to vector 3000 return DestBits == SrcBits; 3001 if (DestTy->isPointerTy()) { // Casting to pointer 3002 if (SrcTy->isPointerTy()) // Casting from pointer 3003 return true; 3004 return SrcTy->isIntegerTy(); // Casting from integral 3005 } 3006 if (DestTy->isX86_MMXTy()) { 3007 if (SrcTy->isVectorTy()) 3008 return DestBits == SrcBits; // 64-bit vector to MMX 3009 return false; 3010 } // Casting to something else 3011 return false; 3012 } 3013 3014 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) { 3015 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 3016 return false; 3017 3018 if (SrcTy == DestTy) 3019 return true; 3020 3021 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 3022 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) { 3023 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) { 3024 // An element by element cast. Valid if casting the elements is valid. 
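        // For example, <2 x i64> to <2 x double> reduces to i64 vs. double,
        // which are bitcastable because both are 64 bits wide.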
3025 SrcTy = SrcVecTy->getElementType(); 3026 DestTy = DestVecTy->getElementType(); 3027 } 3028 } 3029 } 3030 3031 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) { 3032 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) { 3033 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace(); 3034 } 3035 } 3036 3037 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 3038 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 3039 3040 // Could still have vectors of pointers if the number of elements doesn't 3041 // match 3042 if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0) 3043 return false; 3044 3045 if (SrcBits != DestBits) 3046 return false; 3047 3048 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy()) 3049 return false; 3050 3051 return true; 3052 } 3053 3054 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, 3055 const DataLayout &DL) { 3056 // ptrtoint and inttoptr are not allowed on non-integral pointers 3057 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy)) 3058 if (auto *IntTy = dyn_cast<IntegerType>(DestTy)) 3059 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 3060 !DL.isNonIntegralPointerType(PtrTy)); 3061 if (auto *PtrTy = dyn_cast<PointerType>(DestTy)) 3062 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy)) 3063 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 3064 !DL.isNonIntegralPointerType(PtrTy)); 3065 3066 return isBitCastable(SrcTy, DestTy); 3067 } 3068 3069 // Provide a way to get a "cast" where the cast opcode is inferred from the 3070 // types and size of the operand. This, basically, is a parallel of the 3071 // logic in the castIsValid function below. This axiom should hold: 3072 // castIsValid( getCastOpcode(Val, Ty), Val, Ty) 3073 // should not assert in castIsValid. In other words, this produces a "correct" 3074 // casting opcode for the arguments passed to it. 3075 // This routine must be kept in sync with isCastable. 3076 Instruction::CastOps 3077 CastInst::getCastOpcode( 3078 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) { 3079 Type *SrcTy = Src->getType(); 3080 3081 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() && 3082 "Only first class types are castable!"); 3083 3084 if (SrcTy == DestTy) 3085 return BitCast; 3086 3087 // FIXME: Check address space sizes here 3088 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 3089 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 3090 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 3091 // An element by element cast. Find the appropriate opcode based on the 3092 // element types. 3093 SrcTy = SrcVecTy->getElementType(); 3094 DestTy = DestVecTy->getElementType(); 3095 } 3096 3097 // Get the bit sizes, we'll need these 3098 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 3099 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 3100 3101 // Run through the possibilities ... 
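// A few illustrative outcomes for scalar operands: i16 -> i64 with SrcIsSigned
// yields SExt, double -> float yields FPTrunc, and i8* -> i64 yields PtrToInt.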
3102 if (DestTy->isIntegerTy()) { // Casting to integral
3103 if (SrcTy->isIntegerTy()) { // Casting from integral
3104 if (DestBits < SrcBits)
3105 return Trunc; // int -> smaller int
3106 else if (DestBits > SrcBits) { // it's an extension
3107 if (SrcIsSigned)
3108 return SExt; // signed -> SEXT
3109 else
3110 return ZExt; // unsigned -> ZEXT
3111 } else {
3112 return BitCast; // Same size, No-op cast
3113 }
3114 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3115 if (DestIsSigned)
3116 return FPToSI; // FP -> sint
3117 else
3118 return FPToUI; // FP -> uint
3119 } else if (SrcTy->isVectorTy()) {
3120 assert(DestBits == SrcBits &&
3121 "Casting vector to integer of different width");
3122 return BitCast; // Same size, no-op cast
3123 } else {
3124 assert(SrcTy->isPointerTy() &&
3125 "Casting from a value that is not a first-class type");
3126 return PtrToInt; // ptr -> int
3127 }
3128 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3129 if (SrcTy->isIntegerTy()) { // Casting from integral
3130 if (SrcIsSigned)
3131 return SIToFP; // sint -> FP
3132 else
3133 return UIToFP; // uint -> FP
3134 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3135 if (DestBits < SrcBits) {
3136 return FPTrunc; // FP -> smaller FP
3137 } else if (DestBits > SrcBits) {
3138 return FPExt; // FP -> larger FP
3139 } else {
3140 return BitCast; // same size, no-op cast
3141 }
3142 } else if (SrcTy->isVectorTy()) {
3143 assert(DestBits == SrcBits &&
3144 "Casting vector to floating point of different width");
3145 return BitCast; // same size, no-op cast
3146 }
3147 llvm_unreachable("Casting pointer or non-first class to float");
3148 } else if (DestTy->isVectorTy()) {
3149 assert(DestBits == SrcBits &&
3150 "Illegal cast to vector (wrong type or size)");
3151 return BitCast;
3152 } else if (DestTy->isPointerTy()) {
3153 if (SrcTy->isPointerTy()) {
3154 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3155 return AddrSpaceCast;
3156 return BitCast; // ptr -> ptr
3157 } else if (SrcTy->isIntegerTy()) {
3158 return IntToPtr; // int -> ptr
3159 }
3160 llvm_unreachable("Casting pointer to other than pointer or int");
3161 } else if (DestTy->isX86_MMXTy()) {
3162 if (SrcTy->isVectorTy()) {
3163 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
3164 return BitCast; // 64-bit vector to MMX
3165 }
3166 llvm_unreachable("Illegal cast to X86_MMX");
3167 }
3168 llvm_unreachable("Casting to type that is not first-class");
3169 }
3170
3171 //===----------------------------------------------------------------------===//
3172 // CastInst SubClass Constructors
3173 //===----------------------------------------------------------------------===//
3174
3175 /// Check that the construction parameters for a CastInst are correct. This
3176 /// could be broken out into the separate constructors but it is useful to have
3177 /// it in one place and to eliminate the redundant code for getting the sizes
3178 /// of the types involved.
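/// For example (illustrative): a Trunc from i64 to i32, or from <2 x i64> to
/// <2 x i32>, is accepted below, while a Trunc from i32 to i64 (widening) or
/// from i64 to <2 x i32> (scalar to vector) is rejected.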
3179 bool 3180 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) { 3181 // Check for type sanity on the arguments 3182 Type *SrcTy = S->getType(); 3183 3184 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() || 3185 SrcTy->isAggregateType() || DstTy->isAggregateType()) 3186 return false; 3187 3188 // Get the size of the types in bits, we'll need this later 3189 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 3190 unsigned DstBitSize = DstTy->getScalarSizeInBits(); 3191 3192 // If these are vector types, get the lengths of the vectors (using zero for 3193 // scalar types means that checking that vector lengths match also checks that 3194 // scalars are not being converted to vectors or vectors to scalars). 3195 unsigned SrcLength = SrcTy->isVectorTy() ? 3196 cast<VectorType>(SrcTy)->getNumElements() : 0; 3197 unsigned DstLength = DstTy->isVectorTy() ? 3198 cast<VectorType>(DstTy)->getNumElements() : 0; 3199 3200 // Switch on the opcode provided 3201 switch (op) { 3202 default: return false; // This is an input error 3203 case Instruction::Trunc: 3204 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3205 SrcLength == DstLength && SrcBitSize > DstBitSize; 3206 case Instruction::ZExt: 3207 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3208 SrcLength == DstLength && SrcBitSize < DstBitSize; 3209 case Instruction::SExt: 3210 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3211 SrcLength == DstLength && SrcBitSize < DstBitSize; 3212 case Instruction::FPTrunc: 3213 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 3214 SrcLength == DstLength && SrcBitSize > DstBitSize; 3215 case Instruction::FPExt: 3216 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 3217 SrcLength == DstLength && SrcBitSize < DstBitSize; 3218 case Instruction::UIToFP: 3219 case Instruction::SIToFP: 3220 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() && 3221 SrcLength == DstLength; 3222 case Instruction::FPToUI: 3223 case Instruction::FPToSI: 3224 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() && 3225 SrcLength == DstLength; 3226 case Instruction::PtrToInt: 3227 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3228 return false; 3229 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3230 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3231 return false; 3232 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy(); 3233 case Instruction::IntToPtr: 3234 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3235 return false; 3236 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3237 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3238 return false; 3239 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy(); 3240 case Instruction::BitCast: { 3241 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3242 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3243 3244 // BitCast implies a no-op cast of type only. No bits change. 3245 // However, you can't cast pointers to anything but pointers. 3246 if (!SrcPtrTy != !DstPtrTy) 3247 return false; 3248 3249 // For non-pointer cases, the cast is okay if the source and destination bit 3250 // widths are identical. 3251 if (!SrcPtrTy) 3252 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits(); 3253 3254 // If both are pointers then the address spaces must match. 
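// For example (illustrative), a bitcast from i8* to i8 addrspace(1)* is
// rejected here; an addrspacecast must be used for that conversion instead.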
3255 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) 3256 return false; 3257 3258 // A vector of pointers must have the same number of elements. 3259 VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy); 3260 VectorType *DstVecTy = dyn_cast<VectorType>(DstTy); 3261 if (SrcVecTy && DstVecTy) 3262 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3263 if (SrcVecTy) 3264 return SrcVecTy->getNumElements() == 1; 3265 if (DstVecTy) 3266 return DstVecTy->getNumElements() == 1; 3267 3268 return true; 3269 } 3270 case Instruction::AddrSpaceCast: { 3271 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3272 if (!SrcPtrTy) 3273 return false; 3274 3275 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3276 if (!DstPtrTy) 3277 return false; 3278 3279 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace()) 3280 return false; 3281 3282 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 3283 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) 3284 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3285 3286 return false; 3287 } 3288 3289 return true; 3290 } 3291 } 3292 } 3293 3294 TruncInst::TruncInst( 3295 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3296 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) { 3297 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3298 } 3299 3300 TruncInst::TruncInst( 3301 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3302 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) { 3303 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3304 } 3305 3306 ZExtInst::ZExtInst( 3307 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3308 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) { 3309 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3310 } 3311 3312 ZExtInst::ZExtInst( 3313 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3314 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) { 3315 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3316 } 3317 SExtInst::SExtInst( 3318 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3319 ) : CastInst(Ty, SExt, S, Name, InsertBefore) { 3320 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3321 } 3322 3323 SExtInst::SExtInst( 3324 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3325 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) { 3326 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3327 } 3328 3329 FPTruncInst::FPTruncInst( 3330 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3331 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { 3332 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3333 } 3334 3335 FPTruncInst::FPTruncInst( 3336 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3337 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { 3338 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3339 } 3340 3341 FPExtInst::FPExtInst( 3342 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3343 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) { 3344 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3345 } 3346 3347 FPExtInst::FPExtInst( 3348 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3349 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { 3350 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3351 } 3352 3353 UIToFPInst::UIToFPInst( 3354 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3355 ) : 
CastInst(Ty, UIToFP, S, Name, InsertBefore) { 3356 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3357 } 3358 3359 UIToFPInst::UIToFPInst( 3360 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3361 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) { 3362 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3363 } 3364 3365 SIToFPInst::SIToFPInst( 3366 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3367 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) { 3368 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3369 } 3370 3371 SIToFPInst::SIToFPInst( 3372 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3373 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) { 3374 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3375 } 3376 3377 FPToUIInst::FPToUIInst( 3378 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3379 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) { 3380 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3381 } 3382 3383 FPToUIInst::FPToUIInst( 3384 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3385 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) { 3386 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3387 } 3388 3389 FPToSIInst::FPToSIInst( 3390 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3391 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) { 3392 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3393 } 3394 3395 FPToSIInst::FPToSIInst( 3396 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3397 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) { 3398 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3399 } 3400 3401 PtrToIntInst::PtrToIntInst( 3402 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3403 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) { 3404 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3405 } 3406 3407 PtrToIntInst::PtrToIntInst( 3408 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3409 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) { 3410 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3411 } 3412 3413 IntToPtrInst::IntToPtrInst( 3414 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3415 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) { 3416 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3417 } 3418 3419 IntToPtrInst::IntToPtrInst( 3420 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3421 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) { 3422 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3423 } 3424 3425 BitCastInst::BitCastInst( 3426 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3427 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) { 3428 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3429 } 3430 3431 BitCastInst::BitCastInst( 3432 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3433 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) { 3434 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3435 } 3436 3437 AddrSpaceCastInst::AddrSpaceCastInst( 3438 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3439 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) { 3440 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3441 } 3442 3443 AddrSpaceCastInst::AddrSpaceCastInst( 3444 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3445 ) : CastInst(Ty, 
AddrSpaceCast, S, Name, InsertAtEnd) { 3446 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3447 } 3448 3449 //===----------------------------------------------------------------------===// 3450 // CmpInst Classes 3451 //===----------------------------------------------------------------------===// 3452 3453 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3454 Value *RHS, const Twine &Name, Instruction *InsertBefore, 3455 Instruction *FlagsSource) 3456 : Instruction(ty, op, 3457 OperandTraits<CmpInst>::op_begin(this), 3458 OperandTraits<CmpInst>::operands(this), 3459 InsertBefore) { 3460 Op<0>() = LHS; 3461 Op<1>() = RHS; 3462 setPredicate((Predicate)predicate); 3463 setName(Name); 3464 if (FlagsSource) 3465 copyIRFlags(FlagsSource); 3466 } 3467 3468 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3469 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd) 3470 : Instruction(ty, op, 3471 OperandTraits<CmpInst>::op_begin(this), 3472 OperandTraits<CmpInst>::operands(this), 3473 InsertAtEnd) { 3474 Op<0>() = LHS; 3475 Op<1>() = RHS; 3476 setPredicate((Predicate)predicate); 3477 setName(Name); 3478 } 3479 3480 CmpInst * 3481 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3482 const Twine &Name, Instruction *InsertBefore) { 3483 if (Op == Instruction::ICmp) { 3484 if (InsertBefore) 3485 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate), 3486 S1, S2, Name); 3487 else 3488 return new ICmpInst(CmpInst::Predicate(predicate), 3489 S1, S2, Name); 3490 } 3491 3492 if (InsertBefore) 3493 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate), 3494 S1, S2, Name); 3495 else 3496 return new FCmpInst(CmpInst::Predicate(predicate), 3497 S1, S2, Name); 3498 } 3499 3500 CmpInst * 3501 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3502 const Twine &Name, BasicBlock *InsertAtEnd) { 3503 if (Op == Instruction::ICmp) { 3504 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3505 S1, S2, Name); 3506 } 3507 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3508 S1, S2, Name); 3509 } 3510 3511 void CmpInst::swapOperands() { 3512 if (ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3513 IC->swapOperands(); 3514 else 3515 cast<FCmpInst>(this)->swapOperands(); 3516 } 3517 3518 bool CmpInst::isCommutative() const { 3519 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3520 return IC->isCommutative(); 3521 return cast<FCmpInst>(this)->isCommutative(); 3522 } 3523 3524 bool CmpInst::isEquality() const { 3525 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3526 return IC->isEquality(); 3527 return cast<FCmpInst>(this)->isEquality(); 3528 } 3529 3530 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) { 3531 switch (pred) { 3532 default: llvm_unreachable("Unknown cmp predicate!"); 3533 case ICMP_EQ: return ICMP_NE; 3534 case ICMP_NE: return ICMP_EQ; 3535 case ICMP_UGT: return ICMP_ULE; 3536 case ICMP_ULT: return ICMP_UGE; 3537 case ICMP_UGE: return ICMP_ULT; 3538 case ICMP_ULE: return ICMP_UGT; 3539 case ICMP_SGT: return ICMP_SLE; 3540 case ICMP_SLT: return ICMP_SGE; 3541 case ICMP_SGE: return ICMP_SLT; 3542 case ICMP_SLE: return ICMP_SGT; 3543 3544 case FCMP_OEQ: return FCMP_UNE; 3545 case FCMP_ONE: return FCMP_UEQ; 3546 case FCMP_OGT: return FCMP_ULE; 3547 case FCMP_OLT: return FCMP_UGE; 3548 case FCMP_OGE: return FCMP_ULT; 3549 case FCMP_OLE: return FCMP_UGT; 3550 case FCMP_UEQ: return FCMP_ONE; 3551 case FCMP_UNE: return FCMP_OEQ; 3552 case 
FCMP_UGT: return FCMP_OLE; 3553 case FCMP_ULT: return FCMP_OGE; 3554 case FCMP_UGE: return FCMP_OLT; 3555 case FCMP_ULE: return FCMP_OGT; 3556 case FCMP_ORD: return FCMP_UNO; 3557 case FCMP_UNO: return FCMP_ORD; 3558 case FCMP_TRUE: return FCMP_FALSE; 3559 case FCMP_FALSE: return FCMP_TRUE; 3560 } 3561 } 3562 3563 StringRef CmpInst::getPredicateName(Predicate Pred) { 3564 switch (Pred) { 3565 default: return "unknown"; 3566 case FCmpInst::FCMP_FALSE: return "false"; 3567 case FCmpInst::FCMP_OEQ: return "oeq"; 3568 case FCmpInst::FCMP_OGT: return "ogt"; 3569 case FCmpInst::FCMP_OGE: return "oge"; 3570 case FCmpInst::FCMP_OLT: return "olt"; 3571 case FCmpInst::FCMP_OLE: return "ole"; 3572 case FCmpInst::FCMP_ONE: return "one"; 3573 case FCmpInst::FCMP_ORD: return "ord"; 3574 case FCmpInst::FCMP_UNO: return "uno"; 3575 case FCmpInst::FCMP_UEQ: return "ueq"; 3576 case FCmpInst::FCMP_UGT: return "ugt"; 3577 case FCmpInst::FCMP_UGE: return "uge"; 3578 case FCmpInst::FCMP_ULT: return "ult"; 3579 case FCmpInst::FCMP_ULE: return "ule"; 3580 case FCmpInst::FCMP_UNE: return "une"; 3581 case FCmpInst::FCMP_TRUE: return "true"; 3582 case ICmpInst::ICMP_EQ: return "eq"; 3583 case ICmpInst::ICMP_NE: return "ne"; 3584 case ICmpInst::ICMP_SGT: return "sgt"; 3585 case ICmpInst::ICMP_SGE: return "sge"; 3586 case ICmpInst::ICMP_SLT: return "slt"; 3587 case ICmpInst::ICMP_SLE: return "sle"; 3588 case ICmpInst::ICMP_UGT: return "ugt"; 3589 case ICmpInst::ICMP_UGE: return "uge"; 3590 case ICmpInst::ICMP_ULT: return "ult"; 3591 case ICmpInst::ICMP_ULE: return "ule"; 3592 } 3593 } 3594 3595 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) { 3596 switch (pred) { 3597 default: llvm_unreachable("Unknown icmp predicate!"); 3598 case ICMP_EQ: case ICMP_NE: 3599 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE: 3600 return pred; 3601 case ICMP_UGT: return ICMP_SGT; 3602 case ICMP_ULT: return ICMP_SLT; 3603 case ICMP_UGE: return ICMP_SGE; 3604 case ICMP_ULE: return ICMP_SLE; 3605 } 3606 } 3607 3608 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) { 3609 switch (pred) { 3610 default: llvm_unreachable("Unknown icmp predicate!"); 3611 case ICMP_EQ: case ICMP_NE: 3612 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE: 3613 return pred; 3614 case ICMP_SGT: return ICMP_UGT; 3615 case ICMP_SLT: return ICMP_ULT; 3616 case ICMP_SGE: return ICMP_UGE; 3617 case ICMP_SLE: return ICMP_ULE; 3618 } 3619 } 3620 3621 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) { 3622 switch (pred) { 3623 default: llvm_unreachable("Unknown or unsupported cmp predicate!"); 3624 case ICMP_SGT: return ICMP_SGE; 3625 case ICMP_SLT: return ICMP_SLE; 3626 case ICMP_SGE: return ICMP_SGT; 3627 case ICMP_SLE: return ICMP_SLT; 3628 case ICMP_UGT: return ICMP_UGE; 3629 case ICMP_ULT: return ICMP_ULE; 3630 case ICMP_UGE: return ICMP_UGT; 3631 case ICMP_ULE: return ICMP_ULT; 3632 3633 case FCMP_OGT: return FCMP_OGE; 3634 case FCMP_OLT: return FCMP_OLE; 3635 case FCMP_OGE: return FCMP_OGT; 3636 case FCMP_OLE: return FCMP_OLT; 3637 case FCMP_UGT: return FCMP_UGE; 3638 case FCMP_ULT: return FCMP_ULE; 3639 case FCMP_UGE: return FCMP_UGT; 3640 case FCMP_ULE: return FCMP_ULT; 3641 } 3642 } 3643 3644 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) { 3645 switch (pred) { 3646 default: llvm_unreachable("Unknown cmp predicate!"); 3647 case ICMP_EQ: case ICMP_NE: 3648 return pred; 3649 case ICMP_SGT: return ICMP_SLT; 3650 case ICMP_SLT: return ICMP_SGT; 3651 case ICMP_SGE: return 
ICMP_SLE;
3652 case ICMP_SLE: return ICMP_SGE;
3653 case ICMP_UGT: return ICMP_ULT;
3654 case ICMP_ULT: return ICMP_UGT;
3655 case ICMP_UGE: return ICMP_ULE;
3656 case ICMP_ULE: return ICMP_UGE;
3657
3658 case FCMP_FALSE: case FCMP_TRUE:
3659 case FCMP_OEQ: case FCMP_ONE:
3660 case FCMP_UEQ: case FCMP_UNE:
3661 case FCMP_ORD: case FCMP_UNO:
3662 return pred;
3663 case FCMP_OGT: return FCMP_OLT;
3664 case FCMP_OLT: return FCMP_OGT;
3665 case FCMP_OGE: return FCMP_OLE;
3666 case FCMP_OLE: return FCMP_OGE;
3667 case FCMP_UGT: return FCMP_ULT;
3668 case FCMP_ULT: return FCMP_UGT;
3669 case FCMP_UGE: return FCMP_ULE;
3670 case FCMP_ULE: return FCMP_UGE;
3671 }
3672 }
3673
3674 CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
3675 switch (pred) {
3676 case ICMP_SGT: return ICMP_SGE;
3677 case ICMP_SLT: return ICMP_SLE;
3678 case ICMP_UGT: return ICMP_UGE;
3679 case ICMP_ULT: return ICMP_ULE;
3680 case FCMP_OGT: return FCMP_OGE;
3681 case FCMP_OLT: return FCMP_OLE;
3682 case FCMP_UGT: return FCMP_UGE;
3683 case FCMP_ULT: return FCMP_ULE;
3684 default: return pred;
3685 }
3686 }
3687
3688 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
3689 assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
3690
3691 switch (pred) {
3692 default:
3693 llvm_unreachable("Unknown predicate!");
3694 case CmpInst::ICMP_ULT:
3695 return CmpInst::ICMP_SLT;
3696 case CmpInst::ICMP_ULE:
3697 return CmpInst::ICMP_SLE;
3698 case CmpInst::ICMP_UGT:
3699 return CmpInst::ICMP_SGT;
3700 case CmpInst::ICMP_UGE:
3701 return CmpInst::ICMP_SGE;
3702 }
3703 }
3704
3705 bool CmpInst::isUnsigned(Predicate predicate) {
3706 switch (predicate) {
3707 default: return false;
3708 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
3709 case ICmpInst::ICMP_UGE: return true;
3710 }
3711 }
3712
3713 bool CmpInst::isSigned(Predicate predicate) {
3714 switch (predicate) {
3715 default: return false;
3716 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
3717 case ICmpInst::ICMP_SGE: return true;
3718 }
3719 }
3720
3721 bool CmpInst::isOrdered(Predicate predicate) {
3722 switch (predicate) {
3723 default: return false;
3724 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
3725 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
3726 case FCmpInst::FCMP_ORD: return true;
3727 }
3728 }
3729
3730 bool CmpInst::isUnordered(Predicate predicate) {
3731 switch (predicate) {
3732 default: return false;
3733 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
3734 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
3735 case FCmpInst::FCMP_UNO: return true;
3736 }
3737 }
3738
3739 bool CmpInst::isTrueWhenEqual(Predicate predicate) {
3740 switch(predicate) {
3741 default: return false;
3742 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3743 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3744 }
3745 }
3746
3747 bool CmpInst::isFalseWhenEqual(Predicate predicate) {
3748 switch(predicate) {
3749 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3750 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3751 default: return false;
3752 }
3753 }
3754
3755 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
3756 // If the predicates match, then we know the first condition implies the
3757 // second is true.
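// The switch below also covers implications between different predicates; for
// example (illustrative), knowing A >u B (ICMP_UGT) is true implies that
// A != B and A >=u B are true as well.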
3758 if (Pred1 == Pred2) 3759 return true; 3760 3761 switch (Pred1) { 3762 default: 3763 break; 3764 case ICMP_EQ: 3765 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true. 3766 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE || 3767 Pred2 == ICMP_SLE; 3768 case ICMP_UGT: // A >u B implies A != B and A >=u B are true. 3769 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE; 3770 case ICMP_ULT: // A <u B implies A != B and A <=u B are true. 3771 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE; 3772 case ICMP_SGT: // A >s B implies A != B and A >=s B are true. 3773 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE; 3774 case ICMP_SLT: // A <s B implies A != B and A <=s B are true. 3775 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE; 3776 } 3777 return false; 3778 } 3779 3780 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3781 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2)); 3782 } 3783 3784 //===----------------------------------------------------------------------===// 3785 // SwitchInst Implementation 3786 //===----------------------------------------------------------------------===// 3787 3788 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) { 3789 assert(Value && Default && NumReserved); 3790 ReservedSpace = NumReserved; 3791 setNumHungOffUseOperands(2); 3792 allocHungoffUses(ReservedSpace); 3793 3794 Op<0>() = Value; 3795 Op<1>() = Default; 3796 } 3797 3798 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3799 /// switch on and a default destination. The number of additional cases can 3800 /// be specified here to make memory allocation more efficient. This 3801 /// constructor can also autoinsert before another instruction. 3802 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3803 Instruction *InsertBefore) 3804 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3805 nullptr, 0, InsertBefore) { 3806 init(Value, Default, 2+NumCases*2); 3807 } 3808 3809 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3810 /// switch on and a default destination. The number of additional cases can 3811 /// be specified here to make memory allocation more efficient. This 3812 /// constructor also autoinserts at the end of the specified BasicBlock. 3813 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3814 BasicBlock *InsertAtEnd) 3815 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3816 nullptr, 0, InsertAtEnd) { 3817 init(Value, Default, 2+NumCases*2); 3818 } 3819 3820 SwitchInst::SwitchInst(const SwitchInst &SI) 3821 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) { 3822 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands()); 3823 setNumHungOffUseOperands(SI.getNumOperands()); 3824 Use *OL = getOperandList(); 3825 const Use *InOL = SI.getOperandList(); 3826 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) { 3827 OL[i] = InOL[i]; 3828 OL[i+1] = InOL[i+1]; 3829 } 3830 SubclassOptionalData = SI.SubclassOptionalData; 3831 } 3832 3833 /// addCase - Add an entry to the switch instruction... 3834 /// 3835 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) { 3836 unsigned NewCaseIdx = getNumCases(); 3837 unsigned OpNo = getNumOperands(); 3838 if (OpNo+2 > ReservedSpace) 3839 growOperands(); // Get more space! 3840 // Initialize some new operands. 
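// Operand layout reminder: [Condition, DefaultDest, Val0, Succ0, Val1, Succ1,
// ...], so the new case occupies operands OpNo and OpNo+1.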
3841 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
3842 setNumHungOffUseOperands(OpNo+2);
3843 CaseHandle Case(this, NewCaseIdx);
3844 Case.setValue(OnVal);
3845 Case.setSuccessor(Dest);
3846 }
3847
3848 /// removeCase - This method removes the specified case and its successor
3849 /// from the switch instruction.
3850 SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
3851 unsigned idx = I->getCaseIndex();
3852
3853 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
3854
3855 unsigned NumOps = getNumOperands();
3856 Use *OL = getOperandList();
3857
3858 // Overwrite this case with the end of the list.
3859 if (2 + (idx + 1) * 2 != NumOps) {
3860 OL[2 + idx * 2] = OL[NumOps - 2];
3861 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
3862 }
3863
3864 // Nuke the last value.
3865 OL[NumOps-2].set(nullptr);
3866 OL[NumOps-2+1].set(nullptr);
3867 setNumHungOffUseOperands(NumOps-2);
3868
3869 return CaseIt(this, idx);
3870 }
3871
3872 /// growOperands - grow operands - This grows the operand list in response
3873 /// to a push_back style of operation. This grows the number of ops by 3 times.
3874 ///
3875 void SwitchInst::growOperands() {
3876 unsigned e = getNumOperands();
3877 unsigned NumOps = e*3;
3878
3879 ReservedSpace = NumOps;
3880 growHungoffUses(ReservedSpace);
3881 }
3882
3883 MDNode *
3884 SwitchInstProfUpdateWrapper::getProfBranchWeightsMD(const SwitchInst &SI) {
3885 if (MDNode *ProfileData = SI.getMetadata(LLVMContext::MD_prof))
3886 if (auto *MDName = dyn_cast<MDString>(ProfileData->getOperand(0)))
3887 if (MDName->getString() == "branch_weights")
3888 return ProfileData;
3889 return nullptr;
3890 }
3891
3892 MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
3893 assert(Changed && "called only if metadata has changed");
3894
3895 if (!Weights)
3896 return nullptr;
3897
3898 assert(SI.getNumSuccessors() == Weights->size() &&
3899 "num of prof branch_weights must accord with num of successors");
3900
3901 bool AllZeroes =
3902 all_of(Weights.getValue(), [](uint32_t W) { return W == 0; });
3903
3904 if (AllZeroes || Weights.getValue().size() < 2)
3905 return nullptr;
3906
3907 return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
3908 }
3909
3910 void SwitchInstProfUpdateWrapper::init() {
3911 MDNode *ProfileData = getProfBranchWeightsMD(SI);
3912 if (!ProfileData)
3913 return;
3914
3915 if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
3916 llvm_unreachable("number of prof branch_weights metadata operands does "
3917 "not correspond to number of successors");
3918 }
3919
3920 SmallVector<uint32_t, 8> Weights;
3921 for (unsigned CI = 1, CE = SI.getNumSuccessors(); CI <= CE; ++CI) {
3922 ConstantInt *C = mdconst::extract<ConstantInt>(ProfileData->getOperand(CI));
3923 uint32_t CW = C->getValue().getZExtValue();
3924 Weights.push_back(CW);
3925 }
3926 this->Weights = std::move(Weights);
3927 }
3928
3929 SwitchInst::CaseIt
3930 SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
3931 if (Weights) {
3932 assert(SI.getNumSuccessors() == Weights->size() &&
3933 "num of prof branch_weights must accord with num of successors");
3934 Changed = true;
3935 // Copy the last case to the place of the removed one and shrink.
3936 // This is tightly coupled with the way SwitchInst::removeCase(CaseIt)
3937 // removes cases from the operand list.
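// Weights[0] corresponds to the default destination, so case I maps to
// Weights[I->getCaseIndex() + 1].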
3938 Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back(); 3939 Weights.getValue().pop_back(); 3940 } 3941 return SI.removeCase(I); 3942 } 3943 3944 void SwitchInstProfUpdateWrapper::addCase( 3945 ConstantInt *OnVal, BasicBlock *Dest, 3946 SwitchInstProfUpdateWrapper::CaseWeightOpt W) { 3947 SI.addCase(OnVal, Dest); 3948 3949 if (!Weights && W && *W) { 3950 Changed = true; 3951 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0); 3952 Weights.getValue()[SI.getNumSuccessors() - 1] = *W; 3953 } else if (Weights) { 3954 Changed = true; 3955 Weights.getValue().push_back(W ? *W : 0); 3956 } 3957 if (Weights) 3958 assert(SI.getNumSuccessors() == Weights->size() && 3959 "num of prof branch_weights must accord with num of successors"); 3960 } 3961 3962 SymbolTableList<Instruction>::iterator 3963 SwitchInstProfUpdateWrapper::eraseFromParent() { 3964 // Instruction is erased. Mark as unchanged to not touch it in the destructor. 3965 Changed = false; 3966 if (Weights) 3967 Weights->resize(0); 3968 return SI.eraseFromParent(); 3969 } 3970 3971 SwitchInstProfUpdateWrapper::CaseWeightOpt 3972 SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) { 3973 if (!Weights) 3974 return None; 3975 return Weights.getValue()[idx]; 3976 } 3977 3978 void SwitchInstProfUpdateWrapper::setSuccessorWeight( 3979 unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) { 3980 if (!W) 3981 return; 3982 3983 if (!Weights && *W) 3984 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0); 3985 3986 if (Weights) { 3987 auto &OldW = Weights.getValue()[idx]; 3988 if (*W != OldW) { 3989 Changed = true; 3990 OldW = *W; 3991 } 3992 } 3993 } 3994 3995 SwitchInstProfUpdateWrapper::CaseWeightOpt 3996 SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI, 3997 unsigned idx) { 3998 if (MDNode *ProfileData = getProfBranchWeightsMD(SI)) 3999 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1) 4000 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1)) 4001 ->getValue() 4002 .getZExtValue(); 4003 4004 return None; 4005 } 4006 4007 //===----------------------------------------------------------------------===// 4008 // IndirectBrInst Implementation 4009 //===----------------------------------------------------------------------===// 4010 4011 void IndirectBrInst::init(Value *Address, unsigned NumDests) { 4012 assert(Address && Address->getType()->isPointerTy() && 4013 "Address of indirectbr must be a pointer"); 4014 ReservedSpace = 1+NumDests; 4015 setNumHungOffUseOperands(1); 4016 allocHungoffUses(ReservedSpace); 4017 4018 Op<0>() = Address; 4019 } 4020 4021 4022 /// growOperands - grow operands - This grows the operand list in response 4023 /// to a push_back style of operation. This grows the number of ops by 2 times. 
4024 /// 4025 void IndirectBrInst::growOperands() { 4026 unsigned e = getNumOperands(); 4027 unsigned NumOps = e*2; 4028 4029 ReservedSpace = NumOps; 4030 growHungoffUses(ReservedSpace); 4031 } 4032 4033 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 4034 Instruction *InsertBefore) 4035 : Instruction(Type::getVoidTy(Address->getContext()), 4036 Instruction::IndirectBr, nullptr, 0, InsertBefore) { 4037 init(Address, NumCases); 4038 } 4039 4040 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 4041 BasicBlock *InsertAtEnd) 4042 : Instruction(Type::getVoidTy(Address->getContext()), 4043 Instruction::IndirectBr, nullptr, 0, InsertAtEnd) { 4044 init(Address, NumCases); 4045 } 4046 4047 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI) 4048 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr, 4049 nullptr, IBI.getNumOperands()) { 4050 allocHungoffUses(IBI.getNumOperands()); 4051 Use *OL = getOperandList(); 4052 const Use *InOL = IBI.getOperandList(); 4053 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i) 4054 OL[i] = InOL[i]; 4055 SubclassOptionalData = IBI.SubclassOptionalData; 4056 } 4057 4058 /// addDestination - Add a destination. 4059 /// 4060 void IndirectBrInst::addDestination(BasicBlock *DestBB) { 4061 unsigned OpNo = getNumOperands(); 4062 if (OpNo+1 > ReservedSpace) 4063 growOperands(); // Get more space! 4064 // Initialize some new operands. 4065 assert(OpNo < ReservedSpace && "Growing didn't work!"); 4066 setNumHungOffUseOperands(OpNo+1); 4067 getOperandList()[OpNo] = DestBB; 4068 } 4069 4070 /// removeDestination - This method removes the specified successor from the 4071 /// indirectbr instruction. 4072 void IndirectBrInst::removeDestination(unsigned idx) { 4073 assert(idx < getNumOperands()-1 && "Successor index out of range!"); 4074 4075 unsigned NumOps = getNumOperands(); 4076 Use *OL = getOperandList(); 4077 4078 // Replace this value with the last one. 4079 OL[idx+1] = OL[NumOps-1]; 4080 4081 // Nuke the last value. 4082 OL[NumOps-1].set(nullptr); 4083 setNumHungOffUseOperands(NumOps-1); 4084 } 4085 4086 //===----------------------------------------------------------------------===// 4087 // FreezeInst Implementation 4088 //===----------------------------------------------------------------------===// 4089 4090 FreezeInst::FreezeInst(Value *S, 4091 const Twine &Name, Instruction *InsertBefore) 4092 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) { 4093 setName(Name); 4094 } 4095 4096 FreezeInst::FreezeInst(Value *S, 4097 const Twine &Name, BasicBlock *InsertAtEnd) 4098 : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) { 4099 setName(Name); 4100 } 4101 4102 //===----------------------------------------------------------------------===// 4103 // cloneImpl() implementations 4104 //===----------------------------------------------------------------------===// 4105 4106 // Define these methods here so vtables don't get emitted into every translation 4107 // unit that uses these classes. 
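// Minimal usage sketch (illustrative; InsertPt stands for some existing
// instruction): callers normally go through Instruction::clone(), which
// dispatches to the cloneImpl() overrides below and then copies metadata.
//
//   Instruction *NewI = I->clone();  // detached copy: no parent, no name
//   NewI->insertBefore(InsertPt);    // the caller places it explicitly
//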
4108 4109 GetElementPtrInst *GetElementPtrInst::cloneImpl() const { 4110 return new (getNumOperands()) GetElementPtrInst(*this); 4111 } 4112 4113 UnaryOperator *UnaryOperator::cloneImpl() const { 4114 return Create(getOpcode(), Op<0>()); 4115 } 4116 4117 BinaryOperator *BinaryOperator::cloneImpl() const { 4118 return Create(getOpcode(), Op<0>(), Op<1>()); 4119 } 4120 4121 FCmpInst *FCmpInst::cloneImpl() const { 4122 return new FCmpInst(getPredicate(), Op<0>(), Op<1>()); 4123 } 4124 4125 ICmpInst *ICmpInst::cloneImpl() const { 4126 return new ICmpInst(getPredicate(), Op<0>(), Op<1>()); 4127 } 4128 4129 ExtractValueInst *ExtractValueInst::cloneImpl() const { 4130 return new ExtractValueInst(*this); 4131 } 4132 4133 InsertValueInst *InsertValueInst::cloneImpl() const { 4134 return new InsertValueInst(*this); 4135 } 4136 4137 AllocaInst *AllocaInst::cloneImpl() const { 4138 AllocaInst *Result = 4139 new AllocaInst(getAllocatedType(), getType()->getAddressSpace(), 4140 (Value *)getOperand(0), MaybeAlign(getAlignment())); 4141 Result->setUsedWithInAlloca(isUsedWithInAlloca()); 4142 Result->setSwiftError(isSwiftError()); 4143 return Result; 4144 } 4145 4146 LoadInst *LoadInst::cloneImpl() const { 4147 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(), 4148 MaybeAlign(getAlignment()), getOrdering(), 4149 getSyncScopeID()); 4150 } 4151 4152 StoreInst *StoreInst::cloneImpl() const { 4153 return new StoreInst(getOperand(0), getOperand(1), isVolatile(), 4154 MaybeAlign(getAlignment()), getOrdering(), 4155 getSyncScopeID()); 4156 } 4157 4158 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const { 4159 AtomicCmpXchgInst *Result = 4160 new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2), 4161 getSuccessOrdering(), getFailureOrdering(), 4162 getSyncScopeID()); 4163 Result->setVolatile(isVolatile()); 4164 Result->setWeak(isWeak()); 4165 return Result; 4166 } 4167 4168 AtomicRMWInst *AtomicRMWInst::cloneImpl() const { 4169 AtomicRMWInst *Result = 4170 new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1), 4171 getOrdering(), getSyncScopeID()); 4172 Result->setVolatile(isVolatile()); 4173 return Result; 4174 } 4175 4176 FenceInst *FenceInst::cloneImpl() const { 4177 return new FenceInst(getContext(), getOrdering(), getSyncScopeID()); 4178 } 4179 4180 TruncInst *TruncInst::cloneImpl() const { 4181 return new TruncInst(getOperand(0), getType()); 4182 } 4183 4184 ZExtInst *ZExtInst::cloneImpl() const { 4185 return new ZExtInst(getOperand(0), getType()); 4186 } 4187 4188 SExtInst *SExtInst::cloneImpl() const { 4189 return new SExtInst(getOperand(0), getType()); 4190 } 4191 4192 FPTruncInst *FPTruncInst::cloneImpl() const { 4193 return new FPTruncInst(getOperand(0), getType()); 4194 } 4195 4196 FPExtInst *FPExtInst::cloneImpl() const { 4197 return new FPExtInst(getOperand(0), getType()); 4198 } 4199 4200 UIToFPInst *UIToFPInst::cloneImpl() const { 4201 return new UIToFPInst(getOperand(0), getType()); 4202 } 4203 4204 SIToFPInst *SIToFPInst::cloneImpl() const { 4205 return new SIToFPInst(getOperand(0), getType()); 4206 } 4207 4208 FPToUIInst *FPToUIInst::cloneImpl() const { 4209 return new FPToUIInst(getOperand(0), getType()); 4210 } 4211 4212 FPToSIInst *FPToSIInst::cloneImpl() const { 4213 return new FPToSIInst(getOperand(0), getType()); 4214 } 4215 4216 PtrToIntInst *PtrToIntInst::cloneImpl() const { 4217 return new PtrToIntInst(getOperand(0), getType()); 4218 } 4219 4220 IntToPtrInst *IntToPtrInst::cloneImpl() const { 4221 return new IntToPtrInst(getOperand(0), 
getType()); 4222 } 4223 4224 BitCastInst *BitCastInst::cloneImpl() const { 4225 return new BitCastInst(getOperand(0), getType()); 4226 } 4227 4228 AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const { 4229 return new AddrSpaceCastInst(getOperand(0), getType()); 4230 } 4231 4232 CallInst *CallInst::cloneImpl() const { 4233 if (hasOperandBundles()) { 4234 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 4235 return new(getNumOperands(), DescriptorBytes) CallInst(*this); 4236 } 4237 return new(getNumOperands()) CallInst(*this); 4238 } 4239 4240 SelectInst *SelectInst::cloneImpl() const { 4241 return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2)); 4242 } 4243 4244 VAArgInst *VAArgInst::cloneImpl() const { 4245 return new VAArgInst(getOperand(0), getType()); 4246 } 4247 4248 ExtractElementInst *ExtractElementInst::cloneImpl() const { 4249 return ExtractElementInst::Create(getOperand(0), getOperand(1)); 4250 } 4251 4252 InsertElementInst *InsertElementInst::cloneImpl() const { 4253 return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2)); 4254 } 4255 4256 ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const { 4257 return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2)); 4258 } 4259 4260 PHINode *PHINode::cloneImpl() const { return new PHINode(*this); } 4261 4262 LandingPadInst *LandingPadInst::cloneImpl() const { 4263 return new LandingPadInst(*this); 4264 } 4265 4266 ReturnInst *ReturnInst::cloneImpl() const { 4267 return new(getNumOperands()) ReturnInst(*this); 4268 } 4269 4270 BranchInst *BranchInst::cloneImpl() const { 4271 return new(getNumOperands()) BranchInst(*this); 4272 } 4273 4274 SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); } 4275 4276 IndirectBrInst *IndirectBrInst::cloneImpl() const { 4277 return new IndirectBrInst(*this); 4278 } 4279 4280 InvokeInst *InvokeInst::cloneImpl() const { 4281 if (hasOperandBundles()) { 4282 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 4283 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this); 4284 } 4285 return new(getNumOperands()) InvokeInst(*this); 4286 } 4287 4288 CallBrInst *CallBrInst::cloneImpl() const { 4289 if (hasOperandBundles()) { 4290 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 4291 return new (getNumOperands(), DescriptorBytes) CallBrInst(*this); 4292 } 4293 return new (getNumOperands()) CallBrInst(*this); 4294 } 4295 4296 ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); } 4297 4298 CleanupReturnInst *CleanupReturnInst::cloneImpl() const { 4299 return new (getNumOperands()) CleanupReturnInst(*this); 4300 } 4301 4302 CatchReturnInst *CatchReturnInst::cloneImpl() const { 4303 return new (getNumOperands()) CatchReturnInst(*this); 4304 } 4305 4306 CatchSwitchInst *CatchSwitchInst::cloneImpl() const { 4307 return new CatchSwitchInst(*this); 4308 } 4309 4310 FuncletPadInst *FuncletPadInst::cloneImpl() const { 4311 return new (getNumOperands()) FuncletPadInst(*this); 4312 } 4313 4314 UnreachableInst *UnreachableInst::cloneImpl() const { 4315 LLVMContext &Context = getContext(); 4316 return new UnreachableInst(Context); 4317 } 4318 4319 FreezeInst *FreezeInst::cloneImpl() const { 4320 return new FreezeInst(getOperand(0)); 4321 } 4322