1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file exposes the class definitions of all of the subclasses of the 10 // Instruction class. This is meant to be an easy way to get access to all 11 // instruction subclasses. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #ifndef LLVM_IR_INSTRUCTIONS_H 16 #define LLVM_IR_INSTRUCTIONS_H 17 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/Bitfields.h" 20 #include "llvm/ADT/None.h" 21 #include "llvm/ADT/STLExtras.h" 22 #include "llvm/ADT/SmallVector.h" 23 #include "llvm/ADT/StringRef.h" 24 #include "llvm/ADT/Twine.h" 25 #include "llvm/ADT/iterator.h" 26 #include "llvm/ADT/iterator_range.h" 27 #include "llvm/IR/Attributes.h" 28 #include "llvm/IR/BasicBlock.h" 29 #include "llvm/IR/CallingConv.h" 30 #include "llvm/IR/Constant.h" 31 #include "llvm/IR/DerivedTypes.h" 32 #include "llvm/IR/Function.h" 33 #include "llvm/IR/InstrTypes.h" 34 #include "llvm/IR/Instruction.h" 35 #include "llvm/IR/OperandTraits.h" 36 #include "llvm/IR/Type.h" 37 #include "llvm/IR/Use.h" 38 #include "llvm/IR/User.h" 39 #include "llvm/IR/Value.h" 40 #include "llvm/Support/AtomicOrdering.h" 41 #include "llvm/Support/Casting.h" 42 #include "llvm/Support/ErrorHandling.h" 43 #include <cassert> 44 #include <cstddef> 45 #include <cstdint> 46 #include <iterator> 47 48 namespace llvm { 49 50 class APInt; 51 class ConstantInt; 52 class DataLayout; 53 class LLVMContext; 54 55 //===----------------------------------------------------------------------===// 56 // AllocaInst Class 57 //===----------------------------------------------------------------------===// 58 59 /// an instruction to allocate memory on the stack 60 class AllocaInst : public UnaryInstruction { 61 Type *AllocatedType; 62 63 using AlignmentField = AlignmentBitfieldElementT<0>; 64 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>; 65 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>; 66 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField, 67 SwiftErrorField>(), 68 "Bitfields must be contiguous"); 69 70 protected: 71 // Note: Instruction needs to be a friend here to call cloneImpl. 72 friend class Instruction; 73 74 AllocaInst *cloneImpl() const; 75 76 public: 77 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 78 const Twine &Name, Instruction *InsertBefore); 79 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 80 const Twine &Name, BasicBlock *InsertAtEnd); 81 82 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 83 Instruction *InsertBefore); 84 AllocaInst(Type *Ty, unsigned AddrSpace, 85 const Twine &Name, BasicBlock *InsertAtEnd); 86 87 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, 88 const Twine &Name = "", Instruction *InsertBefore = nullptr); 89 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, 90 const Twine &Name, BasicBlock *InsertAtEnd); 91 92 /// Return true if there is an allocation size parameter to the allocation 93 /// instruction that is not 1. 94 bool isArrayAllocation() const; 95 96 /// Get the number of elements allocated. 
For a simple allocation of a single 97 /// element, this will return a constant 1 value. 98 const Value *getArraySize() const { return getOperand(0); } 99 Value *getArraySize() { return getOperand(0); } 100 101 /// Overload to return most specific pointer type. 102 PointerType *getType() const { 103 return cast<PointerType>(Instruction::getType()); 104 } 105 106 /// Get allocation size in bits. Returns None if size can't be determined, 107 /// e.g. in case of a VLA. 108 Optional<uint64_t> getAllocationSizeInBits(const DataLayout &DL) const; 109 110 /// Return the type that is being allocated by the instruction. 111 Type *getAllocatedType() const { return AllocatedType; } 112 /// for use only in special circumstances that need to generically 113 /// transform a whole instruction (eg: IR linking and vectorization). 114 void setAllocatedType(Type *Ty) { AllocatedType = Ty; } 115 116 /// Return the alignment of the memory that is being allocated by the 117 /// instruction. 118 Align getAlign() const { 119 return Align(1ULL << getSubclassData<AlignmentField>()); 120 } 121 122 void setAlignment(Align Align) { 123 setSubclassData<AlignmentField>(Log2(Align)); 124 } 125 126 // FIXME: Remove this one transition to Align is over. 127 unsigned getAlignment() const { return getAlign().value(); } 128 129 /// Return true if this alloca is in the entry block of the function and is a 130 /// constant size. If so, the code generator will fold it into the 131 /// prolog/epilog code, so it is basically free. 132 bool isStaticAlloca() const; 133 134 /// Return true if this alloca is used as an inalloca argument to a call. Such 135 /// allocas are never considered static even if they are in the entry block. 136 bool isUsedWithInAlloca() const { 137 return getSubclassData<UsedWithInAllocaField>(); 138 } 139 140 /// Specify whether this alloca is used to represent the arguments to a call. 141 void setUsedWithInAlloca(bool V) { 142 setSubclassData<UsedWithInAllocaField>(V); 143 } 144 145 /// Return true if this alloca is used as a swifterror argument to a call. 146 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); } 147 /// Specify whether this alloca is used to represent a swifterror. 148 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); } 149 150 // Methods for support type inquiry through isa, cast, and dyn_cast: 151 static bool classof(const Instruction *I) { 152 return (I->getOpcode() == Instruction::Alloca); 153 } 154 static bool classof(const Value *V) { 155 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 156 } 157 158 private: 159 // Shadow Instruction::setInstructionSubclassData with a private forwarding 160 // method so that subclasses cannot accidentally use it. 161 template <typename Bitfield> 162 void setSubclassData(typename Bitfield::Type Value) { 163 Instruction::setSubclassData<Bitfield>(Value); 164 } 165 }; 166 167 //===----------------------------------------------------------------------===// 168 // LoadInst Class 169 //===----------------------------------------------------------------------===// 170 171 /// An instruction for reading from memory. This uses the SubclassData field in 172 /// Value to store whether or not the load is volatile. 
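///
/// A minimal creation sketch (illustrative only; `Int32Ty`, `Ptr`, and
/// `InsertBefore` are assumed to exist in the caller as an i32 type, a
/// pointer to i32, and the desired insertion point):
/// \code
///   LoadInst *LI = new LoadInst(Int32Ty, Ptr, "val", /*isVolatile=*/false,
///                               Align(4), InsertBefore);
/// \endcode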
173 class LoadInst : public UnaryInstruction { 174 using VolatileField = BoolBitfieldElementT<0>; 175 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; 176 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; 177 static_assert( 178 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), 179 "Bitfields must be contiguous"); 180 181 void AssertOK(); 182 183 protected: 184 // Note: Instruction needs to be a friend here to call cloneImpl. 185 friend class Instruction; 186 187 LoadInst *cloneImpl() const; 188 189 public: 190 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, 191 Instruction *InsertBefore); 192 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); 193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 194 Instruction *InsertBefore); 195 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 196 BasicBlock *InsertAtEnd); 197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 198 Align Align, Instruction *InsertBefore = nullptr); 199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 200 Align Align, BasicBlock *InsertAtEnd); 201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 202 Align Align, AtomicOrdering Order, 203 SyncScope::ID SSID = SyncScope::System, 204 Instruction *InsertBefore = nullptr); 205 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 206 Align Align, AtomicOrdering Order, SyncScope::ID SSID, 207 BasicBlock *InsertAtEnd); 208 209 /// Return true if this is a load from a volatile memory location. 210 bool isVolatile() const { return getSubclassData<VolatileField>(); } 211 212 /// Specify whether this is a volatile load or not. 213 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 214 215 /// Return the alignment of the access that is being performed. 216 /// FIXME: Remove this function once transition to Align is over. 217 /// Use getAlign() instead. 218 unsigned getAlignment() const { return getAlign().value(); } 219 220 /// Return the alignment of the access that is being performed. 221 Align getAlign() const { 222 return Align(1ULL << (getSubclassData<AlignmentField>())); 223 } 224 225 void setAlignment(Align Align) { 226 setSubclassData<AlignmentField>(Log2(Align)); 227 } 228 229 /// Returns the ordering constraint of this load instruction. 230 AtomicOrdering getOrdering() const { 231 return getSubclassData<OrderingField>(); 232 } 233 /// Sets the ordering constraint of this load instruction. May not be Release 234 /// or AcquireRelease. 235 void setOrdering(AtomicOrdering Ordering) { 236 setSubclassData<OrderingField>(Ordering); 237 } 238 239 /// Returns the synchronization scope ID of this load instruction. 240 SyncScope::ID getSyncScopeID() const { 241 return SSID; 242 } 243 244 /// Sets the synchronization scope ID of this load instruction. 245 void setSyncScopeID(SyncScope::ID SSID) { 246 this->SSID = SSID; 247 } 248 249 /// Sets the ordering constraint and the synchronization scope ID of this load 250 /// instruction. 
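/// For example (illustrative sketch; `LI` is assumed to be an existing
/// LoadInst):
/// \code
///   LI->setAtomic(AtomicOrdering::Acquire); // scope defaults to SyncScope::System
/// \endcode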
251 void setAtomic(AtomicOrdering Ordering, 252 SyncScope::ID SSID = SyncScope::System) { 253 setOrdering(Ordering); 254 setSyncScopeID(SSID); 255 } 256 257 bool isSimple() const { return !isAtomic() && !isVolatile(); } 258 259 bool isUnordered() const { 260 return (getOrdering() == AtomicOrdering::NotAtomic || 261 getOrdering() == AtomicOrdering::Unordered) && 262 !isVolatile(); 263 } 264 265 Value *getPointerOperand() { return getOperand(0); } 266 const Value *getPointerOperand() const { return getOperand(0); } 267 static unsigned getPointerOperandIndex() { return 0U; } 268 Type *getPointerOperandType() const { return getPointerOperand()->getType(); } 269 270 /// Returns the address space of the pointer operand. 271 unsigned getPointerAddressSpace() const { 272 return getPointerOperandType()->getPointerAddressSpace(); 273 } 274 275 // Methods for support type inquiry through isa, cast, and dyn_cast: 276 static bool classof(const Instruction *I) { 277 return I->getOpcode() == Instruction::Load; 278 } 279 static bool classof(const Value *V) { 280 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 281 } 282 283 private: 284 // Shadow Instruction::setInstructionSubclassData with a private forwarding 285 // method so that subclasses cannot accidentally use it. 286 template <typename Bitfield> 287 void setSubclassData(typename Bitfield::Type Value) { 288 Instruction::setSubclassData<Bitfield>(Value); 289 } 290 291 /// The synchronization scope ID of this load instruction. Not quite enough 292 /// room in SubClassData for everything, so synchronization scope ID gets its 293 /// own field. 294 SyncScope::ID SSID; 295 }; 296 297 //===----------------------------------------------------------------------===// 298 // StoreInst Class 299 //===----------------------------------------------------------------------===// 300 301 /// An instruction for storing to memory. 302 class StoreInst : public Instruction { 303 using VolatileField = BoolBitfieldElementT<0>; 304 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; 305 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; 306 static_assert( 307 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), 308 "Bitfields must be contiguous"); 309 310 void AssertOK(); 311 312 protected: 313 // Note: Instruction needs to be a friend here to call cloneImpl. 314 friend class Instruction; 315 316 StoreInst *cloneImpl() const; 317 318 public: 319 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); 320 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); 321 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore); 322 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); 323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 324 Instruction *InsertBefore = nullptr); 325 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 326 BasicBlock *InsertAtEnd); 327 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 328 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, 329 Instruction *InsertBefore = nullptr); 330 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 331 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); 332 333 // allocate space for exactly two operands 334 void *operator new(size_t s) { 335 return User::operator new(s, 2); 336 } 337 338 /// Return true if this is a store to a volatile memory location. 
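/// For example (illustrative sketch; `Val`, `Ptr`, and `InsertBefore` are
/// assumed to exist in the caller):
/// \code
///   StoreInst *SI = new StoreInst(Val, Ptr, /*isVolatile=*/true, InsertBefore);
///   assert(SI->isVolatile());
/// \endcode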
339 bool isVolatile() const { return getSubclassData<VolatileField>(); } 340 341 /// Specify whether this is a volatile store or not. 342 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 343 344 /// Transparently provide more efficient getOperand methods. 345 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 346 347 /// Return the alignment of the access that is being performed 348 /// FIXME: Remove this function once transition to Align is over. 349 /// Use getAlign() instead. 350 unsigned getAlignment() const { return getAlign().value(); } 351 352 Align getAlign() const { 353 return Align(1ULL << (getSubclassData<AlignmentField>())); 354 } 355 356 void setAlignment(Align Align) { 357 setSubclassData<AlignmentField>(Log2(Align)); 358 } 359 360 /// Returns the ordering constraint of this store instruction. 361 AtomicOrdering getOrdering() const { 362 return getSubclassData<OrderingField>(); 363 } 364 365 /// Sets the ordering constraint of this store instruction. May not be 366 /// Acquire or AcquireRelease. 367 void setOrdering(AtomicOrdering Ordering) { 368 setSubclassData<OrderingField>(Ordering); 369 } 370 371 /// Returns the synchronization scope ID of this store instruction. 372 SyncScope::ID getSyncScopeID() const { 373 return SSID; 374 } 375 376 /// Sets the synchronization scope ID of this store instruction. 377 void setSyncScopeID(SyncScope::ID SSID) { 378 this->SSID = SSID; 379 } 380 381 /// Sets the ordering constraint and the synchronization scope ID of this 382 /// store instruction. 383 void setAtomic(AtomicOrdering Ordering, 384 SyncScope::ID SSID = SyncScope::System) { 385 setOrdering(Ordering); 386 setSyncScopeID(SSID); 387 } 388 389 bool isSimple() const { return !isAtomic() && !isVolatile(); } 390 391 bool isUnordered() const { 392 return (getOrdering() == AtomicOrdering::NotAtomic || 393 getOrdering() == AtomicOrdering::Unordered) && 394 !isVolatile(); 395 } 396 397 Value *getValueOperand() { return getOperand(0); } 398 const Value *getValueOperand() const { return getOperand(0); } 399 400 Value *getPointerOperand() { return getOperand(1); } 401 const Value *getPointerOperand() const { return getOperand(1); } 402 static unsigned getPointerOperandIndex() { return 1U; } 403 Type *getPointerOperandType() const { return getPointerOperand()->getType(); } 404 405 /// Returns the address space of the pointer operand. 406 unsigned getPointerAddressSpace() const { 407 return getPointerOperandType()->getPointerAddressSpace(); 408 } 409 410 // Methods for support type inquiry through isa, cast, and dyn_cast: 411 static bool classof(const Instruction *I) { 412 return I->getOpcode() == Instruction::Store; 413 } 414 static bool classof(const Value *V) { 415 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 416 } 417 418 private: 419 // Shadow Instruction::setInstructionSubclassData with a private forwarding 420 // method so that subclasses cannot accidentally use it. 421 template <typename Bitfield> 422 void setSubclassData(typename Bitfield::Type Value) { 423 Instruction::setSubclassData<Bitfield>(Value); 424 } 425 426 /// The synchronization scope ID of this store instruction. Not quite enough 427 /// room in SubClassData for everything, so synchronization scope ID gets its 428 /// own field. 
429 SyncScope::ID SSID; 430 }; 431 432 template <> 433 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { 434 }; 435 436 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) 437 438 //===----------------------------------------------------------------------===// 439 // FenceInst Class 440 //===----------------------------------------------------------------------===// 441 442 /// An instruction for ordering other memory operations. 443 class FenceInst : public Instruction { 444 using OrderingField = AtomicOrderingBitfieldElementT<0>; 445 446 void Init(AtomicOrdering Ordering, SyncScope::ID SSID); 447 448 protected: 449 // Note: Instruction needs to be a friend here to call cloneImpl. 450 friend class Instruction; 451 452 FenceInst *cloneImpl() const; 453 454 public: 455 // Ordering may only be Acquire, Release, AcquireRelease, or 456 // SequentiallyConsistent. 457 FenceInst(LLVMContext &C, AtomicOrdering Ordering, 458 SyncScope::ID SSID = SyncScope::System, 459 Instruction *InsertBefore = nullptr); 460 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, 461 BasicBlock *InsertAtEnd); 462 463 // allocate space for exactly zero operands 464 void *operator new(size_t s) { 465 return User::operator new(s, 0); 466 } 467 468 /// Returns the ordering constraint of this fence instruction. 469 AtomicOrdering getOrdering() const { 470 return getSubclassData<OrderingField>(); 471 } 472 473 /// Sets the ordering constraint of this fence instruction. May only be 474 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. 475 void setOrdering(AtomicOrdering Ordering) { 476 setSubclassData<OrderingField>(Ordering); 477 } 478 479 /// Returns the synchronization scope ID of this fence instruction. 480 SyncScope::ID getSyncScopeID() const { 481 return SSID; 482 } 483 484 /// Sets the synchronization scope ID of this fence instruction. 485 void setSyncScopeID(SyncScope::ID SSID) { 486 this->SSID = SSID; 487 } 488 489 // Methods for support type inquiry through isa, cast, and dyn_cast: 490 static bool classof(const Instruction *I) { 491 return I->getOpcode() == Instruction::Fence; 492 } 493 static bool classof(const Value *V) { 494 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 495 } 496 497 private: 498 // Shadow Instruction::setInstructionSubclassData with a private forwarding 499 // method so that subclasses cannot accidentally use it. 500 template <typename Bitfield> 501 void setSubclassData(typename Bitfield::Type Value) { 502 Instruction::setSubclassData<Bitfield>(Value); 503 } 504 505 /// The synchronization scope ID of this fence instruction. Not quite enough 506 /// room in SubClassData for everything, so synchronization scope ID gets its 507 /// own field. 508 SyncScope::ID SSID; 509 }; 510 511 //===----------------------------------------------------------------------===// 512 // AtomicCmpXchgInst Class 513 //===----------------------------------------------------------------------===// 514 515 /// An instruction that atomically checks whether a 516 /// specified value is in a memory location, and, if it is, stores a new value 517 /// there. The value returned by this instruction is a pair containing the 518 /// original value as first element, and an i1 indicating success (true) or 519 /// failure (false) as second element. 
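///
/// A minimal creation sketch (illustrative only; `Ptr`, `Expected`, and
/// `Desired` are assumed to be an i32 pointer and two i32 values, and
/// `InsertBefore` the insertion point):
/// \code
///   auto *CXI = new AtomicCmpXchgInst(
///       Ptr, Expected, Desired, Align(4),
///       AtomicOrdering::SequentiallyConsistent,
///       AtomicOrdering::SequentiallyConsistent, SyncScope::System,
///       InsertBefore);
/// \endcode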
520 /// 521 class AtomicCmpXchgInst : public Instruction { 522 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align, 523 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, 524 SyncScope::ID SSID); 525 526 template <unsigned Offset> 527 using AtomicOrderingBitfieldElement = 528 typename Bitfield::Element<AtomicOrdering, Offset, 3, 529 AtomicOrdering::LAST>; 530 531 protected: 532 // Note: Instruction needs to be a friend here to call cloneImpl. 533 friend class Instruction; 534 535 AtomicCmpXchgInst *cloneImpl() const; 536 537 public: 538 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, 539 AtomicOrdering SuccessOrdering, 540 AtomicOrdering FailureOrdering, SyncScope::ID SSID, 541 Instruction *InsertBefore = nullptr); 542 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, 543 AtomicOrdering SuccessOrdering, 544 AtomicOrdering FailureOrdering, SyncScope::ID SSID, 545 BasicBlock *InsertAtEnd); 546 547 // allocate space for exactly three operands 548 void *operator new(size_t s) { 549 return User::operator new(s, 3); 550 } 551 552 using VolatileField = BoolBitfieldElementT<0>; 553 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>; 554 using SuccessOrderingField = 555 AtomicOrderingBitfieldElementT<WeakField::NextBit>; 556 using FailureOrderingField = 557 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>; 558 using AlignmentField = 559 AlignmentBitfieldElementT<FailureOrderingField::NextBit>; 560 static_assert( 561 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField, 562 FailureOrderingField, AlignmentField>(), 563 "Bitfields must be contiguous"); 564 565 /// Return the alignment of the memory that is being allocated by the 566 /// instruction. 567 Align getAlign() const { 568 return Align(1ULL << getSubclassData<AlignmentField>()); 569 } 570 571 void setAlignment(Align Align) { 572 setSubclassData<AlignmentField>(Log2(Align)); 573 } 574 575 /// Return true if this is a cmpxchg from a volatile memory 576 /// location. 577 /// 578 bool isVolatile() const { return getSubclassData<VolatileField>(); } 579 580 /// Specify whether this is a volatile cmpxchg. 581 /// 582 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 583 584 /// Return true if this cmpxchg may spuriously fail. 585 bool isWeak() const { return getSubclassData<WeakField>(); } 586 587 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); } 588 589 /// Transparently provide more efficient getOperand methods. 590 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 591 592 /// Returns the success ordering constraint of this cmpxchg instruction. 593 AtomicOrdering getSuccessOrdering() const { 594 return getSubclassData<SuccessOrderingField>(); 595 } 596 597 /// Sets the success ordering constraint of this cmpxchg instruction. 598 void setSuccessOrdering(AtomicOrdering Ordering) { 599 assert(Ordering != AtomicOrdering::NotAtomic && 600 "CmpXchg instructions can only be atomic."); 601 setSubclassData<SuccessOrderingField>(Ordering); 602 } 603 604 /// Returns the failure ordering constraint of this cmpxchg instruction. 605 AtomicOrdering getFailureOrdering() const { 606 return getSubclassData<FailureOrderingField>(); 607 } 608 609 /// Sets the failure ordering constraint of this cmpxchg instruction. 
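/// For example, the failure ordering can be derived from the success ordering
/// (illustrative sketch; `CXI` is an existing AtomicCmpXchgInst):
/// \code
///   CXI->setFailureOrdering(AtomicCmpXchgInst::getStrongestFailureOrdering(
///       CXI->getSuccessOrdering()));
/// \endcode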
610 void setFailureOrdering(AtomicOrdering Ordering) { 611 assert(Ordering != AtomicOrdering::NotAtomic && 612 "CmpXchg instructions can only be atomic."); 613 setSubclassData<FailureOrderingField>(Ordering); 614 } 615 616 /// Returns the synchronization scope ID of this cmpxchg instruction. 617 SyncScope::ID getSyncScopeID() const { 618 return SSID; 619 } 620 621 /// Sets the synchronization scope ID of this cmpxchg instruction. 622 void setSyncScopeID(SyncScope::ID SSID) { 623 this->SSID = SSID; 624 } 625 626 Value *getPointerOperand() { return getOperand(0); } 627 const Value *getPointerOperand() const { return getOperand(0); } 628 static unsigned getPointerOperandIndex() { return 0U; } 629 630 Value *getCompareOperand() { return getOperand(1); } 631 const Value *getCompareOperand() const { return getOperand(1); } 632 633 Value *getNewValOperand() { return getOperand(2); } 634 const Value *getNewValOperand() const { return getOperand(2); } 635 636 /// Returns the address space of the pointer operand. 637 unsigned getPointerAddressSpace() const { 638 return getPointerOperand()->getType()->getPointerAddressSpace(); 639 } 640 641 /// Returns the strongest permitted ordering on failure, given the 642 /// desired ordering on success. 643 /// 644 /// If the comparison in a cmpxchg operation fails, there is no atomic store 645 /// so release semantics cannot be provided. So this function drops explicit 646 /// Release requests from the AtomicOrdering. A SequentiallyConsistent 647 /// operation would remain SequentiallyConsistent. 648 static AtomicOrdering 649 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { 650 switch (SuccessOrdering) { 651 default: 652 llvm_unreachable("invalid cmpxchg success ordering"); 653 case AtomicOrdering::Release: 654 case AtomicOrdering::Monotonic: 655 return AtomicOrdering::Monotonic; 656 case AtomicOrdering::AcquireRelease: 657 case AtomicOrdering::Acquire: 658 return AtomicOrdering::Acquire; 659 case AtomicOrdering::SequentiallyConsistent: 660 return AtomicOrdering::SequentiallyConsistent; 661 } 662 } 663 664 // Methods for support type inquiry through isa, cast, and dyn_cast: 665 static bool classof(const Instruction *I) { 666 return I->getOpcode() == Instruction::AtomicCmpXchg; 667 } 668 static bool classof(const Value *V) { 669 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 670 } 671 672 private: 673 // Shadow Instruction::setInstructionSubclassData with a private forwarding 674 // method so that subclasses cannot accidentally use it. 675 template <typename Bitfield> 676 void setSubclassData(typename Bitfield::Type Value) { 677 Instruction::setSubclassData<Bitfield>(Value); 678 } 679 680 /// The synchronization scope ID of this cmpxchg instruction. Not quite 681 /// enough room in SubClassData for everything, so synchronization scope ID 682 /// gets its own field. 683 SyncScope::ID SSID; 684 }; 685 686 template <> 687 struct OperandTraits<AtomicCmpXchgInst> : 688 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { 689 }; 690 691 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value) 692 693 //===----------------------------------------------------------------------===// 694 // AtomicRMWInst Class 695 //===----------------------------------------------------------------------===// 696 697 /// an instruction that atomically reads a memory location, 698 /// combines it with another value, and then stores the result back. Returns 699 /// the old value. 
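///
/// A minimal creation sketch (illustrative only; `Ptr`, `IncVal`, and
/// `InsertBefore` are assumed to be an i32 pointer, an i32 increment value,
/// and the insertion point):
/// \code
///   auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, IncVal, Align(4),
///                                 AtomicOrdering::Monotonic,
///                                 SyncScope::System, InsertBefore);
/// \endcode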
700 /// 701 class AtomicRMWInst : public Instruction { 702 protected: 703 // Note: Instruction needs to be a friend here to call cloneImpl. 704 friend class Instruction; 705 706 AtomicRMWInst *cloneImpl() const; 707 708 public: 709 /// This enumeration lists the possible modifications atomicrmw can make. In 710 /// the descriptions, 'p' is the pointer to the instruction's memory location, 711 /// 'old' is the initial value of *p, and 'v' is the other value passed to the 712 /// instruction. These instructions always return 'old'. 713 enum BinOp : unsigned { 714 /// *p = v 715 Xchg, 716 /// *p = old + v 717 Add, 718 /// *p = old - v 719 Sub, 720 /// *p = old & v 721 And, 722 /// *p = ~(old & v) 723 Nand, 724 /// *p = old | v 725 Or, 726 /// *p = old ^ v 727 Xor, 728 /// *p = old >signed v ? old : v 729 Max, 730 /// *p = old <signed v ? old : v 731 Min, 732 /// *p = old >unsigned v ? old : v 733 UMax, 734 /// *p = old <unsigned v ? old : v 735 UMin, 736 737 /// *p = old + v 738 FAdd, 739 740 /// *p = old - v 741 FSub, 742 743 FIRST_BINOP = Xchg, 744 LAST_BINOP = FSub, 745 BAD_BINOP 746 }; 747 748 private: 749 template <unsigned Offset> 750 using AtomicOrderingBitfieldElement = 751 typename Bitfield::Element<AtomicOrdering, Offset, 3, 752 AtomicOrdering::LAST>; 753 754 template <unsigned Offset> 755 using BinOpBitfieldElement = 756 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>; 757 758 public: 759 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, 760 AtomicOrdering Ordering, SyncScope::ID SSID, 761 Instruction *InsertBefore = nullptr); 762 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, 763 AtomicOrdering Ordering, SyncScope::ID SSID, 764 BasicBlock *InsertAtEnd); 765 766 // allocate space for exactly two operands 767 void *operator new(size_t s) { 768 return User::operator new(s, 2); 769 } 770 771 using VolatileField = BoolBitfieldElementT<0>; 772 using AtomicOrderingField = 773 AtomicOrderingBitfieldElementT<VolatileField::NextBit>; 774 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>; 775 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>; 776 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField, 777 OperationField, AlignmentField>(), 778 "Bitfields must be contiguous"); 779 780 BinOp getOperation() const { return getSubclassData<OperationField>(); } 781 782 static StringRef getOperationName(BinOp Op); 783 784 static bool isFPOperation(BinOp Op) { 785 switch (Op) { 786 case AtomicRMWInst::FAdd: 787 case AtomicRMWInst::FSub: 788 return true; 789 default: 790 return false; 791 } 792 } 793 794 void setOperation(BinOp Operation) { 795 setSubclassData<OperationField>(Operation); 796 } 797 798 /// Return the alignment of the memory that is being allocated by the 799 /// instruction. 800 Align getAlign() const { 801 return Align(1ULL << getSubclassData<AlignmentField>()); 802 } 803 804 void setAlignment(Align Align) { 805 setSubclassData<AlignmentField>(Log2(Align)); 806 } 807 808 /// Return true if this is a RMW on a volatile memory location. 809 /// 810 bool isVolatile() const { return getSubclassData<VolatileField>(); } 811 812 /// Specify whether this is a volatile RMW or not. 813 /// 814 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 815 816 /// Transparently provide more efficient getOperand methods. 817 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 818 819 /// Returns the ordering constraint of this rmw instruction. 
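/// For example (illustrative sketch; `RMW` is an existing AtomicRMWInst):
/// \code
///   if (RMW->getOrdering() == AtomicOrdering::Monotonic)
///     RMW->setOrdering(AtomicOrdering::SequentiallyConsistent);
/// \endcode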
820 AtomicOrdering getOrdering() const { 821 return getSubclassData<AtomicOrderingField>(); 822 } 823 824 /// Sets the ordering constraint of this rmw instruction. 825 void setOrdering(AtomicOrdering Ordering) { 826 assert(Ordering != AtomicOrdering::NotAtomic && 827 "atomicrmw instructions can only be atomic."); 828 setSubclassData<AtomicOrderingField>(Ordering); 829 } 830 831 /// Returns the synchronization scope ID of this rmw instruction. 832 SyncScope::ID getSyncScopeID() const { 833 return SSID; 834 } 835 836 /// Sets the synchronization scope ID of this rmw instruction. 837 void setSyncScopeID(SyncScope::ID SSID) { 838 this->SSID = SSID; 839 } 840 841 Value *getPointerOperand() { return getOperand(0); } 842 const Value *getPointerOperand() const { return getOperand(0); } 843 static unsigned getPointerOperandIndex() { return 0U; } 844 845 Value *getValOperand() { return getOperand(1); } 846 const Value *getValOperand() const { return getOperand(1); } 847 848 /// Returns the address space of the pointer operand. 849 unsigned getPointerAddressSpace() const { 850 return getPointerOperand()->getType()->getPointerAddressSpace(); 851 } 852 853 bool isFloatingPointOperation() const { 854 return isFPOperation(getOperation()); 855 } 856 857 // Methods for support type inquiry through isa, cast, and dyn_cast: 858 static bool classof(const Instruction *I) { 859 return I->getOpcode() == Instruction::AtomicRMW; 860 } 861 static bool classof(const Value *V) { 862 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 863 } 864 865 private: 866 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align, 867 AtomicOrdering Ordering, SyncScope::ID SSID); 868 869 // Shadow Instruction::setInstructionSubclassData with a private forwarding 870 // method so that subclasses cannot accidentally use it. 871 template <typename Bitfield> 872 void setSubclassData(typename Bitfield::Type Value) { 873 Instruction::setSubclassData<Bitfield>(Value); 874 } 875 876 /// The synchronization scope ID of this rmw instruction. Not quite enough 877 /// room in SubClassData for everything, so synchronization scope ID gets its 878 /// own field. 879 SyncScope::ID SSID; 880 }; 881 882 template <> 883 struct OperandTraits<AtomicRMWInst> 884 : public FixedNumOperandTraits<AtomicRMWInst,2> { 885 }; 886 887 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value) 888 889 //===----------------------------------------------------------------------===// 890 // GetElementPtrInst Class 891 //===----------------------------------------------------------------------===// 892 893 // checkGEPType - Simple wrapper function to give a better assertion failure 894 // message on bad indexes for a gep instruction. 895 // 896 inline Type *checkGEPType(Type *Ty) { 897 assert(Ty && "Invalid GetElementPtrInst indices for type!"); 898 return Ty; 899 } 900 901 /// an instruction for type-safe pointer arithmetic to 902 /// access elements of arrays and structs 903 /// 904 class GetElementPtrInst : public Instruction { 905 Type *SourceElementType; 906 Type *ResultElementType; 907 908 GetElementPtrInst(const GetElementPtrInst &GEPI); 909 910 /// Constructors - Create a getelementptr instruction with a base pointer an 911 /// list of indices. The first ctor can optionally insert before an existing 912 /// instruction, the second appends the new instruction to the specified 913 /// BasicBlock. 
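///
/// Most external clients go through the public Create/CreateInBounds
/// factories below; a minimal sketch (illustrative only; `Int8Ty`, `BasePtr`
/// (of type i8*), `Offset`, and `InsertBefore` are assumed to exist in the
/// caller):
/// \code
///   GetElementPtrInst *GEP = GetElementPtrInst::Create(
///       Int8Ty, BasePtr, {Offset}, "elt.ptr", InsertBefore);
/// \endcode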
914 inline GetElementPtrInst(Type *PointeeType, Value *Ptr, 915 ArrayRef<Value *> IdxList, unsigned Values, 916 const Twine &NameStr, Instruction *InsertBefore); 917 inline GetElementPtrInst(Type *PointeeType, Value *Ptr, 918 ArrayRef<Value *> IdxList, unsigned Values, 919 const Twine &NameStr, BasicBlock *InsertAtEnd); 920 921 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); 922 923 protected: 924 // Note: Instruction needs to be a friend here to call cloneImpl. 925 friend class Instruction; 926 927 GetElementPtrInst *cloneImpl() const; 928 929 public: 930 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, 931 ArrayRef<Value *> IdxList, 932 const Twine &NameStr = "", 933 Instruction *InsertBefore = nullptr) { 934 unsigned Values = 1 + unsigned(IdxList.size()); 935 if (!PointeeType) 936 PointeeType = 937 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); 938 else 939 assert( 940 PointeeType == 941 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()); 942 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, 943 NameStr, InsertBefore); 944 } 945 946 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, 947 ArrayRef<Value *> IdxList, 948 const Twine &NameStr, 949 BasicBlock *InsertAtEnd) { 950 unsigned Values = 1 + unsigned(IdxList.size()); 951 if (!PointeeType) 952 PointeeType = 953 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); 954 else 955 assert( 956 PointeeType == 957 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()); 958 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, 959 NameStr, InsertAtEnd); 960 } 961 962 /// Create an "inbounds" getelementptr. See the documentation for the 963 /// "inbounds" flag in LangRef.html for details. 964 static GetElementPtrInst *CreateInBounds(Value *Ptr, 965 ArrayRef<Value *> IdxList, 966 const Twine &NameStr = "", 967 Instruction *InsertBefore = nullptr){ 968 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore); 969 } 970 971 static GetElementPtrInst * 972 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, 973 const Twine &NameStr = "", 974 Instruction *InsertBefore = nullptr) { 975 GetElementPtrInst *GEP = 976 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); 977 GEP->setIsInBounds(true); 978 return GEP; 979 } 980 981 static GetElementPtrInst *CreateInBounds(Value *Ptr, 982 ArrayRef<Value *> IdxList, 983 const Twine &NameStr, 984 BasicBlock *InsertAtEnd) { 985 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd); 986 } 987 988 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, 989 ArrayRef<Value *> IdxList, 990 const Twine &NameStr, 991 BasicBlock *InsertAtEnd) { 992 GetElementPtrInst *GEP = 993 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); 994 GEP->setIsInBounds(true); 995 return GEP; 996 } 997 998 /// Transparently provide more efficient getOperand methods. 999 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1000 1001 Type *getSourceElementType() const { return SourceElementType; } 1002 1003 void setSourceElementType(Type *Ty) { SourceElementType = Ty; } 1004 void setResultElementType(Type *Ty) { ResultElementType = Ty; } 1005 1006 Type *getResultElementType() const { 1007 assert(ResultElementType == 1008 cast<PointerType>(getType()->getScalarType())->getElementType()); 1009 return ResultElementType; 1010 } 1011 1012 /// Returns the address space of this instruction's pointer type. 
1013 unsigned getAddressSpace() const { 1014 // Note that this is always the same as the pointer operand's address space 1015 // and that is cheaper to compute, so cheat here. 1016 return getPointerAddressSpace(); 1017 } 1018 1019 /// Returns the result type of a getelementptr with the given source 1020 /// element type and indexes. 1021 /// 1022 /// Null is returned if the indices are invalid for the specified 1023 /// source element type. 1024 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList); 1025 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList); 1026 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList); 1027 1028 /// Return the type of the element at the given index of an indexable 1029 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})". 1030 /// 1031 /// Returns null if the type can't be indexed, or the given index is not 1032 /// legal for the given type. 1033 static Type *getTypeAtIndex(Type *Ty, Value *Idx); 1034 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx); 1035 1036 inline op_iterator idx_begin() { return op_begin()+1; } 1037 inline const_op_iterator idx_begin() const { return op_begin()+1; } 1038 inline op_iterator idx_end() { return op_end(); } 1039 inline const_op_iterator idx_end() const { return op_end(); } 1040 1041 inline iterator_range<op_iterator> indices() { 1042 return make_range(idx_begin(), idx_end()); 1043 } 1044 1045 inline iterator_range<const_op_iterator> indices() const { 1046 return make_range(idx_begin(), idx_end()); 1047 } 1048 1049 Value *getPointerOperand() { 1050 return getOperand(0); 1051 } 1052 const Value *getPointerOperand() const { 1053 return getOperand(0); 1054 } 1055 static unsigned getPointerOperandIndex() { 1056 return 0U; // get index for modifying correct operand. 1057 } 1058 1059 /// Method to return the pointer operand as a 1060 /// PointerType. 1061 Type *getPointerOperandType() const { 1062 return getPointerOperand()->getType(); 1063 } 1064 1065 /// Returns the address space of the pointer operand. 1066 unsigned getPointerAddressSpace() const { 1067 return getPointerOperandType()->getPointerAddressSpace(); 1068 } 1069 1070 /// Returns the pointer type returned by the GEP 1071 /// instruction, which may be a vector of pointers. 1072 static Type *getGEPReturnType(Type *ElTy, Value *Ptr, 1073 ArrayRef<Value *> IdxList) { 1074 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)), 1075 Ptr->getType()->getPointerAddressSpace()); 1076 // Vector GEP 1077 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) { 1078 ElementCount EltCount = PtrVTy->getElementCount(); 1079 return VectorType::get(PtrTy, EltCount); 1080 } 1081 for (Value *Index : IdxList) 1082 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) { 1083 ElementCount EltCount = IndexVTy->getElementCount(); 1084 return VectorType::get(PtrTy, EltCount); 1085 } 1086 // Scalar GEP 1087 return PtrTy; 1088 } 1089 1090 unsigned getNumIndices() const { // Note: always non-negative 1091 return getNumOperands() - 1; 1092 } 1093 1094 bool hasIndices() const { 1095 return getNumOperands() > 1; 1096 } 1097 1098 /// Return true if all of the indices of this GEP are 1099 /// zeros. If so, the result pointer and the first operand have the same 1100 /// value, just potentially different types. 1101 bool hasAllZeroIndices() const; 1102 1103 /// Return true if all of the indices of this GEP are 1104 /// constant integers. 
If so, the result pointer and the first operand have 1105 /// a constant offset between them. 1106 bool hasAllConstantIndices() const; 1107 1108 /// Set or clear the inbounds flag on this GEP instruction. 1109 /// See LangRef.html for the meaning of inbounds on a getelementptr. 1110 void setIsInBounds(bool b = true); 1111 1112 /// Determine whether the GEP has the inbounds flag. 1113 bool isInBounds() const; 1114 1115 /// Accumulate the constant address offset of this GEP if possible. 1116 /// 1117 /// This routine accepts an APInt into which it will accumulate the constant 1118 /// offset of this GEP if the GEP is in fact constant. If the GEP is not 1119 /// all-constant, it returns false and the value of the offset APInt is 1120 /// undefined (it is *not* preserved!). The APInt passed into this routine 1121 /// must be at least as wide as the IntPtr type for the address space of 1122 /// the base GEP pointer. 1123 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; 1124 1125 // Methods for support type inquiry through isa, cast, and dyn_cast: 1126 static bool classof(const Instruction *I) { 1127 return (I->getOpcode() == Instruction::GetElementPtr); 1128 } 1129 static bool classof(const Value *V) { 1130 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1131 } 1132 }; 1133 1134 template <> 1135 struct OperandTraits<GetElementPtrInst> : 1136 public VariadicOperandTraits<GetElementPtrInst, 1> { 1137 }; 1138 1139 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, 1140 ArrayRef<Value *> IdxList, unsigned Values, 1141 const Twine &NameStr, 1142 Instruction *InsertBefore) 1143 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, 1144 OperandTraits<GetElementPtrInst>::op_end(this) - Values, 1145 Values, InsertBefore), 1146 SourceElementType(PointeeType), 1147 ResultElementType(getIndexedType(PointeeType, IdxList)) { 1148 assert(ResultElementType == 1149 cast<PointerType>(getType()->getScalarType())->getElementType()); 1150 init(Ptr, IdxList, NameStr); 1151 } 1152 1153 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, 1154 ArrayRef<Value *> IdxList, unsigned Values, 1155 const Twine &NameStr, 1156 BasicBlock *InsertAtEnd) 1157 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, 1158 OperandTraits<GetElementPtrInst>::op_end(this) - Values, 1159 Values, InsertAtEnd), 1160 SourceElementType(PointeeType), 1161 ResultElementType(getIndexedType(PointeeType, IdxList)) { 1162 assert(ResultElementType == 1163 cast<PointerType>(getType()->getScalarType())->getElementType()); 1164 init(Ptr, IdxList, NameStr); 1165 } 1166 1167 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) 1168 1169 //===----------------------------------------------------------------------===// 1170 // ICmpInst Class 1171 //===----------------------------------------------------------------------===// 1172 1173 /// This instruction compares its operands according to the predicate given 1174 /// to the constructor. It only operates on integers or pointers. The operands 1175 /// must be identical types. 1176 /// Represent an integer comparison operator. 
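///
/// A minimal creation sketch (illustrative only; `LHS`, `RHS`, and
/// `InsertBefore` are assumed to be two values of the same integer type and
/// the insertion point):
/// \code
///   ICmpInst *Cmp =
///       new ICmpInst(InsertBefore, ICmpInst::ICMP_SLT, LHS, RHS, "cmp");
/// \endcode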
1177 class ICmpInst: public CmpInst { 1178 void AssertOK() { 1179 assert(isIntPredicate() && 1180 "Invalid ICmp predicate value"); 1181 assert(getOperand(0)->getType() == getOperand(1)->getType() && 1182 "Both operands to ICmp instruction are not of the same type!"); 1183 // Check that the operands are the right type 1184 assert((getOperand(0)->getType()->isIntOrIntVectorTy() || 1185 getOperand(0)->getType()->isPtrOrPtrVectorTy()) && 1186 "Invalid operand types for ICmp instruction"); 1187 } 1188 1189 protected: 1190 // Note: Instruction needs to be a friend here to call cloneImpl. 1191 friend class Instruction; 1192 1193 /// Clone an identical ICmpInst 1194 ICmpInst *cloneImpl() const; 1195 1196 public: 1197 /// Constructor with insert-before-instruction semantics. 1198 ICmpInst( 1199 Instruction *InsertBefore, ///< Where to insert 1200 Predicate pred, ///< The predicate to use for the comparison 1201 Value *LHS, ///< The left-hand-side of the expression 1202 Value *RHS, ///< The right-hand-side of the expression 1203 const Twine &NameStr = "" ///< Name of the instruction 1204 ) : CmpInst(makeCmpResultType(LHS->getType()), 1205 Instruction::ICmp, pred, LHS, RHS, NameStr, 1206 InsertBefore) { 1207 #ifndef NDEBUG 1208 AssertOK(); 1209 #endif 1210 } 1211 1212 /// Constructor with insert-at-end semantics. 1213 ICmpInst( 1214 BasicBlock &InsertAtEnd, ///< Block to insert into. 1215 Predicate pred, ///< The predicate to use for the comparison 1216 Value *LHS, ///< The left-hand-side of the expression 1217 Value *RHS, ///< The right-hand-side of the expression 1218 const Twine &NameStr = "" ///< Name of the instruction 1219 ) : CmpInst(makeCmpResultType(LHS->getType()), 1220 Instruction::ICmp, pred, LHS, RHS, NameStr, 1221 &InsertAtEnd) { 1222 #ifndef NDEBUG 1223 AssertOK(); 1224 #endif 1225 } 1226 1227 /// Constructor with no-insertion semantics 1228 ICmpInst( 1229 Predicate pred, ///< The predicate to use for the comparison 1230 Value *LHS, ///< The left-hand-side of the expression 1231 Value *RHS, ///< The right-hand-side of the expression 1232 const Twine &NameStr = "" ///< Name of the instruction 1233 ) : CmpInst(makeCmpResultType(LHS->getType()), 1234 Instruction::ICmp, pred, LHS, RHS, NameStr) { 1235 #ifndef NDEBUG 1236 AssertOK(); 1237 #endif 1238 } 1239 1240 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. 1241 /// @returns the predicate that would be the result if the operand were 1242 /// regarded as signed. 1243 /// Return the signed version of the predicate 1244 Predicate getSignedPredicate() const { 1245 return getSignedPredicate(getPredicate()); 1246 } 1247 1248 /// This is a static version that you can use without an instruction. 1249 /// Return the signed version of the predicate. 1250 static Predicate getSignedPredicate(Predicate pred); 1251 1252 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. 1253 /// @returns the predicate that would be the result if the operand were 1254 /// regarded as unsigned. 1255 /// Return the unsigned version of the predicate 1256 Predicate getUnsignedPredicate() const { 1257 return getUnsignedPredicate(getPredicate()); 1258 } 1259 1260 /// This is a static version that you can use without an instruction. 1261 /// Return the unsigned version of the predicate. 1262 static Predicate getUnsignedPredicate(Predicate pred); 1263 1264 /// Return true if this predicate is either EQ or NE. This also 1265 /// tests for commutativity. 
1266 static bool isEquality(Predicate P) { 1267 return P == ICMP_EQ || P == ICMP_NE; 1268 } 1269 1270 /// Return true if this predicate is either EQ or NE. This also 1271 /// tests for commutativity. 1272 bool isEquality() const { 1273 return isEquality(getPredicate()); 1274 } 1275 1276 /// @returns true if the predicate of this ICmpInst is commutative 1277 /// Determine if this relation is commutative. 1278 bool isCommutative() const { return isEquality(); } 1279 1280 /// Return true if the predicate is relational (not EQ or NE). 1281 /// 1282 bool isRelational() const { 1283 return !isEquality(); 1284 } 1285 1286 /// Return true if the predicate is relational (not EQ or NE). 1287 /// 1288 static bool isRelational(Predicate P) { 1289 return !isEquality(P); 1290 } 1291 1292 /// Exchange the two operands to this instruction in such a way that it does 1293 /// not modify the semantics of the instruction. The predicate value may be 1294 /// changed to retain the same result if the predicate is order dependent 1295 /// (e.g. ult). 1296 /// Swap operands and adjust predicate. 1297 void swapOperands() { 1298 setPredicate(getSwappedPredicate()); 1299 Op<0>().swap(Op<1>()); 1300 } 1301 1302 // Methods for support type inquiry through isa, cast, and dyn_cast: 1303 static bool classof(const Instruction *I) { 1304 return I->getOpcode() == Instruction::ICmp; 1305 } 1306 static bool classof(const Value *V) { 1307 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1308 } 1309 }; 1310 1311 //===----------------------------------------------------------------------===// 1312 // FCmpInst Class 1313 //===----------------------------------------------------------------------===// 1314 1315 /// This instruction compares its operands according to the predicate given 1316 /// to the constructor. It only operates on floating point values or packed 1317 /// vectors of floating point values. The operands must be identical types. 1318 /// Represents a floating point comparison operator. 1319 class FCmpInst: public CmpInst { 1320 void AssertOK() { 1321 assert(isFPPredicate() && "Invalid FCmp predicate value"); 1322 assert(getOperand(0)->getType() == getOperand(1)->getType() && 1323 "Both operands to FCmp instruction are not of the same type!"); 1324 // Check that the operands are the right type 1325 assert(getOperand(0)->getType()->isFPOrFPVectorTy() && 1326 "Invalid operand types for FCmp instruction"); 1327 } 1328 1329 protected: 1330 // Note: Instruction needs to be a friend here to call cloneImpl. 1331 friend class Instruction; 1332 1333 /// Clone an identical FCmpInst 1334 FCmpInst *cloneImpl() const; 1335 1336 public: 1337 /// Constructor with insert-before-instruction semantics. 1338 FCmpInst( 1339 Instruction *InsertBefore, ///< Where to insert 1340 Predicate pred, ///< The predicate to use for the comparison 1341 Value *LHS, ///< The left-hand-side of the expression 1342 Value *RHS, ///< The right-hand-side of the expression 1343 const Twine &NameStr = "" ///< Name of the instruction 1344 ) : CmpInst(makeCmpResultType(LHS->getType()), 1345 Instruction::FCmp, pred, LHS, RHS, NameStr, 1346 InsertBefore) { 1347 AssertOK(); 1348 } 1349 1350 /// Constructor with insert-at-end semantics. 1351 FCmpInst( 1352 BasicBlock &InsertAtEnd, ///< Block to insert into. 
1353 Predicate pred, ///< The predicate to use for the comparison 1354 Value *LHS, ///< The left-hand-side of the expression 1355 Value *RHS, ///< The right-hand-side of the expression 1356 const Twine &NameStr = "" ///< Name of the instruction 1357 ) : CmpInst(makeCmpResultType(LHS->getType()), 1358 Instruction::FCmp, pred, LHS, RHS, NameStr, 1359 &InsertAtEnd) { 1360 AssertOK(); 1361 } 1362 1363 /// Constructor with no-insertion semantics 1364 FCmpInst( 1365 Predicate Pred, ///< The predicate to use for the comparison 1366 Value *LHS, ///< The left-hand-side of the expression 1367 Value *RHS, ///< The right-hand-side of the expression 1368 const Twine &NameStr = "", ///< Name of the instruction 1369 Instruction *FlagsSource = nullptr 1370 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS, 1371 RHS, NameStr, nullptr, FlagsSource) { 1372 AssertOK(); 1373 } 1374 1375 /// @returns true if the predicate of this instruction is EQ or NE. 1376 /// Determine if this is an equality predicate. 1377 static bool isEquality(Predicate Pred) { 1378 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || 1379 Pred == FCMP_UNE; 1380 } 1381 1382 /// @returns true if the predicate of this instruction is EQ or NE. 1383 /// Determine if this is an equality predicate. 1384 bool isEquality() const { return isEquality(getPredicate()); } 1385 1386 /// @returns true if the predicate of this instruction is commutative. 1387 /// Determine if this is a commutative predicate. 1388 bool isCommutative() const { 1389 return isEquality() || 1390 getPredicate() == FCMP_FALSE || 1391 getPredicate() == FCMP_TRUE || 1392 getPredicate() == FCMP_ORD || 1393 getPredicate() == FCMP_UNO; 1394 } 1395 1396 /// @returns true if the predicate is relational (not EQ or NE). 1397 /// Determine if this a relational predicate. 1398 bool isRelational() const { return !isEquality(); } 1399 1400 /// Exchange the two operands to this instruction in such a way that it does 1401 /// not modify the semantics of the instruction. The predicate value may be 1402 /// changed to retain the same result if the predicate is order dependent 1403 /// (e.g. ult). 1404 /// Swap operands and adjust predicate. 1405 void swapOperands() { 1406 setPredicate(getSwappedPredicate()); 1407 Op<0>().swap(Op<1>()); 1408 } 1409 1410 /// Methods for support type inquiry through isa, cast, and dyn_cast: 1411 static bool classof(const Instruction *I) { 1412 return I->getOpcode() == Instruction::FCmp; 1413 } 1414 static bool classof(const Value *V) { 1415 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1416 } 1417 }; 1418 1419 //===----------------------------------------------------------------------===// 1420 /// This class represents a function call, abstracting a target 1421 /// machine's calling convention. This class uses low bit of the SubClassData 1422 /// field to indicate whether or not this is a tail call. The rest of the bits 1423 /// hold the calling convention of the call. 1424 /// 1425 class CallInst : public CallBase { 1426 CallInst(const CallInst &CI); 1427 1428 /// Construct a CallInst given a range of arguments. 
1429 /// Construct a CallInst from a range of arguments 1430 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1431 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1432 Instruction *InsertBefore); 1433 1434 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1435 const Twine &NameStr, Instruction *InsertBefore) 1436 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {} 1437 1438 /// Construct a CallInst given a range of arguments. 1439 /// Construct a CallInst from a range of arguments 1440 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1441 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1442 BasicBlock *InsertAtEnd); 1443 1444 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, 1445 Instruction *InsertBefore); 1446 1447 CallInst(FunctionType *ty, Value *F, const Twine &NameStr, 1448 BasicBlock *InsertAtEnd); 1449 1450 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, 1451 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 1452 void init(FunctionType *FTy, Value *Func, const Twine &NameStr); 1453 1454 /// Compute the number of operands to allocate. 1455 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { 1456 // We need one operand for the called function, plus the input operand 1457 // counts provided. 1458 return 1 + NumArgs + NumBundleInputs; 1459 } 1460 1461 protected: 1462 // Note: Instruction needs to be a friend here to call cloneImpl. 1463 friend class Instruction; 1464 1465 CallInst *cloneImpl() const; 1466 1467 public: 1468 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", 1469 Instruction *InsertBefore = nullptr) { 1470 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); 1471 } 1472 1473 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1474 const Twine &NameStr, 1475 Instruction *InsertBefore = nullptr) { 1476 return new (ComputeNumOperands(Args.size())) 1477 CallInst(Ty, Func, Args, None, NameStr, InsertBefore); 1478 } 1479 1480 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1481 ArrayRef<OperandBundleDef> Bundles = None, 1482 const Twine &NameStr = "", 1483 Instruction *InsertBefore = nullptr) { 1484 const int NumOperands = 1485 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 1486 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 1487 1488 return new (NumOperands, DescriptorBytes) 1489 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); 1490 } 1491 1492 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, 1493 BasicBlock *InsertAtEnd) { 1494 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd); 1495 } 1496 1497 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1498 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1499 return new (ComputeNumOperands(Args.size())) 1500 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd); 1501 } 1502 1503 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1504 ArrayRef<OperandBundleDef> Bundles, 1505 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1506 const int NumOperands = 1507 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 1508 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 1509 1510 return new (NumOperands, DescriptorBytes) 1511 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd); 1512 } 1513 1514 static 
CallInst *Create(FunctionCallee Func, const Twine &NameStr = "", 1515 Instruction *InsertBefore = nullptr) { 1516 return Create(Func.getFunctionType(), Func.getCallee(), NameStr, 1517 InsertBefore); 1518 } 1519 1520 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1521 ArrayRef<OperandBundleDef> Bundles = None, 1522 const Twine &NameStr = "", 1523 Instruction *InsertBefore = nullptr) { 1524 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, 1525 NameStr, InsertBefore); 1526 } 1527 1528 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1529 const Twine &NameStr, 1530 Instruction *InsertBefore = nullptr) { 1531 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, 1532 InsertBefore); 1533 } 1534 1535 static CallInst *Create(FunctionCallee Func, const Twine &NameStr, 1536 BasicBlock *InsertAtEnd) { 1537 return Create(Func.getFunctionType(), Func.getCallee(), NameStr, 1538 InsertAtEnd); 1539 } 1540 1541 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1542 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1543 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, 1544 InsertAtEnd); 1545 } 1546 1547 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1548 ArrayRef<OperandBundleDef> Bundles, 1549 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1550 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, 1551 NameStr, InsertAtEnd); 1552 } 1553 1554 /// Create a clone of \p CI with a different set of operand bundles and 1555 /// insert it before \p InsertPt. 1556 /// 1557 /// The returned call instruction is identical \p CI in every way except that 1558 /// the operand bundles for the new instruction are set to the operand bundles 1559 /// in \p Bundles. 1560 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, 1561 Instruction *InsertPt = nullptr); 1562 1563 /// Generate the IR for a call to malloc: 1564 /// 1. Compute the malloc call's argument as the specified type's size, 1565 /// possibly multiplied by the array size if the array size is not 1566 /// constant 1. 1567 /// 2. Call malloc with that argument. 1568 /// 3. Bitcast the result of the malloc call to the specified type. 1569 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, 1570 Type *AllocTy, Value *AllocSize, 1571 Value *ArraySize = nullptr, 1572 Function *MallocF = nullptr, 1573 const Twine &Name = ""); 1574 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, 1575 Type *AllocTy, Value *AllocSize, 1576 Value *ArraySize = nullptr, 1577 Function *MallocF = nullptr, 1578 const Twine &Name = ""); 1579 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, 1580 Type *AllocTy, Value *AllocSize, 1581 Value *ArraySize = nullptr, 1582 ArrayRef<OperandBundleDef> Bundles = None, 1583 Function *MallocF = nullptr, 1584 const Twine &Name = ""); 1585 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, 1586 Type *AllocTy, Value *AllocSize, 1587 Value *ArraySize = nullptr, 1588 ArrayRef<OperandBundleDef> Bundles = None, 1589 Function *MallocF = nullptr, 1590 const Twine &Name = ""); 1591 /// Generate the IR for a call to the builtin free function. 
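  /// A minimal usage sketch pairing the two helpers (the insertion points and
  /// the IntPtrTy/AllocTy/AllocSize values below are assumptions supplied by
  /// the caller, not part of this API):
  ///
  ///   Instruction *Buf =
  ///       CallInst::CreateMalloc(InsertBefore, IntPtrTy, AllocTy, AllocSize,
  ///                              /*ArraySize=*/nullptr, /*MallocF=*/nullptr,
  ///                              "buf");
  ///   ...
  ///   CallInst::CreateFree(Buf, FreeInsertBefore);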
1592 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore); 1593 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd); 1594 static Instruction *CreateFree(Value *Source, 1595 ArrayRef<OperandBundleDef> Bundles, 1596 Instruction *InsertBefore); 1597 static Instruction *CreateFree(Value *Source, 1598 ArrayRef<OperandBundleDef> Bundles, 1599 BasicBlock *InsertAtEnd); 1600 1601 // Note that 'musttail' implies 'tail'. 1602 enum TailCallKind : unsigned { 1603 TCK_None = 0, 1604 TCK_Tail = 1, 1605 TCK_MustTail = 2, 1606 TCK_NoTail = 3, 1607 TCK_LAST = TCK_NoTail 1608 }; 1609 1610 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>; 1611 static_assert( 1612 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(), 1613 "Bitfields must be contiguous"); 1614 1615 TailCallKind getTailCallKind() const { 1616 return getSubclassData<TailCallKindField>(); 1617 } 1618 1619 bool isTailCall() const { 1620 TailCallKind Kind = getTailCallKind(); 1621 return Kind == TCK_Tail || Kind == TCK_MustTail; 1622 } 1623 1624 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; } 1625 1626 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; } 1627 1628 void setTailCallKind(TailCallKind TCK) { 1629 setSubclassData<TailCallKindField>(TCK); 1630 } 1631 1632 void setTailCall(bool IsTc = true) { 1633 setTailCallKind(IsTc ? TCK_Tail : TCK_None); 1634 } 1635 1636 /// Return true if the call can return twice 1637 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } 1638 void setCanReturnTwice() { 1639 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice); 1640 } 1641 1642 // Methods for support type inquiry through isa, cast, and dyn_cast: 1643 static bool classof(const Instruction *I) { 1644 return I->getOpcode() == Instruction::Call; 1645 } 1646 static bool classof(const Value *V) { 1647 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1648 } 1649 1650 /// Updates profile metadata by scaling it by \p S / \p T. 1651 void updateProfWeight(uint64_t S, uint64_t T); 1652 1653 private: 1654 // Shadow Instruction::setInstructionSubclassData with a private forwarding 1655 // method so that subclasses cannot accidentally use it. 
1656 template <typename Bitfield> 1657 void setSubclassData(typename Bitfield::Type Value) { 1658 Instruction::setSubclassData<Bitfield>(Value); 1659 } 1660 }; 1661 1662 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1663 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1664 BasicBlock *InsertAtEnd) 1665 : CallBase(Ty->getReturnType(), Instruction::Call, 1666 OperandTraits<CallBase>::op_end(this) - 1667 (Args.size() + CountBundleInputs(Bundles) + 1), 1668 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1669 InsertAtEnd) { 1670 init(Ty, Func, Args, Bundles, NameStr); 1671 } 1672 1673 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1674 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1675 Instruction *InsertBefore) 1676 : CallBase(Ty->getReturnType(), Instruction::Call, 1677 OperandTraits<CallBase>::op_end(this) - 1678 (Args.size() + CountBundleInputs(Bundles) + 1), 1679 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1680 InsertBefore) { 1681 init(Ty, Func, Args, Bundles, NameStr); 1682 } 1683 1684 //===----------------------------------------------------------------------===// 1685 // SelectInst Class 1686 //===----------------------------------------------------------------------===// 1687 1688 /// This class represents the LLVM 'select' instruction. 1689 /// 1690 class SelectInst : public Instruction { 1691 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1692 Instruction *InsertBefore) 1693 : Instruction(S1->getType(), Instruction::Select, 1694 &Op<0>(), 3, InsertBefore) { 1695 init(C, S1, S2); 1696 setName(NameStr); 1697 } 1698 1699 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1700 BasicBlock *InsertAtEnd) 1701 : Instruction(S1->getType(), Instruction::Select, 1702 &Op<0>(), 3, InsertAtEnd) { 1703 init(C, S1, S2); 1704 setName(NameStr); 1705 } 1706 1707 void init(Value *C, Value *S1, Value *S2) { 1708 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select"); 1709 Op<0>() = C; 1710 Op<1>() = S1; 1711 Op<2>() = S2; 1712 } 1713 1714 protected: 1715 // Note: Instruction needs to be a friend here to call cloneImpl. 1716 friend class Instruction; 1717 1718 SelectInst *cloneImpl() const; 1719 1720 public: 1721 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1722 const Twine &NameStr = "", 1723 Instruction *InsertBefore = nullptr, 1724 Instruction *MDFrom = nullptr) { 1725 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); 1726 if (MDFrom) 1727 Sel->copyMetadata(*MDFrom); 1728 return Sel; 1729 } 1730 1731 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1732 const Twine &NameStr, 1733 BasicBlock *InsertAtEnd) { 1734 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); 1735 } 1736 1737 const Value *getCondition() const { return Op<0>(); } 1738 const Value *getTrueValue() const { return Op<1>(); } 1739 const Value *getFalseValue() const { return Op<2>(); } 1740 Value *getCondition() { return Op<0>(); } 1741 Value *getTrueValue() { return Op<1>(); } 1742 Value *getFalseValue() { return Op<2>(); } 1743 1744 void setCondition(Value *V) { Op<0>() = V; } 1745 void setTrueValue(Value *V) { Op<1>() = V; } 1746 void setFalseValue(Value *V) { Op<2>() = V; } 1747 1748 /// Swap the true and false values of the select instruction. 1749 /// This doesn't swap prof metadata. 
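  /// For example (illustrative IR only), swapping the values of
  ///   %r = select i1 %c, i32 %a, i32 %b
  /// yields
  ///   %r = select i1 %c, i32 %b, i32 %a
  /// Callers that want to preserve semantics must also invert %c themselves;
  /// any !prof branch weights are left as they were.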
1750 void swapValues() { Op<1>().swap(Op<2>()); } 1751 1752 /// Return a string if the specified operands are invalid 1753 /// for a select operation, otherwise return null. 1754 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); 1755 1756 /// Transparently provide more efficient getOperand methods. 1757 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1758 1759 OtherOps getOpcode() const { 1760 return static_cast<OtherOps>(Instruction::getOpcode()); 1761 } 1762 1763 // Methods for support type inquiry through isa, cast, and dyn_cast: 1764 static bool classof(const Instruction *I) { 1765 return I->getOpcode() == Instruction::Select; 1766 } 1767 static bool classof(const Value *V) { 1768 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1769 } 1770 }; 1771 1772 template <> 1773 struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { 1774 }; 1775 1776 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value) 1777 1778 //===----------------------------------------------------------------------===// 1779 // VAArgInst Class 1780 //===----------------------------------------------------------------------===// 1781 1782 /// This class represents the va_arg llvm instruction, which returns 1783 /// an argument of the specified type given a va_list and increments that list 1784 /// 1785 class VAArgInst : public UnaryInstruction { 1786 protected: 1787 // Note: Instruction needs to be a friend here to call cloneImpl. 1788 friend class Instruction; 1789 1790 VAArgInst *cloneImpl() const; 1791 1792 public: 1793 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", 1794 Instruction *InsertBefore = nullptr) 1795 : UnaryInstruction(Ty, VAArg, List, InsertBefore) { 1796 setName(NameStr); 1797 } 1798 1799 VAArgInst(Value *List, Type *Ty, const Twine &NameStr, 1800 BasicBlock *InsertAtEnd) 1801 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { 1802 setName(NameStr); 1803 } 1804 1805 Value *getPointerOperand() { return getOperand(0); } 1806 const Value *getPointerOperand() const { return getOperand(0); } 1807 static unsigned getPointerOperandIndex() { return 0U; } 1808 1809 // Methods for support type inquiry through isa, cast, and dyn_cast: 1810 static bool classof(const Instruction *I) { 1811 return I->getOpcode() == VAArg; 1812 } 1813 static bool classof(const Value *V) { 1814 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1815 } 1816 }; 1817 1818 //===----------------------------------------------------------------------===// 1819 // ExtractElementInst Class 1820 //===----------------------------------------------------------------------===// 1821 1822 /// This instruction extracts a single (scalar) 1823 /// element from a VectorType value 1824 /// 1825 class ExtractElementInst : public Instruction { 1826 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", 1827 Instruction *InsertBefore = nullptr); 1828 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, 1829 BasicBlock *InsertAtEnd); 1830 1831 protected: 1832 // Note: Instruction needs to be a friend here to call cloneImpl. 
1833 friend class Instruction; 1834 1835 ExtractElementInst *cloneImpl() const; 1836 1837 public: 1838 static ExtractElementInst *Create(Value *Vec, Value *Idx, 1839 const Twine &NameStr = "", 1840 Instruction *InsertBefore = nullptr) { 1841 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); 1842 } 1843 1844 static ExtractElementInst *Create(Value *Vec, Value *Idx, 1845 const Twine &NameStr, 1846 BasicBlock *InsertAtEnd) { 1847 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); 1848 } 1849 1850 /// Return true if an extractelement instruction can be 1851 /// formed with the specified operands. 1852 static bool isValidOperands(const Value *Vec, const Value *Idx); 1853 1854 Value *getVectorOperand() { return Op<0>(); } 1855 Value *getIndexOperand() { return Op<1>(); } 1856 const Value *getVectorOperand() const { return Op<0>(); } 1857 const Value *getIndexOperand() const { return Op<1>(); } 1858 1859 VectorType *getVectorOperandType() const { 1860 return cast<VectorType>(getVectorOperand()->getType()); 1861 } 1862 1863 /// Transparently provide more efficient getOperand methods. 1864 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1865 1866 // Methods for support type inquiry through isa, cast, and dyn_cast: 1867 static bool classof(const Instruction *I) { 1868 return I->getOpcode() == Instruction::ExtractElement; 1869 } 1870 static bool classof(const Value *V) { 1871 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1872 } 1873 }; 1874 1875 template <> 1876 struct OperandTraits<ExtractElementInst> : 1877 public FixedNumOperandTraits<ExtractElementInst, 2> { 1878 }; 1879 1880 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value) 1881 1882 //===----------------------------------------------------------------------===// 1883 // InsertElementInst Class 1884 //===----------------------------------------------------------------------===// 1885 1886 /// This instruction inserts a single (scalar) 1887 /// element into a VectorType value 1888 /// 1889 class InsertElementInst : public Instruction { 1890 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, 1891 const Twine &NameStr = "", 1892 Instruction *InsertBefore = nullptr); 1893 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, 1894 BasicBlock *InsertAtEnd); 1895 1896 protected: 1897 // Note: Instruction needs to be a friend here to call cloneImpl. 1898 friend class Instruction; 1899 1900 InsertElementInst *cloneImpl() const; 1901 1902 public: 1903 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 1904 const Twine &NameStr = "", 1905 Instruction *InsertBefore = nullptr) { 1906 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); 1907 } 1908 1909 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 1910 const Twine &NameStr, 1911 BasicBlock *InsertAtEnd) { 1912 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); 1913 } 1914 1915 /// Return true if an insertelement instruction can be 1916 /// formed with the specified operands. 1917 static bool isValidOperands(const Value *Vec, const Value *NewElt, 1918 const Value *Idx); 1919 1920 /// Overload to return most specific vector type. 1921 /// 1922 VectorType *getType() const { 1923 return cast<VectorType>(Instruction::getType()); 1924 } 1925 1926 /// Transparently provide more efficient getOperand methods. 
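  /// Illustrative operand layout (sketch): for IR such as
  ///   %v1 = insertelement <4 x float> %vec, float %x, i32 2
  /// getOperand(0) is %vec, getOperand(1) is %x, and getOperand(2) is the
  /// index operand (i32 2).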
1927 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1928 1929 // Methods for support type inquiry through isa, cast, and dyn_cast: 1930 static bool classof(const Instruction *I) { 1931 return I->getOpcode() == Instruction::InsertElement; 1932 } 1933 static bool classof(const Value *V) { 1934 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1935 } 1936 }; 1937 1938 template <> 1939 struct OperandTraits<InsertElementInst> : 1940 public FixedNumOperandTraits<InsertElementInst, 3> { 1941 }; 1942 1943 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value) 1944 1945 //===----------------------------------------------------------------------===// 1946 // ShuffleVectorInst Class 1947 //===----------------------------------------------------------------------===// 1948 1949 constexpr int UndefMaskElem = -1; 1950 1951 /// This instruction constructs a fixed permutation of two 1952 /// input vectors. 1953 /// 1954 /// For each element of the result vector, the shuffle mask selects an element 1955 /// from one of the input vectors to copy to the result. Non-negative elements 1956 /// in the mask represent an index into the concatenated pair of input vectors. 1957 /// UndefMaskElem (-1) specifies that the result element is undefined. 1958 /// 1959 /// For scalable vectors, all the elements of the mask must be 0 or -1. This 1960 /// requirement may be relaxed in the future. 1961 class ShuffleVectorInst : public Instruction { 1962 SmallVector<int, 4> ShuffleMask; 1963 Constant *ShuffleMaskForBitcode; 1964 1965 protected: 1966 // Note: Instruction needs to be a friend here to call cloneImpl. 1967 friend class Instruction; 1968 1969 ShuffleVectorInst *cloneImpl() const; 1970 1971 public: 1972 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1973 const Twine &NameStr = "", 1974 Instruction *InsertBefor = nullptr); 1975 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1976 const Twine &NameStr, BasicBlock *InsertAtEnd); 1977 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 1978 const Twine &NameStr = "", 1979 Instruction *InsertBefor = nullptr); 1980 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 1981 const Twine &NameStr, BasicBlock *InsertAtEnd); 1982 1983 void *operator new(size_t s) { return User::operator new(s, 2); } 1984 1985 /// Swap the operands and adjust the mask to preserve the semantics 1986 /// of the instruction. 1987 void commute(); 1988 1989 /// Return true if a shufflevector instruction can be 1990 /// formed with the specified operands. 1991 static bool isValidOperands(const Value *V1, const Value *V2, 1992 const Value *Mask); 1993 static bool isValidOperands(const Value *V1, const Value *V2, 1994 ArrayRef<int> Mask); 1995 1996 /// Overload to return most specific vector type. 1997 /// 1998 VectorType *getType() const { 1999 return cast<VectorType>(Instruction::getType()); 2000 } 2001 2002 /// Transparently provide more efficient getOperand methods. 2003 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2004 2005 /// Return the shuffle mask value of this instruction for the given element 2006 /// index. Return UndefMaskElem if the element is undef. 2007 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; } 2008 2009 /// Convert the input shuffle mask operand to a vector of integers. Undefined 2010 /// elements of the mask are returned as UndefMaskElem. 2011 static void getShuffleMask(const Constant *Mask, 2012 SmallVectorImpl<int> &Result); 2013 2014 /// Return the mask for this instruction as a vector of integers. 
Undefined 2015 /// elements of the mask are returned as UndefMaskElem. 2016 void getShuffleMask(SmallVectorImpl<int> &Result) const { 2017 Result.assign(ShuffleMask.begin(), ShuffleMask.end()); 2018 } 2019 2020 /// Return the mask for this instruction, for use in bitcode. 2021 /// 2022 /// TODO: This is temporary until we decide a new bitcode encoding for 2023 /// shufflevector. 2024 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; } 2025 2026 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask, 2027 Type *ResultTy); 2028 2029 void setShuffleMask(ArrayRef<int> Mask); 2030 2031 ArrayRef<int> getShuffleMask() const { return ShuffleMask; } 2032 2033 /// Return true if this shuffle returns a vector with a different number of 2034 /// elements than its source vectors. 2035 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> 2036 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> 2037 bool changesLength() const { 2038 unsigned NumSourceElts = 2039 cast<VectorType>(Op<0>()->getType())->getElementCount().Min; 2040 unsigned NumMaskElts = ShuffleMask.size(); 2041 return NumSourceElts != NumMaskElts; 2042 } 2043 2044 /// Return true if this shuffle returns a vector with a greater number of 2045 /// elements than its source vectors. 2046 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> 2047 bool increasesLength() const { 2048 unsigned NumSourceElts = 2049 cast<VectorType>(Op<0>()->getType())->getNumElements(); 2050 unsigned NumMaskElts = ShuffleMask.size(); 2051 return NumSourceElts < NumMaskElts; 2052 } 2053 2054 /// Return true if this shuffle mask chooses elements from exactly one source 2055 /// vector. 2056 /// Example: <7,5,undef,7> 2057 /// This assumes that vector operands are the same length as the mask. 2058 static bool isSingleSourceMask(ArrayRef<int> Mask); 2059 static bool isSingleSourceMask(const Constant *Mask) { 2060 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2061 SmallVector<int, 16> MaskAsInts; 2062 getShuffleMask(Mask, MaskAsInts); 2063 return isSingleSourceMask(MaskAsInts); 2064 } 2065 2066 /// Return true if this shuffle chooses elements from exactly one source 2067 /// vector without changing the length of that vector. 2068 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> 2069 /// TODO: Optionally allow length-changing shuffles. 2070 bool isSingleSource() const { 2071 return !changesLength() && isSingleSourceMask(ShuffleMask); 2072 } 2073 2074 /// Return true if this shuffle mask chooses elements from exactly one source 2075 /// vector without lane crossings. A shuffle using this mask is not 2076 /// necessarily a no-op because it may change the number of elements from its 2077 /// input vectors or it may provide demanded bits knowledge via undef lanes. 2078 /// Example: <undef,undef,2,3> 2079 static bool isIdentityMask(ArrayRef<int> Mask); 2080 static bool isIdentityMask(const Constant *Mask) { 2081 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2082 SmallVector<int, 16> MaskAsInts; 2083 getShuffleMask(Mask, MaskAsInts); 2084 return isIdentityMask(MaskAsInts); 2085 } 2086 2087 /// Return true if this shuffle chooses elements from exactly one source 2088 /// vector without lane crossings and does not change the number of elements 2089 /// from its input vectors. 
2090 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> 2091 bool isIdentity() const { 2092 return !changesLength() && isIdentityMask(ShuffleMask); 2093 } 2094 2095 /// Return true if this shuffle lengthens exactly one source vector with 2096 /// undefs in the high elements. 2097 bool isIdentityWithPadding() const; 2098 2099 /// Return true if this shuffle extracts the first N elements of exactly one 2100 /// source vector. 2101 bool isIdentityWithExtract() const; 2102 2103 /// Return true if this shuffle concatenates its 2 source vectors. This 2104 /// returns false if either input is undefined. In that case, the shuffle is 2105 /// is better classified as an identity with padding operation. 2106 bool isConcat() const; 2107 2108 /// Return true if this shuffle mask chooses elements from its source vectors 2109 /// without lane crossings. A shuffle using this mask would be 2110 /// equivalent to a vector select with a constant condition operand. 2111 /// Example: <4,1,6,undef> 2112 /// This returns false if the mask does not choose from both input vectors. 2113 /// In that case, the shuffle is better classified as an identity shuffle. 2114 /// This assumes that vector operands are the same length as the mask 2115 /// (a length-changing shuffle can never be equivalent to a vector select). 2116 static bool isSelectMask(ArrayRef<int> Mask); 2117 static bool isSelectMask(const Constant *Mask) { 2118 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2119 SmallVector<int, 16> MaskAsInts; 2120 getShuffleMask(Mask, MaskAsInts); 2121 return isSelectMask(MaskAsInts); 2122 } 2123 2124 /// Return true if this shuffle chooses elements from its source vectors 2125 /// without lane crossings and all operands have the same number of elements. 2126 /// In other words, this shuffle is equivalent to a vector select with a 2127 /// constant condition operand. 2128 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3> 2129 /// This returns false if the mask does not choose from both input vectors. 2130 /// In that case, the shuffle is better classified as an identity shuffle. 2131 /// TODO: Optionally allow length-changing shuffles. 2132 bool isSelect() const { 2133 return !changesLength() && isSelectMask(ShuffleMask); 2134 } 2135 2136 /// Return true if this shuffle mask swaps the order of elements from exactly 2137 /// one source vector. 2138 /// Example: <7,6,undef,4> 2139 /// This assumes that vector operands are the same length as the mask. 2140 static bool isReverseMask(ArrayRef<int> Mask); 2141 static bool isReverseMask(const Constant *Mask) { 2142 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2143 SmallVector<int, 16> MaskAsInts; 2144 getShuffleMask(Mask, MaskAsInts); 2145 return isReverseMask(MaskAsInts); 2146 } 2147 2148 /// Return true if this shuffle swaps the order of elements from exactly 2149 /// one source vector. 2150 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef> 2151 /// TODO: Optionally allow length-changing shuffles. 2152 bool isReverse() const { 2153 return !changesLength() && isReverseMask(ShuffleMask); 2154 } 2155 2156 /// Return true if this shuffle mask chooses all elements with the same value 2157 /// as the first element of exactly one source vector. 2158 /// Example: <4,undef,undef,4> 2159 /// This assumes that vector operands are the same length as the mask. 
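  /// A sketch of how such masks commonly arise: the canonical splat idiom
  ///   %ins   = insertelement <4 x i32> undef, i32 %x, i32 0
  ///   %splat = shufflevector <4 x i32> %ins, <4 x i32> undef,
  ///                          <4 x i32> zeroinitializer
  /// uses the mask <0,0,0,0>, for which isZeroEltSplatMask() returns true.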
2160 static bool isZeroEltSplatMask(ArrayRef<int> Mask); 2161 static bool isZeroEltSplatMask(const Constant *Mask) { 2162 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2163 SmallVector<int, 16> MaskAsInts; 2164 getShuffleMask(Mask, MaskAsInts); 2165 return isZeroEltSplatMask(MaskAsInts); 2166 } 2167 2168 /// Return true if all elements of this shuffle are the same value as the 2169 /// first element of exactly one source vector without changing the length 2170 /// of that vector. 2171 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> 2172 /// TODO: Optionally allow length-changing shuffles. 2173 /// TODO: Optionally allow splats from other elements. 2174 bool isZeroEltSplat() const { 2175 return !changesLength() && isZeroEltSplatMask(ShuffleMask); 2176 } 2177 2178 /// Return true if this shuffle mask is a transpose mask. 2179 /// Transpose vector masks transpose a 2xn matrix. They read corresponding 2180 /// even- or odd-numbered vector elements from two n-dimensional source 2181 /// vectors and write each result into consecutive elements of an 2182 /// n-dimensional destination vector. Two shuffles are necessary to complete 2183 /// the transpose, one for the even elements and another for the odd elements. 2184 /// This description closely follows how the TRN1 and TRN2 AArch64 2185 /// instructions operate. 2186 /// 2187 /// For example, a simple 2x2 matrix can be transposed with: 2188 /// 2189 /// ; Original matrix 2190 /// m0 = < a, b > 2191 /// m1 = < c, d > 2192 /// 2193 /// ; Transposed matrix 2194 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > 2195 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > 2196 /// 2197 /// For matrices having greater than n columns, the resulting nx2 transposed 2198 /// matrix is stored in two result vectors such that one vector contains 2199 /// interleaved elements from all the even-numbered rows and the other vector 2200 /// contains interleaved elements from all the odd-numbered rows. For example, 2201 /// a 2x4 matrix can be transposed with: 2202 /// 2203 /// ; Original matrix 2204 /// m0 = < a, b, c, d > 2205 /// m1 = < e, f, g, h > 2206 /// 2207 /// ; Transposed matrix 2208 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > 2209 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > 2210 static bool isTransposeMask(ArrayRef<int> Mask); 2211 static bool isTransposeMask(const Constant *Mask) { 2212 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2213 SmallVector<int, 16> MaskAsInts; 2214 getShuffleMask(Mask, MaskAsInts); 2215 return isTransposeMask(MaskAsInts); 2216 } 2217 2218 /// Return true if this shuffle transposes the elements of its inputs without 2219 /// changing the length of the vectors. This operation may also be known as a 2220 /// merge or interleave. See the description for isTransposeMask() for the 2221 /// exact specification. 2222 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> 2223 bool isTranspose() const { 2224 return !changesLength() && isTransposeMask(ShuffleMask); 2225 } 2226 2227 /// Return true if this shuffle mask is an extract subvector mask. 2228 /// A valid extract subvector mask returns a smaller vector from a single 2229 /// source operand. The base extraction index is returned as well. 
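  /// For example (illustrative), with 4-element source vectors the mask
  /// <2,3> extracts the high half of the first operand and sets Index to 2.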
2230 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, 2231 int &Index); 2232 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, 2233 int &Index) { 2234 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2235 SmallVector<int, 16> MaskAsInts; 2236 getShuffleMask(Mask, MaskAsInts); 2237 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); 2238 } 2239 2240 /// Return true if this shuffle mask is an extract subvector mask. 2241 bool isExtractSubvectorMask(int &Index) const { 2242 int NumSrcElts = cast<VectorType>(Op<0>()->getType())->getNumElements(); 2243 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index); 2244 } 2245 2246 /// Change values in a shuffle permute mask assuming the two vector operands 2247 /// of length InVecNumElts have swapped position. 2248 static void commuteShuffleMask(MutableArrayRef<int> Mask, 2249 unsigned InVecNumElts) { 2250 for (int &Idx : Mask) { 2251 if (Idx == -1) 2252 continue; 2253 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; 2254 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 && 2255 "shufflevector mask index out of range"); 2256 } 2257 } 2258 2259 // Methods for support type inquiry through isa, cast, and dyn_cast: 2260 static bool classof(const Instruction *I) { 2261 return I->getOpcode() == Instruction::ShuffleVector; 2262 } 2263 static bool classof(const Value *V) { 2264 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2265 } 2266 }; 2267 2268 template <> 2269 struct OperandTraits<ShuffleVectorInst> 2270 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {}; 2271 2272 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value) 2273 2274 //===----------------------------------------------------------------------===// 2275 // ExtractValueInst Class 2276 //===----------------------------------------------------------------------===// 2277 2278 /// This instruction extracts a struct member or array 2279 /// element value from an aggregate value. 2280 /// 2281 class ExtractValueInst : public UnaryInstruction { 2282 SmallVector<unsigned, 4> Indices; 2283 2284 ExtractValueInst(const ExtractValueInst &EVI); 2285 2286 /// Constructors - Create a extractvalue instruction with a base aggregate 2287 /// value and a list of indices. The first ctor can optionally insert before 2288 /// an existing instruction, the second appends the new instruction to the 2289 /// specified BasicBlock. 2290 inline ExtractValueInst(Value *Agg, 2291 ArrayRef<unsigned> Idxs, 2292 const Twine &NameStr, 2293 Instruction *InsertBefore); 2294 inline ExtractValueInst(Value *Agg, 2295 ArrayRef<unsigned> Idxs, 2296 const Twine &NameStr, BasicBlock *InsertAtEnd); 2297 2298 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); 2299 2300 protected: 2301 // Note: Instruction needs to be a friend here to call cloneImpl. 
2302 friend class Instruction; 2303 2304 ExtractValueInst *cloneImpl() const; 2305 2306 public: 2307 static ExtractValueInst *Create(Value *Agg, 2308 ArrayRef<unsigned> Idxs, 2309 const Twine &NameStr = "", 2310 Instruction *InsertBefore = nullptr) { 2311 return new 2312 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); 2313 } 2314 2315 static ExtractValueInst *Create(Value *Agg, 2316 ArrayRef<unsigned> Idxs, 2317 const Twine &NameStr, 2318 BasicBlock *InsertAtEnd) { 2319 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); 2320 } 2321 2322 /// Returns the type of the element that would be extracted 2323 /// with an extractvalue instruction with the specified parameters. 2324 /// 2325 /// Null is returned if the indices are invalid for the specified type. 2326 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); 2327 2328 using idx_iterator = const unsigned*; 2329 2330 inline idx_iterator idx_begin() const { return Indices.begin(); } 2331 inline idx_iterator idx_end() const { return Indices.end(); } 2332 inline iterator_range<idx_iterator> indices() const { 2333 return make_range(idx_begin(), idx_end()); 2334 } 2335 2336 Value *getAggregateOperand() { 2337 return getOperand(0); 2338 } 2339 const Value *getAggregateOperand() const { 2340 return getOperand(0); 2341 } 2342 static unsigned getAggregateOperandIndex() { 2343 return 0U; // get index for modifying correct operand 2344 } 2345 2346 ArrayRef<unsigned> getIndices() const { 2347 return Indices; 2348 } 2349 2350 unsigned getNumIndices() const { 2351 return (unsigned)Indices.size(); 2352 } 2353 2354 bool hasIndices() const { 2355 return true; 2356 } 2357 2358 // Methods for support type inquiry through isa, cast, and dyn_cast: 2359 static bool classof(const Instruction *I) { 2360 return I->getOpcode() == Instruction::ExtractValue; 2361 } 2362 static bool classof(const Value *V) { 2363 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2364 } 2365 }; 2366 2367 ExtractValueInst::ExtractValueInst(Value *Agg, 2368 ArrayRef<unsigned> Idxs, 2369 const Twine &NameStr, 2370 Instruction *InsertBefore) 2371 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), 2372 ExtractValue, Agg, InsertBefore) { 2373 init(Idxs, NameStr); 2374 } 2375 2376 ExtractValueInst::ExtractValueInst(Value *Agg, 2377 ArrayRef<unsigned> Idxs, 2378 const Twine &NameStr, 2379 BasicBlock *InsertAtEnd) 2380 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), 2381 ExtractValue, Agg, InsertAtEnd) { 2382 init(Idxs, NameStr); 2383 } 2384 2385 //===----------------------------------------------------------------------===// 2386 // InsertValueInst Class 2387 //===----------------------------------------------------------------------===// 2388 2389 /// This instruction inserts a struct field of array element 2390 /// value into an aggregate value. 2391 /// 2392 class InsertValueInst : public Instruction { 2393 SmallVector<unsigned, 4> Indices; 2394 2395 InsertValueInst(const InsertValueInst &IVI); 2396 2397 /// Constructors - Create a insertvalue instruction with a base aggregate 2398 /// value, a value to insert, and a list of indices. The first ctor can 2399 /// optionally insert before an existing instruction, the second appends 2400 /// the new instruction to the specified BasicBlock. 
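  /// Illustrative IR (sketch): with a single index the instruction looks like
  ///   %agg1 = insertvalue {i32, float} %agg0, float %val, 1
  /// which replaces field 1 of %agg0 with %val and yields the whole aggregate.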
2401 inline InsertValueInst(Value *Agg, Value *Val, 2402 ArrayRef<unsigned> Idxs, 2403 const Twine &NameStr, 2404 Instruction *InsertBefore); 2405 inline InsertValueInst(Value *Agg, Value *Val, 2406 ArrayRef<unsigned> Idxs, 2407 const Twine &NameStr, BasicBlock *InsertAtEnd); 2408 2409 /// Constructors - These two constructors are convenience methods because one 2410 /// and two index insertvalue instructions are so common. 2411 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, 2412 const Twine &NameStr = "", 2413 Instruction *InsertBefore = nullptr); 2414 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, 2415 BasicBlock *InsertAtEnd); 2416 2417 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 2418 const Twine &NameStr); 2419 2420 protected: 2421 // Note: Instruction needs to be a friend here to call cloneImpl. 2422 friend class Instruction; 2423 2424 InsertValueInst *cloneImpl() const; 2425 2426 public: 2427 // allocate space for exactly two operands 2428 void *operator new(size_t s) { 2429 return User::operator new(s, 2); 2430 } 2431 2432 static InsertValueInst *Create(Value *Agg, Value *Val, 2433 ArrayRef<unsigned> Idxs, 2434 const Twine &NameStr = "", 2435 Instruction *InsertBefore = nullptr) { 2436 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); 2437 } 2438 2439 static InsertValueInst *Create(Value *Agg, Value *Val, 2440 ArrayRef<unsigned> Idxs, 2441 const Twine &NameStr, 2442 BasicBlock *InsertAtEnd) { 2443 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); 2444 } 2445 2446 /// Transparently provide more efficient getOperand methods. 2447 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2448 2449 using idx_iterator = const unsigned*; 2450 2451 inline idx_iterator idx_begin() const { return Indices.begin(); } 2452 inline idx_iterator idx_end() const { return Indices.end(); } 2453 inline iterator_range<idx_iterator> indices() const { 2454 return make_range(idx_begin(), idx_end()); 2455 } 2456 2457 Value *getAggregateOperand() { 2458 return getOperand(0); 2459 } 2460 const Value *getAggregateOperand() const { 2461 return getOperand(0); 2462 } 2463 static unsigned getAggregateOperandIndex() { 2464 return 0U; // get index for modifying correct operand 2465 } 2466 2467 Value *getInsertedValueOperand() { 2468 return getOperand(1); 2469 } 2470 const Value *getInsertedValueOperand() const { 2471 return getOperand(1); 2472 } 2473 static unsigned getInsertedValueOperandIndex() { 2474 return 1U; // get index for modifying correct operand 2475 } 2476 2477 ArrayRef<unsigned> getIndices() const { 2478 return Indices; 2479 } 2480 2481 unsigned getNumIndices() const { 2482 return (unsigned)Indices.size(); 2483 } 2484 2485 bool hasIndices() const { 2486 return true; 2487 } 2488 2489 // Methods for support type inquiry through isa, cast, and dyn_cast: 2490 static bool classof(const Instruction *I) { 2491 return I->getOpcode() == Instruction::InsertValue; 2492 } 2493 static bool classof(const Value *V) { 2494 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2495 } 2496 }; 2497 2498 template <> 2499 struct OperandTraits<InsertValueInst> : 2500 public FixedNumOperandTraits<InsertValueInst, 2> { 2501 }; 2502 2503 InsertValueInst::InsertValueInst(Value *Agg, 2504 Value *Val, 2505 ArrayRef<unsigned> Idxs, 2506 const Twine &NameStr, 2507 Instruction *InsertBefore) 2508 : Instruction(Agg->getType(), InsertValue, 2509 OperandTraits<InsertValueInst>::op_begin(this), 2510 2, InsertBefore) { 2511 init(Agg, Val, Idxs, NameStr); 2512 
} 2513 2514 InsertValueInst::InsertValueInst(Value *Agg, 2515 Value *Val, 2516 ArrayRef<unsigned> Idxs, 2517 const Twine &NameStr, 2518 BasicBlock *InsertAtEnd) 2519 : Instruction(Agg->getType(), InsertValue, 2520 OperandTraits<InsertValueInst>::op_begin(this), 2521 2, InsertAtEnd) { 2522 init(Agg, Val, Idxs, NameStr); 2523 } 2524 2525 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value) 2526 2527 //===----------------------------------------------------------------------===// 2528 // PHINode Class 2529 //===----------------------------------------------------------------------===// 2530 2531 // PHINode - The PHINode class is used to represent the magical mystical PHI 2532 // node, that can not exist in nature, but can be synthesized in a computer 2533 // scientist's overactive imagination. 2534 // 2535 class PHINode : public Instruction { 2536 /// The number of operands actually allocated. NumOperands is 2537 /// the number actually in use. 2538 unsigned ReservedSpace; 2539 2540 PHINode(const PHINode &PN); 2541 2542 explicit PHINode(Type *Ty, unsigned NumReservedValues, 2543 const Twine &NameStr = "", 2544 Instruction *InsertBefore = nullptr) 2545 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), 2546 ReservedSpace(NumReservedValues) { 2547 setName(NameStr); 2548 allocHungoffUses(ReservedSpace); 2549 } 2550 2551 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, 2552 BasicBlock *InsertAtEnd) 2553 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), 2554 ReservedSpace(NumReservedValues) { 2555 setName(NameStr); 2556 allocHungoffUses(ReservedSpace); 2557 } 2558 2559 protected: 2560 // Note: Instruction needs to be a friend here to call cloneImpl. 2561 friend class Instruction; 2562 2563 PHINode *cloneImpl() const; 2564 2565 // allocHungoffUses - this is more complicated than the generic 2566 // User::allocHungoffUses, because we have to allocate Uses for the incoming 2567 // values and pointers to the incoming blocks, all in one allocation. 2568 void allocHungoffUses(unsigned N) { 2569 User::allocHungoffUses(N, /* IsPhi */ true); 2570 } 2571 2572 public: 2573 /// Constructors - NumReservedValues is a hint for the number of incoming 2574 /// edges that this phi node will have (use 0 if you really have no idea). 2575 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 2576 const Twine &NameStr = "", 2577 Instruction *InsertBefore = nullptr) { 2578 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); 2579 } 2580 2581 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 2582 const Twine &NameStr, BasicBlock *InsertAtEnd) { 2583 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); 2584 } 2585 2586 /// Provide fast operand accessors 2587 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2588 2589 // Block iterator interface. This provides access to the list of incoming 2590 // basic blocks, which parallels the list of incoming values. 
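  //
  // A minimal sketch of walking both lists in parallel (PN is assumed to be
  // a PHINode * the caller already has):
  //
  //   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
  //     Value *V = PN->getIncomingValue(i);
  //     BasicBlock *Pred = PN->getIncomingBlock(i);
  //     ...
  //   }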
2591 2592 using block_iterator = BasicBlock **; 2593 using const_block_iterator = BasicBlock * const *; 2594 2595 block_iterator block_begin() { 2596 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace); 2597 } 2598 2599 const_block_iterator block_begin() const { 2600 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); 2601 } 2602 2603 block_iterator block_end() { 2604 return block_begin() + getNumOperands(); 2605 } 2606 2607 const_block_iterator block_end() const { 2608 return block_begin() + getNumOperands(); 2609 } 2610 2611 iterator_range<block_iterator> blocks() { 2612 return make_range(block_begin(), block_end()); 2613 } 2614 2615 iterator_range<const_block_iterator> blocks() const { 2616 return make_range(block_begin(), block_end()); 2617 } 2618 2619 op_range incoming_values() { return operands(); } 2620 2621 const_op_range incoming_values() const { return operands(); } 2622 2623 /// Return the number of incoming edges 2624 /// 2625 unsigned getNumIncomingValues() const { return getNumOperands(); } 2626 2627 /// Return incoming value number x 2628 /// 2629 Value *getIncomingValue(unsigned i) const { 2630 return getOperand(i); 2631 } 2632 void setIncomingValue(unsigned i, Value *V) { 2633 assert(V && "PHI node got a null value!"); 2634 assert(getType() == V->getType() && 2635 "All operands to PHI node must be the same type as the PHI node!"); 2636 setOperand(i, V); 2637 } 2638 2639 static unsigned getOperandNumForIncomingValue(unsigned i) { 2640 return i; 2641 } 2642 2643 static unsigned getIncomingValueNumForOperand(unsigned i) { 2644 return i; 2645 } 2646 2647 /// Return incoming basic block number @p i. 2648 /// 2649 BasicBlock *getIncomingBlock(unsigned i) const { 2650 return block_begin()[i]; 2651 } 2652 2653 /// Return incoming basic block corresponding 2654 /// to an operand of the PHI. 2655 /// 2656 BasicBlock *getIncomingBlock(const Use &U) const { 2657 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); 2658 return getIncomingBlock(unsigned(&U - op_begin())); 2659 } 2660 2661 /// Return incoming basic block corresponding 2662 /// to value use iterator. 2663 /// 2664 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { 2665 return getIncomingBlock(I.getUse()); 2666 } 2667 2668 void setIncomingBlock(unsigned i, BasicBlock *BB) { 2669 assert(BB && "PHI node got a null basic block!"); 2670 block_begin()[i] = BB; 2671 } 2672 2673 /// Replace every incoming basic block \p Old to basic block \p New. 2674 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { 2675 assert(New && Old && "PHI node got a null basic block!"); 2676 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 2677 if (getIncomingBlock(Op) == Old) 2678 setIncomingBlock(Op, New); 2679 } 2680 2681 /// Add an incoming value to the end of the PHI list 2682 /// 2683 void addIncoming(Value *V, BasicBlock *BB) { 2684 if (getNumOperands() == ReservedSpace) 2685 growOperands(); // Get more space! 2686 // Initialize some new operands. 2687 setNumHungOffUseOperands(getNumOperands() + 1); 2688 setIncomingValue(getNumOperands() - 1, V); 2689 setIncomingBlock(getNumOperands() - 1, BB); 2690 } 2691 2692 /// Remove an incoming value. This is useful if a 2693 /// predecessor basic block is deleted. The value removed is returned. 2694 /// 2695 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty 2696 /// is true), the PHI node is destroyed and any uses of it are replaced with 2697 /// dummy values. 
The only time there should be zero incoming values to a PHI 2698 /// node is when the block is dead, so this strategy is sound. 2699 /// 2700 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); 2701 2702 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { 2703 int Idx = getBasicBlockIndex(BB); 2704 assert(Idx >= 0 && "Invalid basic block argument to remove!"); 2705 return removeIncomingValue(Idx, DeletePHIIfEmpty); 2706 } 2707 2708 /// Return the first index of the specified basic 2709 /// block in the value list for this PHI. Returns -1 if no instance. 2710 /// 2711 int getBasicBlockIndex(const BasicBlock *BB) const { 2712 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) 2713 if (block_begin()[i] == BB) 2714 return i; 2715 return -1; 2716 } 2717 2718 Value *getIncomingValueForBlock(const BasicBlock *BB) const { 2719 int Idx = getBasicBlockIndex(BB); 2720 assert(Idx >= 0 && "Invalid basic block argument!"); 2721 return getIncomingValue(Idx); 2722 } 2723 2724 /// Set every incoming value(s) for block \p BB to \p V. 2725 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { 2726 assert(BB && "PHI node got a null basic block!"); 2727 bool Found = false; 2728 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 2729 if (getIncomingBlock(Op) == BB) { 2730 Found = true; 2731 setIncomingValue(Op, V); 2732 } 2733 (void)Found; 2734 assert(Found && "Invalid basic block argument to set!"); 2735 } 2736 2737 /// If the specified PHI node always merges together the 2738 /// same value, return the value, otherwise return null. 2739 Value *hasConstantValue() const; 2740 2741 /// Whether the specified PHI node always merges 2742 /// together the same value, assuming undefs are equal to a unique 2743 /// non-undef value. 2744 bool hasConstantOrUndefValue() const; 2745 2746 /// Methods for support type inquiry through isa, cast, and dyn_cast: 2747 static bool classof(const Instruction *I) { 2748 return I->getOpcode() == Instruction::PHI; 2749 } 2750 static bool classof(const Value *V) { 2751 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2752 } 2753 2754 private: 2755 void growOperands(); 2756 }; 2757 2758 template <> 2759 struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { 2760 }; 2761 2762 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value) 2763 2764 //===----------------------------------------------------------------------===// 2765 // LandingPadInst Class 2766 //===----------------------------------------------------------------------===// 2767 2768 //===--------------------------------------------------------------------------- 2769 /// The landingpad instruction holds all of the information 2770 /// necessary to generate correct exception handling. The landingpad instruction 2771 /// cannot be moved from the top of a landing pad block, which itself is 2772 /// accessible only from the 'unwind' edge of an invoke. This uses the 2773 /// SubclassData field in Value to store whether or not the landingpad is a 2774 /// cleanup. 2775 /// 2776 class LandingPadInst : public Instruction { 2777 using CleanupField = BoolBitfieldElementT<0>; 2778 2779 /// The number of operands actually allocated. NumOperands is 2780 /// the number actually in use. 
2781 unsigned ReservedSpace; 2782 2783 LandingPadInst(const LandingPadInst &LP); 2784 2785 public: 2786 enum ClauseType { Catch, Filter }; 2787 2788 private: 2789 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, 2790 const Twine &NameStr, Instruction *InsertBefore); 2791 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, 2792 const Twine &NameStr, BasicBlock *InsertAtEnd); 2793 2794 // Allocate space for exactly zero operands. 2795 void *operator new(size_t s) { 2796 return User::operator new(s); 2797 } 2798 2799 void growOperands(unsigned Size); 2800 void init(unsigned NumReservedValues, const Twine &NameStr); 2801 2802 protected: 2803 // Note: Instruction needs to be a friend here to call cloneImpl. 2804 friend class Instruction; 2805 2806 LandingPadInst *cloneImpl() const; 2807 2808 public: 2809 /// Constructors - NumReservedClauses is a hint for the number of incoming 2810 /// clauses that this landingpad will have (use 0 if you really have no idea). 2811 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, 2812 const Twine &NameStr = "", 2813 Instruction *InsertBefore = nullptr); 2814 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, 2815 const Twine &NameStr, BasicBlock *InsertAtEnd); 2816 2817 /// Provide fast operand accessors 2818 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2819 2820 /// Return 'true' if this landingpad instruction is a 2821 /// cleanup. I.e., it should be run when unwinding even if its landing pad 2822 /// doesn't catch the exception. 2823 bool isCleanup() const { return getSubclassData<CleanupField>(); } 2824 2825 /// Indicate that this landingpad instruction is a cleanup. 2826 void setCleanup(bool V) { setSubclassData<CleanupField>(V); } 2827 2828 /// Add a catch or filter clause to the landing pad. 2829 void addClause(Constant *ClauseVal); 2830 2831 /// Get the value of the clause at index Idx. Use isCatch/isFilter to 2832 /// determine what type of clause this is. 2833 Constant *getClause(unsigned Idx) const { 2834 return cast<Constant>(getOperandList()[Idx]); 2835 } 2836 2837 /// Return 'true' if the clause and index Idx is a catch clause. 2838 bool isCatch(unsigned Idx) const { 2839 return !isa<ArrayType>(getOperandList()[Idx]->getType()); 2840 } 2841 2842 /// Return 'true' if the clause and index Idx is a filter clause. 2843 bool isFilter(unsigned Idx) const { 2844 return isa<ArrayType>(getOperandList()[Idx]->getType()); 2845 } 2846 2847 /// Get the number of clauses for this landing pad. 2848 unsigned getNumClauses() const { return getNumOperands(); } 2849 2850 /// Grow the size of the operand list to accommodate the new 2851 /// number of clauses. 
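  /// A construction sketch (ExnTy, the clause constant and the insertion
  /// point are assumptions of this example):
  ///   LandingPadInst *LP =
  ///       LandingPadInst::Create(ExnTy, /*NumReservedClauses=*/1, "lpad",
  ///                              InsertBefore);
  ///   LP->setCleanup(true);
  ///   LP->reserveClauses(1);
  ///   LP->addClause(TypeInfo); // non-array constant => catch clause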
2852 void reserveClauses(unsigned Size) { growOperands(Size); } 2853 2854 // Methods for support type inquiry through isa, cast, and dyn_cast: 2855 static bool classof(const Instruction *I) { 2856 return I->getOpcode() == Instruction::LandingPad; 2857 } 2858 static bool classof(const Value *V) { 2859 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2860 } 2861 }; 2862 2863 template <> 2864 struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> { 2865 }; 2866 2867 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value) 2868 2869 //===----------------------------------------------------------------------===// 2870 // ReturnInst Class 2871 //===----------------------------------------------------------------------===// 2872 2873 //===--------------------------------------------------------------------------- 2874 /// Return a value (possibly void), from a function. Execution 2875 /// does not continue in this function any longer. 2876 /// 2877 class ReturnInst : public Instruction { 2878 ReturnInst(const ReturnInst &RI); 2879 2880 private: 2881 // ReturnInst constructors: 2882 // ReturnInst() - 'ret void' instruction 2883 // ReturnInst( null) - 'ret void' instruction 2884 // ReturnInst(Value* X) - 'ret X' instruction 2885 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I 2886 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I 2887 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B 2888 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B 2889 // 2890 // NOTE: If the Value* passed is of type void then the constructor behaves as 2891 // if it was passed NULL. 2892 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr, 2893 Instruction *InsertBefore = nullptr); 2894 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd); 2895 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd); 2896 2897 protected: 2898 // Note: Instruction needs to be a friend here to call cloneImpl. 2899 friend class Instruction; 2900 2901 ReturnInst *cloneImpl() const; 2902 2903 public: 2904 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr, 2905 Instruction *InsertBefore = nullptr) { 2906 return new(!!retVal) ReturnInst(C, retVal, InsertBefore); 2907 } 2908 2909 static ReturnInst* Create(LLVMContext &C, Value *retVal, 2910 BasicBlock *InsertAtEnd) { 2911 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd); 2912 } 2913 2914 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) { 2915 return new(0) ReturnInst(C, InsertAtEnd); 2916 } 2917 2918 /// Provide fast operand accessors 2919 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2920 2921 /// Convenience accessor. Returns null if there is no return value. 2922 Value *getReturnValue() const { 2923 return getNumOperands() != 0 ? 
getOperand(0) : nullptr; 2924 } 2925 2926 unsigned getNumSuccessors() const { return 0; } 2927 2928 // Methods for support type inquiry through isa, cast, and dyn_cast: 2929 static bool classof(const Instruction *I) { 2930 return (I->getOpcode() == Instruction::Ret); 2931 } 2932 static bool classof(const Value *V) { 2933 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2934 } 2935 2936 private: 2937 BasicBlock *getSuccessor(unsigned idx) const { 2938 llvm_unreachable("ReturnInst has no successors!"); 2939 } 2940 2941 void setSuccessor(unsigned idx, BasicBlock *B) { 2942 llvm_unreachable("ReturnInst has no successors!"); 2943 } 2944 }; 2945 2946 template <> 2947 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> { 2948 }; 2949 2950 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value) 2951 2952 //===----------------------------------------------------------------------===// 2953 // BranchInst Class 2954 //===----------------------------------------------------------------------===// 2955 2956 //===--------------------------------------------------------------------------- 2957 /// Conditional or Unconditional Branch instruction. 2958 /// 2959 class BranchInst : public Instruction { 2960 /// Ops list - Branches are strange. The operands are ordered: 2961 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because 2962 /// they don't have to check for cond/uncond branchness. These are mostly 2963 /// accessed relative from op_end(). 2964 BranchInst(const BranchInst &BI); 2965 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition): 2966 // BranchInst(BB *B) - 'br B' 2967 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F' 2968 // BranchInst(BB* B, Inst *I) - 'br B' insert before I 2969 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I 2970 // BranchInst(BB* B, BB *I) - 'br B' insert at end 2971 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end 2972 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr); 2973 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 2974 Instruction *InsertBefore = nullptr); 2975 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd); 2976 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 2977 BasicBlock *InsertAtEnd); 2978 2979 void AssertOK(); 2980 2981 protected: 2982 // Note: Instruction needs to be a friend here to call cloneImpl. 2983 friend class Instruction; 2984 2985 BranchInst *cloneImpl() const; 2986 2987 public: 2988 /// Iterator type that casts an operand to a basic block. 2989 /// 2990 /// This only makes sense because the successors are stored as adjacent 2991 /// operands for branch instructions. 2992 struct succ_op_iterator 2993 : iterator_adaptor_base<succ_op_iterator, value_op_iterator, 2994 std::random_access_iterator_tag, BasicBlock *, 2995 ptrdiff_t, BasicBlock *, BasicBlock *> { 2996 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} 2997 2998 BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 2999 BasicBlock *operator->() const { return operator*(); } 3000 }; 3001 3002 /// The const version of `succ_op_iterator`. 
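  /// (Illustrative) both iterator flavours are normally obtained via
  /// successors(); e.g., for a BranchInst *BI:
  ///   for (BasicBlock *Succ : BI->successors())
  ///     ...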
3003 struct const_succ_op_iterator 3004 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, 3005 std::random_access_iterator_tag, 3006 const BasicBlock *, ptrdiff_t, const BasicBlock *, 3007 const BasicBlock *> { 3008 explicit const_succ_op_iterator(const_value_op_iterator I) 3009 : iterator_adaptor_base(I) {} 3010 3011 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3012 const BasicBlock *operator->() const { return operator*(); } 3013 }; 3014 3015 static BranchInst *Create(BasicBlock *IfTrue, 3016 Instruction *InsertBefore = nullptr) { 3017 return new(1) BranchInst(IfTrue, InsertBefore); 3018 } 3019 3020 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3021 Value *Cond, Instruction *InsertBefore = nullptr) { 3022 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); 3023 } 3024 3025 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) { 3026 return new(1) BranchInst(IfTrue, InsertAtEnd); 3027 } 3028 3029 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3030 Value *Cond, BasicBlock *InsertAtEnd) { 3031 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd); 3032 } 3033 3034 /// Transparently provide more efficient getOperand methods. 3035 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3036 3037 bool isUnconditional() const { return getNumOperands() == 1; } 3038 bool isConditional() const { return getNumOperands() == 3; } 3039 3040 Value *getCondition() const { 3041 assert(isConditional() && "Cannot get condition of an uncond branch!"); 3042 return Op<-3>(); 3043 } 3044 3045 void setCondition(Value *V) { 3046 assert(isConditional() && "Cannot set condition of unconditional branch!"); 3047 Op<-3>() = V; 3048 } 3049 3050 unsigned getNumSuccessors() const { return 1+isConditional(); } 3051 3052 BasicBlock *getSuccessor(unsigned i) const { 3053 assert(i < getNumSuccessors() && "Successor # out of range for Branch!"); 3054 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get()); 3055 } 3056 3057 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 3058 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!"); 3059 *(&Op<-1>() - idx) = NewSucc; 3060 } 3061 3062 /// Swap the successors of this branch instruction. 3063 /// 3064 /// Swaps the successors of the branch instruction. This also swaps any 3065 /// branch weight metadata associated with the instruction so that it 3066 /// continues to map correctly to each operand. 3067 void swapSuccessors(); 3068 3069 iterator_range<succ_op_iterator> successors() { 3070 return make_range( 3071 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)), 3072 succ_op_iterator(value_op_end())); 3073 } 3074 3075 iterator_range<const_succ_op_iterator> successors() const { 3076 return make_range(const_succ_op_iterator( 3077 std::next(value_op_begin(), isConditional() ? 
1 : 0)), 3078 const_succ_op_iterator(value_op_end())); 3079 } 3080 3081 // Methods for support type inquiry through isa, cast, and dyn_cast: 3082 static bool classof(const Instruction *I) { 3083 return (I->getOpcode() == Instruction::Br); 3084 } 3085 static bool classof(const Value *V) { 3086 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3087 } 3088 }; 3089 3090 template <> 3091 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> { 3092 }; 3093 3094 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value) 3095 3096 //===----------------------------------------------------------------------===// 3097 // SwitchInst Class 3098 //===----------------------------------------------------------------------===// 3099 3100 //===--------------------------------------------------------------------------- 3101 /// Multiway switch 3102 /// 3103 class SwitchInst : public Instruction { 3104 unsigned ReservedSpace; 3105 3106 // Operand[0] = Value to switch on 3107 // Operand[1] = Default basic block destination 3108 // Operand[2n ] = Value to match 3109 // Operand[2n+1] = BasicBlock to go to on match 3110 SwitchInst(const SwitchInst &SI); 3111 3112 /// Create a new switch instruction, specifying a value to switch on and a 3113 /// default destination. The number of additional cases can be specified here 3114 /// to make memory allocation more efficient. This constructor can also 3115 /// auto-insert before another instruction. 3116 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3117 Instruction *InsertBefore); 3118 3119 /// Create a new switch instruction, specifying a value to switch on and a 3120 /// default destination. The number of additional cases can be specified here 3121 /// to make memory allocation more efficient. This constructor also 3122 /// auto-inserts at the end of the specified BasicBlock. 3123 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3124 BasicBlock *InsertAtEnd); 3125 3126 // allocate space for exactly zero operands 3127 void *operator new(size_t s) { 3128 return User::operator new(s); 3129 } 3130 3131 void init(Value *Value, BasicBlock *Default, unsigned NumReserved); 3132 void growOperands(); 3133 3134 protected: 3135 // Note: Instruction needs to be a friend here to call cloneImpl. 3136 friend class Instruction; 3137 3138 SwitchInst *cloneImpl() const; 3139 3140 public: 3141 // -2 3142 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); 3143 3144 template <typename CaseHandleT> class CaseIteratorImpl; 3145 3146 /// A handle to a particular switch case. It exposes a convenient interface 3147 /// to both the case value and the successor block. 3148 /// 3149 /// We define this as a template and instantiate it to form both a const and 3150 /// non-const handle. 3151 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> 3152 class CaseHandleImpl { 3153 // Directly befriend both const and non-const iterators. 3154 friend class SwitchInst::CaseIteratorImpl< 3155 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; 3156 3157 protected: 3158 // Expose the switch type we're parameterized with to the iterator. 3159 using SwitchInstType = SwitchInstT; 3160 3161 SwitchInstT *SI; 3162 ptrdiff_t Index; 3163 3164 CaseHandleImpl() = default; 3165 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} 3166 3167 public: 3168 /// Resolves case value for current case. 
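/// Case handles are normally obtained by iterating the parent switch rather
/// than constructed directly. Illustrative sketch only (SI and ZeroDest are
/// assumed to be an existing SwitchInst* and a replacement successor block):
/// \code
///   for (auto Case : SI->cases())
///     if (Case.getCaseValue()->isZero())
///       Case.setSuccessor(ZeroDest);
/// \endcode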
3169 ConstantIntT *getCaseValue() const { 3170 assert((unsigned)Index < SI->getNumCases() && 3171 "Index out the number of cases."); 3172 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); 3173 } 3174 3175 /// Resolves successor for current case. 3176 BasicBlockT *getCaseSuccessor() const { 3177 assert(((unsigned)Index < SI->getNumCases() || 3178 (unsigned)Index == DefaultPseudoIndex) && 3179 "Index out the number of cases."); 3180 return SI->getSuccessor(getSuccessorIndex()); 3181 } 3182 3183 /// Returns number of current case. 3184 unsigned getCaseIndex() const { return Index; } 3185 3186 /// Returns successor index for current case successor. 3187 unsigned getSuccessorIndex() const { 3188 assert(((unsigned)Index == DefaultPseudoIndex || 3189 (unsigned)Index < SI->getNumCases()) && 3190 "Index out the number of cases."); 3191 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0; 3192 } 3193 3194 bool operator==(const CaseHandleImpl &RHS) const { 3195 assert(SI == RHS.SI && "Incompatible operators."); 3196 return Index == RHS.Index; 3197 } 3198 }; 3199 3200 using ConstCaseHandle = 3201 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; 3202 3203 class CaseHandle 3204 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> { 3205 friend class SwitchInst::CaseIteratorImpl<CaseHandle>; 3206 3207 public: 3208 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {} 3209 3210 /// Sets the new value for current case. 3211 void setValue(ConstantInt *V) { 3212 assert((unsigned)Index < SI->getNumCases() && 3213 "Index out the number of cases."); 3214 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V)); 3215 } 3216 3217 /// Sets the new successor for current case. 3218 void setSuccessor(BasicBlock *S) { 3219 SI->setSuccessor(getSuccessorIndex(), S); 3220 } 3221 }; 3222 3223 template <typename CaseHandleT> 3224 class CaseIteratorImpl 3225 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, 3226 std::random_access_iterator_tag, 3227 CaseHandleT> { 3228 using SwitchInstT = typename CaseHandleT::SwitchInstType; 3229 3230 CaseHandleT Case; 3231 3232 public: 3233 /// Default constructed iterator is in an invalid state until assigned to 3234 /// a case for a particular switch. 3235 CaseIteratorImpl() = default; 3236 3237 /// Initializes case iterator for given SwitchInst and for given 3238 /// case number. 3239 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {} 3240 3241 /// Initializes case iterator for given SwitchInst and for given 3242 /// successor index. 3243 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, 3244 unsigned SuccessorIndex) { 3245 assert(SuccessorIndex < SI->getNumSuccessors() && 3246 "Successor index # out of range!"); 3247 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1) 3248 : CaseIteratorImpl(SI, DefaultPseudoIndex); 3249 } 3250 3251 /// Support converting to the const variant. This will be a no-op for const 3252 /// variant. 3253 operator CaseIteratorImpl<ConstCaseHandle>() const { 3254 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index); 3255 } 3256 3257 CaseIteratorImpl &operator+=(ptrdiff_t N) { 3258 // Check index correctness after addition. 3259 // Note: Index == getNumCases() means end(). 
3260 assert(Case.Index + N >= 0 && 3261 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && 3262 "Case.Index out the number of cases."); 3263 Case.Index += N; 3264 return *this; 3265 } 3266 CaseIteratorImpl &operator-=(ptrdiff_t N) { 3267 // Check index correctness after subtraction. 3268 // Note: Case.Index == getNumCases() means end(). 3269 assert(Case.Index - N >= 0 && 3270 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && 3271 "Case.Index out the number of cases."); 3272 Case.Index -= N; 3273 return *this; 3274 } 3275 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { 3276 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3277 return Case.Index - RHS.Case.Index; 3278 } 3279 bool operator==(const CaseIteratorImpl &RHS) const { 3280 return Case == RHS.Case; 3281 } 3282 bool operator<(const CaseIteratorImpl &RHS) const { 3283 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3284 return Case.Index < RHS.Case.Index; 3285 } 3286 CaseHandleT &operator*() { return Case; } 3287 const CaseHandleT &operator*() const { return Case; } 3288 }; 3289 3290 using CaseIt = CaseIteratorImpl<CaseHandle>; 3291 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; 3292 3293 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3294 unsigned NumCases, 3295 Instruction *InsertBefore = nullptr) { 3296 return new SwitchInst(Value, Default, NumCases, InsertBefore); 3297 } 3298 3299 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3300 unsigned NumCases, BasicBlock *InsertAtEnd) { 3301 return new SwitchInst(Value, Default, NumCases, InsertAtEnd); 3302 } 3303 3304 /// Provide fast operand accessors 3305 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3306 3307 // Accessor Methods for Switch stmt 3308 Value *getCondition() const { return getOperand(0); } 3309 void setCondition(Value *V) { setOperand(0, V); } 3310 3311 BasicBlock *getDefaultDest() const { 3312 return cast<BasicBlock>(getOperand(1)); 3313 } 3314 3315 void setDefaultDest(BasicBlock *DefaultCase) { 3316 setOperand(1, reinterpret_cast<Value*>(DefaultCase)); 3317 } 3318 3319 /// Return the number of 'cases' in this switch instruction, excluding the 3320 /// default case. 3321 unsigned getNumCases() const { 3322 return getNumOperands()/2 - 1; 3323 } 3324 3325 /// Returns a read/write iterator that points to the first case in the 3326 /// SwitchInst. 3327 CaseIt case_begin() { 3328 return CaseIt(this, 0); 3329 } 3330 3331 /// Returns a read-only iterator that points to the first case in the 3332 /// SwitchInst. 3333 ConstCaseIt case_begin() const { 3334 return ConstCaseIt(this, 0); 3335 } 3336 3337 /// Returns a read/write iterator that points one past the last in the 3338 /// SwitchInst. 3339 CaseIt case_end() { 3340 return CaseIt(this, getNumCases()); 3341 } 3342 3343 /// Returns a read-only iterator that points one past the last in the 3344 /// SwitchInst. 3345 ConstCaseIt case_end() const { 3346 return ConstCaseIt(this, getNumCases()); 3347 } 3348 3349 /// Iteration adapter for range-for loops. 3350 iterator_range<CaseIt> cases() { 3351 return make_range(case_begin(), case_end()); 3352 } 3353 3354 /// Constant iteration adapter for range-for loops. 3355 iterator_range<ConstCaseIt> cases() const { 3356 return make_range(case_begin(), case_end()); 3357 } 3358 3359 /// Returns an iterator that points to the default case. 3360 /// Note: this iterator allows to resolve successor only. Attempt 3361 /// to resolve case value causes an assertion. 
3362 /// Also note, that increment and decrement also causes an assertion and 3363 /// makes iterator invalid. 3364 CaseIt case_default() { 3365 return CaseIt(this, DefaultPseudoIndex); 3366 } 3367 ConstCaseIt case_default() const { 3368 return ConstCaseIt(this, DefaultPseudoIndex); 3369 } 3370 3371 /// Search all of the case values for the specified constant. If it is 3372 /// explicitly handled, return the case iterator of it, otherwise return 3373 /// default case iterator to indicate that it is handled by the default 3374 /// handler. 3375 CaseIt findCaseValue(const ConstantInt *C) { 3376 CaseIt I = llvm::find_if( 3377 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; }); 3378 if (I != case_end()) 3379 return I; 3380 3381 return case_default(); 3382 } 3383 ConstCaseIt findCaseValue(const ConstantInt *C) const { 3384 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) { 3385 return Case.getCaseValue() == C; 3386 }); 3387 if (I != case_end()) 3388 return I; 3389 3390 return case_default(); 3391 } 3392 3393 /// Finds the unique case value for a given successor. Returns null if the 3394 /// successor is not found, not unique, or is the default case. 3395 ConstantInt *findCaseDest(BasicBlock *BB) { 3396 if (BB == getDefaultDest()) 3397 return nullptr; 3398 3399 ConstantInt *CI = nullptr; 3400 for (auto Case : cases()) { 3401 if (Case.getCaseSuccessor() != BB) 3402 continue; 3403 3404 if (CI) 3405 return nullptr; // Multiple cases lead to BB. 3406 3407 CI = Case.getCaseValue(); 3408 } 3409 3410 return CI; 3411 } 3412 3413 /// Add an entry to the switch instruction. 3414 /// Note: 3415 /// This action invalidates case_end(). Old case_end() iterator will 3416 /// point to the added case. 3417 void addCase(ConstantInt *OnVal, BasicBlock *Dest); 3418 3419 /// This method removes the specified case and its successor from the switch 3420 /// instruction. Note that this operation may reorder the remaining cases at 3421 /// index idx and above. 3422 /// Note: 3423 /// This action invalidates iterators for all cases following the one removed, 3424 /// including the case_end() iterator. It returns an iterator for the next 3425 /// case. 3426 CaseIt removeCase(CaseIt I); 3427 3428 unsigned getNumSuccessors() const { return getNumOperands()/2; } 3429 BasicBlock *getSuccessor(unsigned idx) const { 3430 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!"); 3431 return cast<BasicBlock>(getOperand(idx*2+1)); 3432 } 3433 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 3434 assert(idx < getNumSuccessors() && "Successor # out of range for switch!"); 3435 setOperand(idx * 2 + 1, NewSucc); 3436 } 3437 3438 // Methods for support type inquiry through isa, cast, and dyn_cast: 3439 static bool classof(const Instruction *I) { 3440 return I->getOpcode() == Instruction::Switch; 3441 } 3442 static bool classof(const Value *V) { 3443 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3444 } 3445 }; 3446 3447 /// A wrapper class to simplify modification of SwitchInst cases along with 3448 /// their prof branch_weights metadata. 
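///
/// Typical usage wraps an existing switch, performs the case updates through
/// the wrapper, and lets the destructor write the branch_weights metadata
/// back. Illustrative sketch only (SI, OnVal and Dest are assumed to be an
/// existing SwitchInst&, case value, and destination block):
/// \code
///   {
///     SwitchInstProfUpdateWrapper SIW(SI);
///     SIW.addCase(OnVal, Dest, /*W=*/10); // weight 10 for the new case
///   } // prof metadata is rewritten here, if anything changed
/// \endcode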
3449 class SwitchInstProfUpdateWrapper { 3450 SwitchInst &SI; 3451 Optional<SmallVector<uint32_t, 8> > Weights = None; 3452 bool Changed = false; 3453 3454 protected: 3455 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI); 3456 3457 MDNode *buildProfBranchWeightsMD(); 3458 3459 void init(); 3460 3461 public: 3462 using CaseWeightOpt = Optional<uint32_t>; 3463 SwitchInst *operator->() { return &SI; } 3464 SwitchInst &operator*() { return SI; } 3465 operator SwitchInst *() { return &SI; } 3466 3467 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); } 3468 3469 ~SwitchInstProfUpdateWrapper() { 3470 if (Changed) 3471 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); 3472 } 3473 3474 /// Delegate the call to the underlying SwitchInst::removeCase() and remove 3475 /// correspondent branch weight. 3476 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I); 3477 3478 /// Delegate the call to the underlying SwitchInst::addCase() and set the 3479 /// specified branch weight for the added case. 3480 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W); 3481 3482 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark 3483 /// this object to not touch the underlying SwitchInst in destructor. 3484 SymbolTableList<Instruction>::iterator eraseFromParent(); 3485 3486 void setSuccessorWeight(unsigned idx, CaseWeightOpt W); 3487 CaseWeightOpt getSuccessorWeight(unsigned idx); 3488 3489 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx); 3490 }; 3491 3492 template <> 3493 struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> { 3494 }; 3495 3496 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value) 3497 3498 //===----------------------------------------------------------------------===// 3499 // IndirectBrInst Class 3500 //===----------------------------------------------------------------------===// 3501 3502 //===--------------------------------------------------------------------------- 3503 /// Indirect Branch Instruction. 3504 /// 3505 class IndirectBrInst : public Instruction { 3506 unsigned ReservedSpace; 3507 3508 // Operand[0] = Address to jump to 3509 // Operand[n+1] = n-th destination 3510 IndirectBrInst(const IndirectBrInst &IBI); 3511 3512 /// Create a new indirectbr instruction, specifying an 3513 /// Address to jump to. The number of expected destinations can be specified 3514 /// here to make memory allocation more efficient. This constructor can also 3515 /// autoinsert before another instruction. 3516 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore); 3517 3518 /// Create a new indirectbr instruction, specifying an 3519 /// Address to jump to. The number of expected destinations can be specified 3520 /// here to make memory allocation more efficient. This constructor also 3521 /// autoinserts at the end of the specified BasicBlock. 3522 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd); 3523 3524 // allocate space for exactly zero operands 3525 void *operator new(size_t s) { 3526 return User::operator new(s); 3527 } 3528 3529 void init(Value *Address, unsigned NumDests); 3530 void growOperands(); 3531 3532 protected: 3533 // Note: Instruction needs to be a friend here to call cloneImpl. 3534 friend class Instruction; 3535 3536 IndirectBrInst *cloneImpl() const; 3537 3538 public: 3539 /// Iterator type that casts an operand to a basic block. 
3540 /// 3541 /// This only makes sense because the successors are stored as adjacent 3542 /// operands for indirectbr instructions. 3543 struct succ_op_iterator 3544 : iterator_adaptor_base<succ_op_iterator, value_op_iterator, 3545 std::random_access_iterator_tag, BasicBlock *, 3546 ptrdiff_t, BasicBlock *, BasicBlock *> { 3547 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} 3548 3549 BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3550 BasicBlock *operator->() const { return operator*(); } 3551 }; 3552 3553 /// The const version of `succ_op_iterator`. 3554 struct const_succ_op_iterator 3555 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, 3556 std::random_access_iterator_tag, 3557 const BasicBlock *, ptrdiff_t, const BasicBlock *, 3558 const BasicBlock *> { 3559 explicit const_succ_op_iterator(const_value_op_iterator I) 3560 : iterator_adaptor_base(I) {} 3561 3562 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3563 const BasicBlock *operator->() const { return operator*(); } 3564 }; 3565 3566 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 3567 Instruction *InsertBefore = nullptr) { 3568 return new IndirectBrInst(Address, NumDests, InsertBefore); 3569 } 3570 3571 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 3572 BasicBlock *InsertAtEnd) { 3573 return new IndirectBrInst(Address, NumDests, InsertAtEnd); 3574 } 3575 3576 /// Provide fast operand accessors. 3577 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3578 3579 // Accessor Methods for IndirectBrInst instruction. 3580 Value *getAddress() { return getOperand(0); } 3581 const Value *getAddress() const { return getOperand(0); } 3582 void setAddress(Value *V) { setOperand(0, V); } 3583 3584 /// return the number of possible destinations in this 3585 /// indirectbr instruction. 3586 unsigned getNumDestinations() const { return getNumOperands()-1; } 3587 3588 /// Return the specified destination. 3589 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); } 3590 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); } 3591 3592 /// Add a destination. 3593 /// 3594 void addDestination(BasicBlock *Dest); 3595 3596 /// This method removes the specified successor from the 3597 /// indirectbr instruction. 
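/// Illustrative sketch only (IBI and DeadBB are assumed to be an existing
/// IndirectBrInst* and a destination block that should be dropped):
/// \code
///   for (unsigned i = 0; i != IBI->getNumDestinations();)
///     if (IBI->getDestination(i) == DeadBB)
///       IBI->removeDestination(i); // the list shrinks, so do not advance i
///     else
///       ++i;
/// \endcode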
3598 void removeDestination(unsigned i); 3599 3600 unsigned getNumSuccessors() const { return getNumOperands()-1; } 3601 BasicBlock *getSuccessor(unsigned i) const { 3602 return cast<BasicBlock>(getOperand(i+1)); 3603 } 3604 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 3605 setOperand(i + 1, NewSucc); 3606 } 3607 3608 iterator_range<succ_op_iterator> successors() { 3609 return make_range(succ_op_iterator(std::next(value_op_begin())), 3610 succ_op_iterator(value_op_end())); 3611 } 3612 3613 iterator_range<const_succ_op_iterator> successors() const { 3614 return make_range(const_succ_op_iterator(std::next(value_op_begin())), 3615 const_succ_op_iterator(value_op_end())); 3616 } 3617 3618 // Methods for support type inquiry through isa, cast, and dyn_cast: 3619 static bool classof(const Instruction *I) { 3620 return I->getOpcode() == Instruction::IndirectBr; 3621 } 3622 static bool classof(const Value *V) { 3623 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3624 } 3625 }; 3626 3627 template <> 3628 struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> { 3629 }; 3630 3631 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value) 3632 3633 //===----------------------------------------------------------------------===// 3634 // InvokeInst Class 3635 //===----------------------------------------------------------------------===// 3636 3637 /// Invoke instruction. The SubclassData field is used to hold the 3638 /// calling convention of the call. 3639 /// 3640 class InvokeInst : public CallBase { 3641 /// The number of operands for this call beyond the called function, 3642 /// arguments, and operand bundles. 3643 static constexpr int NumExtraOperands = 2; 3644 3645 /// The index from the end of the operand array to the normal destination. 3646 static constexpr int NormalDestOpEndIdx = -3; 3647 3648 /// The index from the end of the operand array to the unwind destination. 3649 static constexpr int UnwindDestOpEndIdx = -2; 3650 3651 InvokeInst(const InvokeInst &BI); 3652 3653 /// Construct an InvokeInst given a range of arguments. 3654 /// 3655 /// Construct an InvokeInst from a range of arguments 3656 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3657 BasicBlock *IfException, ArrayRef<Value *> Args, 3658 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3659 const Twine &NameStr, Instruction *InsertBefore); 3660 3661 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3662 BasicBlock *IfException, ArrayRef<Value *> Args, 3663 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3664 const Twine &NameStr, BasicBlock *InsertAtEnd); 3665 3666 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3667 BasicBlock *IfException, ArrayRef<Value *> Args, 3668 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 3669 3670 /// Compute the number of operands to allocate. 3671 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { 3672 // We need one operand for the called function, plus our extra operands and 3673 // the input operand counts provided. 3674 return 1 + NumExtraOperands + NumArgs + NumBundleInputs; 3675 } 3676 3677 protected: 3678 // Note: Instruction needs to be a friend here to call cloneImpl. 
3679 friend class Instruction; 3680 3681 InvokeInst *cloneImpl() const; 3682 3683 public: 3684 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3685 BasicBlock *IfException, ArrayRef<Value *> Args, 3686 const Twine &NameStr, 3687 Instruction *InsertBefore = nullptr) { 3688 int NumOperands = ComputeNumOperands(Args.size()); 3689 return new (NumOperands) 3690 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, 3691 NameStr, InsertBefore); 3692 } 3693 3694 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3695 BasicBlock *IfException, ArrayRef<Value *> Args, 3696 ArrayRef<OperandBundleDef> Bundles = None, 3697 const Twine &NameStr = "", 3698 Instruction *InsertBefore = nullptr) { 3699 int NumOperands = 3700 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 3701 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 3702 3703 return new (NumOperands, DescriptorBytes) 3704 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 3705 NameStr, InsertBefore); 3706 } 3707 3708 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3709 BasicBlock *IfException, ArrayRef<Value *> Args, 3710 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3711 int NumOperands = ComputeNumOperands(Args.size()); 3712 return new (NumOperands) 3713 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, 3714 NameStr, InsertAtEnd); 3715 } 3716 3717 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3718 BasicBlock *IfException, ArrayRef<Value *> Args, 3719 ArrayRef<OperandBundleDef> Bundles, 3720 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3721 int NumOperands = 3722 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 3723 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 3724 3725 return new (NumOperands, DescriptorBytes) 3726 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 3727 NameStr, InsertAtEnd); 3728 } 3729 3730 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3731 BasicBlock *IfException, ArrayRef<Value *> Args, 3732 const Twine &NameStr, 3733 Instruction *InsertBefore = nullptr) { 3734 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3735 IfException, Args, None, NameStr, InsertBefore); 3736 } 3737 3738 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3739 BasicBlock *IfException, ArrayRef<Value *> Args, 3740 ArrayRef<OperandBundleDef> Bundles = None, 3741 const Twine &NameStr = "", 3742 Instruction *InsertBefore = nullptr) { 3743 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3744 IfException, Args, Bundles, NameStr, InsertBefore); 3745 } 3746 3747 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3748 BasicBlock *IfException, ArrayRef<Value *> Args, 3749 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3750 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3751 IfException, Args, NameStr, InsertAtEnd); 3752 } 3753 3754 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3755 BasicBlock *IfException, ArrayRef<Value *> Args, 3756 ArrayRef<OperandBundleDef> Bundles, 3757 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3758 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3759 IfException, Args, Bundles, NameStr, InsertAtEnd); 3760 } 3761 3762 /// Create a clone of \p II with a different set of operand bundles and 3763 /// insert it before \p 
InsertPt. 3764 /// 3765 /// The returned invoke instruction is identical to \p II in every way except 3766 /// that the operand bundles for the new instruction are set to the operand 3767 /// bundles in \p Bundles. 3768 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, 3769 Instruction *InsertPt = nullptr); 3770 3771 // get*Dest - Return the destination basic blocks... 3772 BasicBlock *getNormalDest() const { 3773 return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); 3774 } 3775 BasicBlock *getUnwindDest() const { 3776 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); 3777 } 3778 void setNormalDest(BasicBlock *B) { 3779 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); 3780 } 3781 void setUnwindDest(BasicBlock *B) { 3782 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); 3783 } 3784 3785 /// Get the landingpad instruction from the landing pad 3786 /// block (the unwind destination). 3787 LandingPadInst *getLandingPadInst() const; 3788 3789 BasicBlock *getSuccessor(unsigned i) const { 3790 assert(i < 2 && "Successor # out of range for invoke!"); 3791 return i == 0 ? getNormalDest() : getUnwindDest(); 3792 } 3793 3794 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 3795 assert(i < 2 && "Successor # out of range for invoke!"); 3796 if (i == 0) 3797 setNormalDest(NewSucc); 3798 else 3799 setUnwindDest(NewSucc); 3800 } 3801 3802 unsigned getNumSuccessors() const { return 2; } 3803 3804 // Methods for support type inquiry through isa, cast, and dyn_cast: 3805 static bool classof(const Instruction *I) { 3806 return (I->getOpcode() == Instruction::Invoke); 3807 } 3808 static bool classof(const Value *V) { 3809 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3810 } 3811 3812 private: 3813 // Shadow Instruction::setInstructionSubclassData with a private forwarding 3814 // method so that subclasses cannot accidentally use it. 3815 template <typename Bitfield> 3816 void setSubclassData(typename Bitfield::Type Value) { 3817 Instruction::setSubclassData<Bitfield>(Value); 3818 } 3819 }; 3820 3821 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3822 BasicBlock *IfException, ArrayRef<Value *> Args, 3823 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3824 const Twine &NameStr, Instruction *InsertBefore) 3825 : CallBase(Ty->getReturnType(), Instruction::Invoke, 3826 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 3827 InsertBefore) { 3828 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 3829 } 3830 3831 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3832 BasicBlock *IfException, ArrayRef<Value *> Args, 3833 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3834 const Twine &NameStr, BasicBlock *InsertAtEnd) 3835 : CallBase(Ty->getReturnType(), Instruction::Invoke, 3836 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 3837 InsertAtEnd) { 3838 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 3839 } 3840 3841 //===----------------------------------------------------------------------===// 3842 // CallBrInst Class 3843 //===----------------------------------------------------------------------===// 3844 3845 /// CallBr instruction, tracking function calls that may not return control but 3846 /// instead transfer it to a third location. The SubclassData field is used to 3847 /// hold the calling convention of the call. 
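/// A callbr is created like a call, but with an additional fall-through
/// destination and a list of indirect destinations. Illustrative sketch only
/// (FTy, Callee, Fallthrough, IndirectBB, Args and InsertPt are assumed to
/// already exist; they are not defined in this header):
/// \code
///   CallBrInst *CBI = CallBrInst::Create(FTy, Callee, Fallthrough,
///                                        {IndirectBB}, Args, "", InsertPt);
/// \endcode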
3848 /// 3849 class CallBrInst : public CallBase { 3850 3851 unsigned NumIndirectDests; 3852 3853 CallBrInst(const CallBrInst &BI); 3854 3855 /// Construct a CallBrInst given a range of arguments. 3856 /// 3857 /// Construct a CallBrInst from a range of arguments 3858 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 3859 ArrayRef<BasicBlock *> IndirectDests, 3860 ArrayRef<Value *> Args, 3861 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3862 const Twine &NameStr, Instruction *InsertBefore); 3863 3864 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 3865 ArrayRef<BasicBlock *> IndirectDests, 3866 ArrayRef<Value *> Args, 3867 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3868 const Twine &NameStr, BasicBlock *InsertAtEnd); 3869 3870 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, 3871 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, 3872 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 3873 3874 /// Should the Indirect Destinations change, scan + update the Arg list. 3875 void updateArgBlockAddresses(unsigned i, BasicBlock *B); 3876 3877 /// Compute the number of operands to allocate. 3878 static int ComputeNumOperands(int NumArgs, int NumIndirectDests, 3879 int NumBundleInputs = 0) { 3880 // We need one operand for the called function, plus our extra operands and 3881 // the input operand counts provided. 3882 return 2 + NumIndirectDests + NumArgs + NumBundleInputs; 3883 } 3884 3885 protected: 3886 // Note: Instruction needs to be a friend here to call cloneImpl. 3887 friend class Instruction; 3888 3889 CallBrInst *cloneImpl() const; 3890 3891 public: 3892 static CallBrInst *Create(FunctionType *Ty, Value *Func, 3893 BasicBlock *DefaultDest, 3894 ArrayRef<BasicBlock *> IndirectDests, 3895 ArrayRef<Value *> Args, const Twine &NameStr, 3896 Instruction *InsertBefore = nullptr) { 3897 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 3898 return new (NumOperands) 3899 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, 3900 NumOperands, NameStr, InsertBefore); 3901 } 3902 3903 static CallBrInst *Create(FunctionType *Ty, Value *Func, 3904 BasicBlock *DefaultDest, 3905 ArrayRef<BasicBlock *> IndirectDests, 3906 ArrayRef<Value *> Args, 3907 ArrayRef<OperandBundleDef> Bundles = None, 3908 const Twine &NameStr = "", 3909 Instruction *InsertBefore = nullptr) { 3910 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), 3911 CountBundleInputs(Bundles)); 3912 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 3913 3914 return new (NumOperands, DescriptorBytes) 3915 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 3916 NumOperands, NameStr, InsertBefore); 3917 } 3918 3919 static CallBrInst *Create(FunctionType *Ty, Value *Func, 3920 BasicBlock *DefaultDest, 3921 ArrayRef<BasicBlock *> IndirectDests, 3922 ArrayRef<Value *> Args, const Twine &NameStr, 3923 BasicBlock *InsertAtEnd) { 3924 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 3925 return new (NumOperands) 3926 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, 3927 NumOperands, NameStr, InsertAtEnd); 3928 } 3929 3930 static CallBrInst *Create(FunctionType *Ty, Value *Func, 3931 BasicBlock *DefaultDest, 3932 ArrayRef<BasicBlock *> IndirectDests, 3933 ArrayRef<Value *> Args, 3934 ArrayRef<OperandBundleDef> Bundles, 3935 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3936 int NumOperands = ComputeNumOperands(Args.size(), 
IndirectDests.size(), 3937 CountBundleInputs(Bundles)); 3938 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 3939 3940 return new (NumOperands, DescriptorBytes) 3941 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 3942 NumOperands, NameStr, InsertAtEnd); 3943 } 3944 3945 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 3946 ArrayRef<BasicBlock *> IndirectDests, 3947 ArrayRef<Value *> Args, const Twine &NameStr, 3948 Instruction *InsertBefore = nullptr) { 3949 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 3950 IndirectDests, Args, NameStr, InsertBefore); 3951 } 3952 3953 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 3954 ArrayRef<BasicBlock *> IndirectDests, 3955 ArrayRef<Value *> Args, 3956 ArrayRef<OperandBundleDef> Bundles = None, 3957 const Twine &NameStr = "", 3958 Instruction *InsertBefore = nullptr) { 3959 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 3960 IndirectDests, Args, Bundles, NameStr, InsertBefore); 3961 } 3962 3963 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 3964 ArrayRef<BasicBlock *> IndirectDests, 3965 ArrayRef<Value *> Args, const Twine &NameStr, 3966 BasicBlock *InsertAtEnd) { 3967 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 3968 IndirectDests, Args, NameStr, InsertAtEnd); 3969 } 3970 3971 static CallBrInst *Create(FunctionCallee Func, 3972 BasicBlock *DefaultDest, 3973 ArrayRef<BasicBlock *> IndirectDests, 3974 ArrayRef<Value *> Args, 3975 ArrayRef<OperandBundleDef> Bundles, 3976 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3977 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 3978 IndirectDests, Args, Bundles, NameStr, InsertAtEnd); 3979 } 3980 3981 /// Create a clone of \p CBI with a different set of operand bundles and 3982 /// insert it before \p InsertPt. 3983 /// 3984 /// The returned callbr instruction is identical to \p CBI in every way 3985 /// except that the operand bundles for the new instruction are set to the 3986 /// operand bundles in \p Bundles. 3987 static CallBrInst *Create(CallBrInst *CBI, 3988 ArrayRef<OperandBundleDef> Bundles, 3989 Instruction *InsertPt = nullptr); 3990 3991 /// Return the number of callbr indirect dest labels. 3992 /// 3993 unsigned getNumIndirectDests() const { return NumIndirectDests; } 3994 3995 /// getIndirectDestLabel - Return the i-th indirect dest label. 3996 /// 3997 Value *getIndirectDestLabel(unsigned i) const { 3998 assert(i < getNumIndirectDests() && "Out of bounds!"); 3999 return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() + 4000 1); 4001 } 4002 4003 Value *getIndirectDestLabelUse(unsigned i) const { 4004 assert(i < getNumIndirectDests() && "Out of bounds!"); 4005 return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() + 4006 1); 4007 } 4008 4009 // Return the destination basic blocks... 
4010 BasicBlock *getDefaultDest() const { 4011 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); 4012 } 4013 BasicBlock *getIndirectDest(unsigned i) const { 4014 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); 4015 } 4016 SmallVector<BasicBlock *, 16> getIndirectDests() const { 4017 SmallVector<BasicBlock *, 16> IndirectDests; 4018 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) 4019 IndirectDests.push_back(getIndirectDest(i)); 4020 return IndirectDests; 4021 } 4022 void setDefaultDest(BasicBlock *B) { 4023 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); 4024 } 4025 void setIndirectDest(unsigned i, BasicBlock *B) { 4026 updateArgBlockAddresses(i, B); 4027 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); 4028 } 4029 4030 BasicBlock *getSuccessor(unsigned i) const { 4031 assert(i < getNumSuccessors() + 1 && 4032 "Successor # out of range for callbr!"); 4033 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); 4034 } 4035 4036 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4037 assert(i < getNumIndirectDests() + 1 && 4038 "Successor # out of range for callbr!"); 4039 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); 4040 } 4041 4042 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } 4043 4044 // Methods for support type inquiry through isa, cast, and dyn_cast: 4045 static bool classof(const Instruction *I) { 4046 return (I->getOpcode() == Instruction::CallBr); 4047 } 4048 static bool classof(const Value *V) { 4049 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4050 } 4051 4052 private: 4053 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4054 // method so that subclasses cannot accidentally use it. 4055 template <typename Bitfield> 4056 void setSubclassData(typename Bitfield::Type Value) { 4057 Instruction::setSubclassData<Bitfield>(Value); 4058 } 4059 }; 4060 4061 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4062 ArrayRef<BasicBlock *> IndirectDests, 4063 ArrayRef<Value *> Args, 4064 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4065 const Twine &NameStr, Instruction *InsertBefore) 4066 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4067 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4068 InsertBefore) { 4069 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4070 } 4071 4072 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4073 ArrayRef<BasicBlock *> IndirectDests, 4074 ArrayRef<Value *> Args, 4075 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4076 const Twine &NameStr, BasicBlock *InsertAtEnd) 4077 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4078 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4079 InsertAtEnd) { 4080 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4081 } 4082 4083 //===----------------------------------------------------------------------===// 4084 // ResumeInst Class 4085 //===----------------------------------------------------------------------===// 4086 4087 //===--------------------------------------------------------------------------- 4088 /// Resume the propagation of an exception. 
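/// The operand is typically the value produced by a landingpad in the same
/// function. Illustrative sketch only (Exn and BB are assumed to be an
/// existing exception value and the block to terminate):
/// \code
///   ResumeInst::Create(Exn, BB); // appends 'resume Exn' to BB
/// \endcode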
4089 /// 4090 class ResumeInst : public Instruction { 4091 ResumeInst(const ResumeInst &RI); 4092 4093 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); 4094 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); 4095 4096 protected: 4097 // Note: Instruction needs to be a friend here to call cloneImpl. 4098 friend class Instruction; 4099 4100 ResumeInst *cloneImpl() const; 4101 4102 public: 4103 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { 4104 return new(1) ResumeInst(Exn, InsertBefore); 4105 } 4106 4107 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { 4108 return new(1) ResumeInst(Exn, InsertAtEnd); 4109 } 4110 4111 /// Provide fast operand accessors 4112 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4113 4114 /// Convenience accessor. 4115 Value *getValue() const { return Op<0>(); } 4116 4117 unsigned getNumSuccessors() const { return 0; } 4118 4119 // Methods for support type inquiry through isa, cast, and dyn_cast: 4120 static bool classof(const Instruction *I) { 4121 return I->getOpcode() == Instruction::Resume; 4122 } 4123 static bool classof(const Value *V) { 4124 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4125 } 4126 4127 private: 4128 BasicBlock *getSuccessor(unsigned idx) const { 4129 llvm_unreachable("ResumeInst has no successors!"); 4130 } 4131 4132 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 4133 llvm_unreachable("ResumeInst has no successors!"); 4134 } 4135 }; 4136 4137 template <> 4138 struct OperandTraits<ResumeInst> : 4139 public FixedNumOperandTraits<ResumeInst, 1> { 4140 }; 4141 4142 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value) 4143 4144 //===----------------------------------------------------------------------===// 4145 // CatchSwitchInst Class 4146 //===----------------------------------------------------------------------===// 4147 class CatchSwitchInst : public Instruction { 4148 using UnwindDestField = BoolBitfieldElementT<0>; 4149 4150 /// The number of operands actually allocated. NumOperands is 4151 /// the number actually in use. 4152 unsigned ReservedSpace; 4153 4154 // Operand[0] = Outer scope 4155 // Operand[1] = Unwind block destination 4156 // Operand[n] = BasicBlock to go to on match 4157 CatchSwitchInst(const CatchSwitchInst &CSI); 4158 4159 /// Create a new switch instruction, specifying a 4160 /// default destination. The number of additional handlers can be specified 4161 /// here to make memory allocation more efficient. 4162 /// This constructor can also autoinsert before another instruction. 4163 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4164 unsigned NumHandlers, const Twine &NameStr, 4165 Instruction *InsertBefore); 4166 4167 /// Create a new switch instruction, specifying a 4168 /// default destination. The number of additional handlers can be specified 4169 /// here to make memory allocation more efficient. 4170 /// This constructor also autoinserts at the end of the specified BasicBlock. 4171 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4172 unsigned NumHandlers, const Twine &NameStr, 4173 BasicBlock *InsertAtEnd); 4174 4175 // allocate space for exactly zero operands 4176 void *operator new(size_t s) { return User::operator new(s); } 4177 4178 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); 4179 void growOperands(unsigned Size); 4180 4181 protected: 4182 // Note: Instruction needs to be a friend here to call cloneImpl. 
4183 friend class Instruction; 4184 4185 CatchSwitchInst *cloneImpl() const; 4186 4187 public: 4188 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4189 unsigned NumHandlers, 4190 const Twine &NameStr = "", 4191 Instruction *InsertBefore = nullptr) { 4192 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4193 InsertBefore); 4194 } 4195 4196 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4197 unsigned NumHandlers, const Twine &NameStr, 4198 BasicBlock *InsertAtEnd) { 4199 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4200 InsertAtEnd); 4201 } 4202 4203 /// Provide fast operand accessors 4204 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4205 4206 // Accessor Methods for CatchSwitch stmt 4207 Value *getParentPad() const { return getOperand(0); } 4208 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } 4209 4210 // Accessor Methods for CatchSwitch stmt 4211 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } 4212 bool unwindsToCaller() const { return !hasUnwindDest(); } 4213 BasicBlock *getUnwindDest() const { 4214 if (hasUnwindDest()) 4215 return cast<BasicBlock>(getOperand(1)); 4216 return nullptr; 4217 } 4218 void setUnwindDest(BasicBlock *UnwindDest) { 4219 assert(UnwindDest); 4220 assert(hasUnwindDest()); 4221 setOperand(1, UnwindDest); 4222 } 4223 4224 /// return the number of 'handlers' in this catchswitch 4225 /// instruction, except the default handler 4226 unsigned getNumHandlers() const { 4227 if (hasUnwindDest()) 4228 return getNumOperands() - 2; 4229 return getNumOperands() - 1; 4230 } 4231 4232 private: 4233 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } 4234 static const BasicBlock *handler_helper(const Value *V) { 4235 return cast<BasicBlock>(V); 4236 } 4237 4238 public: 4239 using DerefFnTy = BasicBlock *(*)(Value *); 4240 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; 4241 using handler_range = iterator_range<handler_iterator>; 4242 using ConstDerefFnTy = const BasicBlock *(*)(const Value *); 4243 using const_handler_iterator = 4244 mapped_iterator<const_op_iterator, ConstDerefFnTy>; 4245 using const_handler_range = iterator_range<const_handler_iterator>; 4246 4247 /// Returns an iterator that points to the first handler in CatchSwitchInst. 4248 handler_iterator handler_begin() { 4249 op_iterator It = op_begin() + 1; 4250 if (hasUnwindDest()) 4251 ++It; 4252 return handler_iterator(It, DerefFnTy(handler_helper)); 4253 } 4254 4255 /// Returns an iterator that points to the first handler in the 4256 /// CatchSwitchInst. 4257 const_handler_iterator handler_begin() const { 4258 const_op_iterator It = op_begin() + 1; 4259 if (hasUnwindDest()) 4260 ++It; 4261 return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); 4262 } 4263 4264 /// Returns a read-only iterator that points one past the last 4265 /// handler in the CatchSwitchInst. 4266 handler_iterator handler_end() { 4267 return handler_iterator(op_end(), DerefFnTy(handler_helper)); 4268 } 4269 4270 /// Returns an iterator that points one past the last handler in the 4271 /// CatchSwitchInst. 4272 const_handler_iterator handler_end() const { 4273 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); 4274 } 4275 4276 /// iteration adapter for range-for loops. 4277 handler_range handlers() { 4278 return make_range(handler_begin(), handler_end()); 4279 } 4280 4281 /// iteration adapter for range-for loops. 
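/// Illustrative sketch only (CSI is assumed to be an existing
/// const CatchSwitchInst*, and HandlerBlocks an assumed set of blocks,
/// e.g. a SmallPtrSet):
/// \code
///   for (const BasicBlock *Handler : CSI->handlers())
///     HandlerBlocks.insert(Handler);
/// \endcode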
4282 const_handler_range handlers() const { 4283 return make_range(handler_begin(), handler_end()); 4284 } 4285 4286 /// Add an entry to the switch instruction... 4287 /// Note: 4288 /// This action invalidates handler_end(). Old handler_end() iterator will 4289 /// point to the added handler. 4290 void addHandler(BasicBlock *Dest); 4291 4292 void removeHandler(handler_iterator HI); 4293 4294 unsigned getNumSuccessors() const { return getNumOperands() - 1; } 4295 BasicBlock *getSuccessor(unsigned Idx) const { 4296 assert(Idx < getNumSuccessors() && 4297 "Successor # out of range for catchswitch!"); 4298 return cast<BasicBlock>(getOperand(Idx + 1)); 4299 } 4300 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { 4301 assert(Idx < getNumSuccessors() && 4302 "Successor # out of range for catchswitch!"); 4303 setOperand(Idx + 1, NewSucc); 4304 } 4305 4306 // Methods for support type inquiry through isa, cast, and dyn_cast: 4307 static bool classof(const Instruction *I) { 4308 return I->getOpcode() == Instruction::CatchSwitch; 4309 } 4310 static bool classof(const Value *V) { 4311 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4312 } 4313 }; 4314 4315 template <> 4316 struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; 4317 4318 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value) 4319 4320 //===----------------------------------------------------------------------===// 4321 // CleanupPadInst Class 4322 //===----------------------------------------------------------------------===// 4323 class CleanupPadInst : public FuncletPadInst { 4324 private: 4325 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4326 unsigned Values, const Twine &NameStr, 4327 Instruction *InsertBefore) 4328 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4329 NameStr, InsertBefore) {} 4330 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4331 unsigned Values, const Twine &NameStr, 4332 BasicBlock *InsertAtEnd) 4333 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4334 NameStr, InsertAtEnd) {} 4335 4336 public: 4337 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None, 4338 const Twine &NameStr = "", 4339 Instruction *InsertBefore = nullptr) { 4340 unsigned Values = 1 + Args.size(); 4341 return new (Values) 4342 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); 4343 } 4344 4345 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, 4346 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4347 unsigned Values = 1 + Args.size(); 4348 return new (Values) 4349 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); 4350 } 4351 4352 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4353 static bool classof(const Instruction *I) { 4354 return I->getOpcode() == Instruction::CleanupPad; 4355 } 4356 static bool classof(const Value *V) { 4357 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4358 } 4359 }; 4360 4361 //===----------------------------------------------------------------------===// 4362 // CatchPadInst Class 4363 //===----------------------------------------------------------------------===// 4364 class CatchPadInst : public FuncletPadInst { 4365 private: 4366 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 4367 unsigned Values, const Twine &NameStr, 4368 Instruction *InsertBefore) 4369 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 4370 NameStr, InsertBefore) {} 4371 
explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 4372 unsigned Values, const Twine &NameStr, 4373 BasicBlock *InsertAtEnd) 4374 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 4375 NameStr, InsertAtEnd) {} 4376 4377 public: 4378 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 4379 const Twine &NameStr = "", 4380 Instruction *InsertBefore = nullptr) { 4381 unsigned Values = 1 + Args.size(); 4382 return new (Values) 4383 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); 4384 } 4385 4386 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 4387 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4388 unsigned Values = 1 + Args.size(); 4389 return new (Values) 4390 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); 4391 } 4392 4393 /// Convenience accessors 4394 CatchSwitchInst *getCatchSwitch() const { 4395 return cast<CatchSwitchInst>(Op<-1>()); 4396 } 4397 void setCatchSwitch(Value *CatchSwitch) { 4398 assert(CatchSwitch); 4399 Op<-1>() = CatchSwitch; 4400 } 4401 4402 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4403 static bool classof(const Instruction *I) { 4404 return I->getOpcode() == Instruction::CatchPad; 4405 } 4406 static bool classof(const Value *V) { 4407 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4408 } 4409 }; 4410 4411 //===----------------------------------------------------------------------===// 4412 // CatchReturnInst Class 4413 //===----------------------------------------------------------------------===// 4414 4415 class CatchReturnInst : public Instruction { 4416 CatchReturnInst(const CatchReturnInst &RI); 4417 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); 4418 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); 4419 4420 void init(Value *CatchPad, BasicBlock *BB); 4421 4422 protected: 4423 // Note: Instruction needs to be a friend here to call cloneImpl. 4424 friend class Instruction; 4425 4426 CatchReturnInst *cloneImpl() const; 4427 4428 public: 4429 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 4430 Instruction *InsertBefore = nullptr) { 4431 assert(CatchPad); 4432 assert(BB); 4433 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); 4434 } 4435 4436 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 4437 BasicBlock *InsertAtEnd) { 4438 assert(CatchPad); 4439 assert(BB); 4440 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); 4441 } 4442 4443 /// Provide fast operand accessors 4444 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4445 4446 /// Convenience accessors. 4447 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } 4448 void setCatchPad(CatchPadInst *CatchPad) { 4449 assert(CatchPad); 4450 Op<0>() = CatchPad; 4451 } 4452 4453 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } 4454 void setSuccessor(BasicBlock *NewSucc) { 4455 assert(NewSucc); 4456 Op<1>() = NewSucc; 4457 } 4458 unsigned getNumSuccessors() const { return 1; } 4459 4460 /// Get the parentPad of this catchret's catchpad's catchswitch. 4461 /// The successor block is implicitly a member of this funclet. 
4462 Value *getCatchSwitchParentPad() const { 4463 return getCatchPad()->getCatchSwitch()->getParentPad(); 4464 } 4465 4466 // Methods for support type inquiry through isa, cast, and dyn_cast: 4467 static bool classof(const Instruction *I) { 4468 return (I->getOpcode() == Instruction::CatchRet); 4469 } 4470 static bool classof(const Value *V) { 4471 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4472 } 4473 4474 private: 4475 BasicBlock *getSuccessor(unsigned Idx) const { 4476 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 4477 return getSuccessor(); 4478 } 4479 4480 void setSuccessor(unsigned Idx, BasicBlock *B) { 4481 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 4482 setSuccessor(B); 4483 } 4484 }; 4485 4486 template <> 4487 struct OperandTraits<CatchReturnInst> 4488 : public FixedNumOperandTraits<CatchReturnInst, 2> {}; 4489 4490 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value) 4491 4492 //===----------------------------------------------------------------------===// 4493 // CleanupReturnInst Class 4494 //===----------------------------------------------------------------------===// 4495 4496 class CleanupReturnInst : public Instruction { 4497 using UnwindDestField = BoolBitfieldElementT<0>; 4498 4499 private: 4500 CleanupReturnInst(const CleanupReturnInst &RI); 4501 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 4502 Instruction *InsertBefore = nullptr); 4503 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 4504 BasicBlock *InsertAtEnd); 4505 4506 void init(Value *CleanupPad, BasicBlock *UnwindBB); 4507 4508 protected: 4509 // Note: Instruction needs to be a friend here to call cloneImpl. 4510 friend class Instruction; 4511 4512 CleanupReturnInst *cloneImpl() const; 4513 4514 public: 4515 static CleanupReturnInst *Create(Value *CleanupPad, 4516 BasicBlock *UnwindBB = nullptr, 4517 Instruction *InsertBefore = nullptr) { 4518 assert(CleanupPad); 4519 unsigned Values = 1; 4520 if (UnwindBB) 4521 ++Values; 4522 return new (Values) 4523 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); 4524 } 4525 4526 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, 4527 BasicBlock *InsertAtEnd) { 4528 assert(CleanupPad); 4529 unsigned Values = 1; 4530 if (UnwindBB) 4531 ++Values; 4532 return new (Values) 4533 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); 4534 } 4535 4536 /// Provide fast operand accessors 4537 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4538 4539 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } 4540 bool unwindsToCaller() const { return !hasUnwindDest(); } 4541 4542 /// Convenience accessor. 4543 CleanupPadInst *getCleanupPad() const { 4544 return cast<CleanupPadInst>(Op<0>()); 4545 } 4546 void setCleanupPad(CleanupPadInst *CleanupPad) { 4547 assert(CleanupPad); 4548 Op<0>() = CleanupPad; 4549 } 4550 4551 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } 4552 4553 BasicBlock *getUnwindDest() const { 4554 return hasUnwindDest() ? 
//===----------------------------------------------------------------------===//
// CleanupReturnInst Class
//===----------------------------------------------------------------------===//

class CleanupReturnInst : public Instruction {
  using UnwindDestField = BoolBitfieldElementT<0>;

private:
  CleanupReturnInst(const CleanupReturnInst &RI);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    Instruction *InsertBefore = nullptr);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    BasicBlock *InsertAtEnd);

  void init(Value *CleanupPad, BasicBlock *UnwindBB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CleanupReturnInst *cloneImpl() const;

public:
  static CleanupReturnInst *Create(Value *CleanupPad,
                                   BasicBlock *UnwindBB = nullptr,
                                   Instruction *InsertBefore = nullptr) {
    assert(CleanupPad);
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
  }

  static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
                                   BasicBlock *InsertAtEnd) {
    assert(CleanupPad);
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }

  /// Convenience accessor.
  CleanupPadInst *getCleanupPad() const {
    return cast<CleanupPadInst>(Op<0>());
  }
  void setCleanupPad(CleanupPadInst *CleanupPad) {
    assert(CleanupPad);
    Op<0>() = CleanupPad;
  }

  unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }

  BasicBlock *getUnwindDest() const {
    return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
  }
  void setUnwindDest(BasicBlock *NewDest) {
    assert(NewDest);
    assert(hasUnwindDest());
    Op<1>() = NewDest;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CleanupRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx == 0);
    return getUnwindDest();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx == 0);
    setUnwindDest(B);
  }

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

template <>
struct OperandTraits<CleanupReturnInst>
    : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
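// Illustrative sketch (editor-added): a cleanup funclet pairs a cleanuppad
// with a cleanupret that either unwinds to the caller or to an enclosing EH
// pad. The function and the names `ParentPad`, `CleanupBB`, and `UnwindBB`
// are hypothetical; pass a null `UnwindBB` to unwind to the caller.
inline CleanupReturnInst *buildExampleCleanup(Value *ParentPad,
                                              BasicBlock *CleanupBB,
                                              BasicBlock *UnwindBB) {
  // The cleanuppad must be the first non-PHI instruction of its block and
  // names the enclosing pad (or, via a token-none constant, the function
  // itself) as its parent.
  CleanupPadInst *CPI =
      CleanupPadInst::Create(ParentPad, /*Args=*/{}, "cleanup", CleanupBB);
  // The cleanupret consumes the cleanuppad token; a null unwind destination
  // means the funclet unwinds to the caller.
  return CleanupReturnInst::Create(CPI, UnwindBB, CleanupBB);
}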
//===----------------------------------------------------------------------===//
// UnreachableInst Class
//===----------------------------------------------------------------------===//

/// Executing this instruction has undefined behavior. In particular, the
/// presence of this instruction indicates some higher level knowledge that the
/// end of the block cannot be reached.
///
class UnreachableInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  UnreachableInst *cloneImpl() const;

public:
  explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
  explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t s) {
    return User::operator new(s, 0);
  }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Unreachable;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("UnreachableInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("UnreachableInst has no successors!");
  }
};
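// Illustrative sketch (editor-added): terminating a block that can never be
// reached, for example after a call known not to return. The function name
// and the assumption that `BB` currently lacks a terminator are hypothetical.
inline UnreachableInst *terminateWithUnreachable(BasicBlock *BB) {
  // The insert-at-end constructor appends the new terminator to BB.
  return new UnreachableInst(BB->getContext(), BB);
}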
//===----------------------------------------------------------------------===//
// TruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of integer types.
class TruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical TruncInst
  TruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  TruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The (smaller) type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  TruncInst(
    Value *S,               ///< The value to be truncated
    Type *Ty,               ///< The (smaller) type to truncate to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Trunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// ZExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents zero extension of integer types.
class ZExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ZExtInst
  ZExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  ZExtInst(
    Value *S,                           ///< The value to be zero extended
    Type *Ty,                           ///< The type to zero extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end semantics.
  ZExtInst(
    Value *S,               ///< The value to be zero extended
    Type *Ty,               ///< The type to zero extend to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == ZExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// SExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SExtInst
  SExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SExtInst(
    Value *S,                           ///< The value to be sign extended
    Type *Ty,                           ///< The type to sign extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SExtInst(
    Value *S,               ///< The value to be sign extended
    Type *Ty,               ///< The type to sign extend to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
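// Illustrative sketch (editor-added): the integer cast classes above are
// usually created via their constructors or, as here, via
// CastInst::CreateIntegerCast, which picks the right opcode from the source
// and destination widths. The function and the names `V`, `Int32Ty`, and
// `InsertBefore` are hypothetical; `V` is assumed to have integer type.
inline Value *widenOrTruncateToI32(Value *V, Type *Int32Ty,
                                   Instruction *InsertBefore) {
  if (V->getType() == Int32Ty)
    return V;
  // Equivalent to `new SExtInst(V, Int32Ty, ...)` or `new TruncInst(...)`
  // depending on the relative bit widths; signedness is chosen here purely
  // for illustration.
  return CastInst::CreateIntegerCast(V, Int32Ty, /*isSigned=*/true, "to.i32",
                                     InsertBefore);
}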
//===----------------------------------------------------------------------===//
// FPTruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPTruncInst
  FPTruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPTruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPTruncInst(
    Value *S,               ///< The value to be truncated
    Type *Ty,               ///< The type to truncate to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPTrunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPExtInst
  FPExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPExtInst(
    Value *S,                           ///< The value to be extended
    Type *Ty,                           ///< The type to extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPExtInst(
    Value *S,               ///< The value to be extended
    Type *Ty,               ///< The type to extend to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
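// Illustrative sketch (editor-added): converting between floating point widths
// with the classes above. The function and the names `V` and `FloatTy` are
// hypothetical; `V` is assumed to be a wider floating point value and
// `FloatTy` a narrower floating point type in the same context.
inline Value *demoteToFloat(Value *V, Type *FloatTy,
                            Instruction *InsertBefore) {
  // fptrunc narrows a floating point value; FPExtInst would be used for the
  // opposite direction.
  return new FPTruncInst(V, FloatTy, "to.f32", InsertBefore);
}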
//===----------------------------------------------------------------------===//
// UIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical UIToFPInst
  UIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  UIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  UIToFPInst(
    Value *S,               ///< The value to be converted
    Type *Ty,               ///< The type to convert to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == UIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// SIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SIToFPInst
  SIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SIToFPInst(
    Value *S,               ///< The value to be converted
    Type *Ty,               ///< The type to convert to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPToUIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to unsigned integer.
class FPToUIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToUIInst
  FPToUIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToUIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToUIInst(
    Value *S,               ///< The value to be converted
    Type *Ty,               ///< The type to convert to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToUI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPToSIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to signed integer.
class FPToSIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToSIInst
  FPToSIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToSIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToSIInst(
    Value *S,               ///< The value to be converted
    Type *Ty,               ///< The type to convert to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToSI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
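// Illustrative sketch (editor-added): round-tripping between integers and
// floating point with the four conversion classes above. The function and the
// names `IntVal`, `DoubleTy`, and `IntTy` are hypothetical; `IntVal` is
// assumed to be an unsigned integer value of type `IntTy`.
inline Value *roundTripThroughDouble(Value *IntVal, Type *DoubleTy, Type *IntTy,
                                     Instruction *InsertBefore) {
  // uitofp treats the input as unsigned; SIToFPInst would treat it as signed.
  Value *AsFP = new UIToFPInst(IntVal, DoubleTy, "as.fp", InsertBefore);
  // fptoui converts back; the result is poison if the value does not fit in
  // the destination type. FPToSIInst is the signed counterpart.
  return new FPToUIInst(AsFP, IntTy, "as.int", InsertBefore);
}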
//===----------------------------------------------------------------------===//
// IntToPtrInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Constructor with insert-before-instruction semantics
  IntToPtrInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  IntToPtrInst(
    Value *S,               ///< The value to be converted
    Type *Ty,               ///< The type to convert to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Clone an identical IntToPtrInst.
  IntToPtrInst *cloneImpl() const;

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == IntToPtr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// PtrToIntInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from a pointer to an integer.
class PtrToIntInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical PtrToIntInst.
  PtrToIntInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  PtrToIntInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  PtrToIntInst(
    Value *S,               ///< The value to be converted
    Type *Ty,               ///< The type to convert to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Gets the pointer operand.
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand.
  const Value *getPointerOperand() const { return getOperand(0); }
  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == PtrToInt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// BitCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  BitCastInst(
    Value *S,               ///< The value to be cast
    Type *Ty,               ///< The type to cast to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// AddrSpaceCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  AddrSpaceCastInst(
    Value *S,               ///< The value to be cast
    Type *Ty,               ///< The type to cast to
    const Twine &NameStr,   ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand.
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand.
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};
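// Illustrative sketch (editor-added): the pointer casts above differ in what
// they may change. bitcast and addrspacecast reinterpret or retarget a
// pointer, while ptrtoint/inttoptr round-trip it through an integer wide
// enough to hold it. The function and the names `Ptr` and `IntPtrTy` are
// hypothetical; `IntPtrTy` might come from DataLayout::getIntPtrType.
inline Value *roundTripPointer(Value *Ptr, Type *IntPtrTy,
                               Instruction *InsertBefore) {
  Value *AsInt = new PtrToIntInst(Ptr, IntPtrTy, "as.int", InsertBefore);
  // Converting back yields a pointer of the original type, in the original
  // address space.
  return new IntToPtrInst(AsInt, Ptr->getType(), "as.ptr", InsertBefore);
}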
/// A helper function that returns the pointer operand of a load or store
/// instruction. Returns nullptr if it is not a load or store.
inline const Value *getLoadStorePointerOperand(const Value *V) {
  if (auto *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerOperand();
  if (auto *Store = dyn_cast<StoreInst>(V))
    return Store->getPointerOperand();
  return nullptr;
}
inline Value *getLoadStorePointerOperand(Value *V) {
  return const_cast<Value *>(
      getLoadStorePointerOperand(static_cast<const Value *>(V)));
}

/// A helper function that returns the pointer operand of a load, store
/// or GEP instruction. Returns nullptr if it is not a load, store, or GEP.
inline const Value *getPointerOperand(const Value *V) {
  if (auto *Ptr = getLoadStorePointerOperand(V))
    return Ptr;
  if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
    return Gep->getPointerOperand();
  return nullptr;
}
inline Value *getPointerOperand(Value *V) {
  return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
}

/// A helper function that returns the alignment of a load or store
/// instruction.
inline Align getLoadStoreAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlign();
  return cast<StoreInst>(I)->getAlign();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
inline unsigned getLoadStoreAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}
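// Illustrative sketch (editor-added): the helpers above let a pass treat loads
// and stores uniformly without branching on the concrete class. The function
// name is hypothetical; `I` is an arbitrary instruction.
inline bool accessesAddressSpaceZero(Instruction *I) {
  // getLoadStorePointerOperand returns nullptr for anything that is not a
  // load or a store, so it doubles as the type check guarding the assert in
  // getLoadStoreAddressSpace.
  if (getLoadStorePointerOperand(I))
    return getLoadStoreAddressSpace(I) == 0;
  return false;
}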
//===----------------------------------------------------------------------===//
// FreezeInst Class
//===----------------------------------------------------------------------===//

/// This class represents a freeze instruction. It returns an arbitrary, but
/// fixed, concrete value if its operand is either a poison value or an undef
/// value; otherwise it returns its operand unchanged.
class FreezeInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FreezeInst
  FreezeInst *cloneImpl() const;

public:
  explicit FreezeInst(Value *S,
                      const Twine &NameStr = "",
                      Instruction *InsertBefore = nullptr);
  FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Freeze;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
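// Illustrative sketch (editor-added): freezing a value stops poison and undef
// from propagating further, which is useful before branching on a potentially
// poisoned condition. The function name is hypothetical; `Cond` is assumed to
// be an i1 value.
inline Value *freezeCondition(Value *Cond, Instruction *InsertBefore) {
  // If Cond is well defined this returns it unchanged; otherwise an arbitrary
  // fixed i1 value is chosen.
  return new FreezeInst(Cond, "frozen", InsertBefore);
}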
} // end namespace llvm

#endif // LLVM_IR_INSTRUCTIONS_H