1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file exposes the class definitions of all of the subclasses of the 10 // Instruction class. This is meant to be an easy way to get access to all 11 // instruction subclasses. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #ifndef LLVM_IR_INSTRUCTIONS_H 16 #define LLVM_IR_INSTRUCTIONS_H 17 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/Bitfields.h" 20 #include "llvm/ADT/None.h" 21 #include "llvm/ADT/STLExtras.h" 22 #include "llvm/ADT/SmallVector.h" 23 #include "llvm/ADT/StringRef.h" 24 #include "llvm/ADT/Twine.h" 25 #include "llvm/ADT/iterator.h" 26 #include "llvm/ADT/iterator_range.h" 27 #include "llvm/IR/Attributes.h" 28 #include "llvm/IR/BasicBlock.h" 29 #include "llvm/IR/CallingConv.h" 30 #include "llvm/IR/CFG.h" 31 #include "llvm/IR/Constant.h" 32 #include "llvm/IR/DerivedTypes.h" 33 #include "llvm/IR/Function.h" 34 #include "llvm/IR/InstrTypes.h" 35 #include "llvm/IR/Instruction.h" 36 #include "llvm/IR/OperandTraits.h" 37 #include "llvm/IR/Type.h" 38 #include "llvm/IR/Use.h" 39 #include "llvm/IR/User.h" 40 #include "llvm/IR/Value.h" 41 #include "llvm/Support/AtomicOrdering.h" 42 #include "llvm/Support/Casting.h" 43 #include "llvm/Support/ErrorHandling.h" 44 #include <cassert> 45 #include <cstddef> 46 #include <cstdint> 47 #include <iterator> 48 49 namespace llvm { 50 51 class APInt; 52 class ConstantInt; 53 class DataLayout; 54 class LLVMContext; 55 56 //===----------------------------------------------------------------------===// 57 // AllocaInst Class 58 //===----------------------------------------------------------------------===// 59 60 /// an instruction to allocate memory on the stack 61 class AllocaInst : public UnaryInstruction { 62 Type *AllocatedType; 63 64 using AlignmentField = AlignmentBitfieldElementT<0>; 65 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>; 66 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>; 67 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField, 68 SwiftErrorField>(), 69 "Bitfields must be contiguous"); 70 71 protected: 72 // Note: Instruction needs to be a friend here to call cloneImpl. 73 friend class Instruction; 74 75 AllocaInst *cloneImpl() const; 76 77 public: 78 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 79 const Twine &Name, Instruction *InsertBefore); 80 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 81 const Twine &Name, BasicBlock *InsertAtEnd); 82 83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 84 Instruction *InsertBefore); 85 AllocaInst(Type *Ty, unsigned AddrSpace, 86 const Twine &Name, BasicBlock *InsertAtEnd); 87 88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, 89 const Twine &Name = "", Instruction *InsertBefore = nullptr); 90 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, 91 const Twine &Name, BasicBlock *InsertAtEnd); 92 93 /// Return true if there is an allocation size parameter to the allocation 94 /// instruction that is not 1. 
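/// For example, in textual IR "%buf = alloca i8, i32 %n" is an array
/// allocation, while "%x = alloca i32" (an implicit array size of 1) is not.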
95 bool isArrayAllocation() const; 96 97 /// Get the number of elements allocated. For a simple allocation of a single 98 /// element, this will return a constant 1 value. 99 const Value *getArraySize() const { return getOperand(0); } 100 Value *getArraySize() { return getOperand(0); } 101 102 /// Overload to return most specific pointer type. 103 PointerType *getType() const { 104 return cast<PointerType>(Instruction::getType()); 105 } 106 107 /// Get allocation size in bits. Returns None if size can't be determined, 108 /// e.g. in case of a VLA. 109 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const; 110 111 /// Return the type that is being allocated by the instruction. 112 Type *getAllocatedType() const { return AllocatedType; } 113 /// for use only in special circumstances that need to generically 114 /// transform a whole instruction (eg: IR linking and vectorization). 115 void setAllocatedType(Type *Ty) { AllocatedType = Ty; } 116 117 /// Return the alignment of the memory that is being allocated by the 118 /// instruction. 119 Align getAlign() const { 120 return Align(1ULL << getSubclassData<AlignmentField>()); 121 } 122 123 void setAlignment(Align Align) { 124 setSubclassData<AlignmentField>(Log2(Align)); 125 } 126 127 // FIXME: Remove this one transition to Align is over. 128 unsigned getAlignment() const { return getAlign().value(); } 129 130 /// Return true if this alloca is in the entry block of the function and is a 131 /// constant size. If so, the code generator will fold it into the 132 /// prolog/epilog code, so it is basically free. 133 bool isStaticAlloca() const; 134 135 /// Return true if this alloca is used as an inalloca argument to a call. Such 136 /// allocas are never considered static even if they are in the entry block. 137 bool isUsedWithInAlloca() const { 138 return getSubclassData<UsedWithInAllocaField>(); 139 } 140 141 /// Specify whether this alloca is used to represent the arguments to a call. 142 void setUsedWithInAlloca(bool V) { 143 setSubclassData<UsedWithInAllocaField>(V); 144 } 145 146 /// Return true if this alloca is used as a swifterror argument to a call. 147 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); } 148 /// Specify whether this alloca is used to represent a swifterror. 149 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); } 150 151 // Methods for support type inquiry through isa, cast, and dyn_cast: 152 static bool classof(const Instruction *I) { 153 return (I->getOpcode() == Instruction::Alloca); 154 } 155 static bool classof(const Value *V) { 156 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 157 } 158 159 private: 160 // Shadow Instruction::setInstructionSubclassData with a private forwarding 161 // method so that subclasses cannot accidentally use it. 162 template <typename Bitfield> 163 void setSubclassData(typename Bitfield::Type Value) { 164 Instruction::setSubclassData<Bitfield>(Value); 165 } 166 }; 167 168 //===----------------------------------------------------------------------===// 169 // LoadInst Class 170 //===----------------------------------------------------------------------===// 171 172 /// An instruction for reading from memory. This uses the SubclassData field in 173 /// Value to store whether or not the load is volatile. 
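/// A minimal construction sketch (illustrative; Int32Ty, Ptr, and InsertBefore
/// are assumed to be an existing i32 type, pointer value, and insertion point,
/// none of which are defined in this header):
/// \code
///   LoadInst *LI = new LoadInst(Int32Ty, Ptr, "val", /*isVolatile=*/false,
///                               Align(4), InsertBefore);
/// \endcode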
174 class LoadInst : public UnaryInstruction { 175 using VolatileField = BoolBitfieldElementT<0>; 176 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; 177 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; 178 static_assert( 179 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), 180 "Bitfields must be contiguous"); 181 182 void AssertOK(); 183 184 protected: 185 // Note: Instruction needs to be a friend here to call cloneImpl. 186 friend class Instruction; 187 188 LoadInst *cloneImpl() const; 189 190 public: 191 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, 192 Instruction *InsertBefore); 193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); 194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 195 Instruction *InsertBefore); 196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 197 BasicBlock *InsertAtEnd); 198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 199 Align Align, Instruction *InsertBefore = nullptr); 200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 201 Align Align, BasicBlock *InsertAtEnd); 202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 203 Align Align, AtomicOrdering Order, 204 SyncScope::ID SSID = SyncScope::System, 205 Instruction *InsertBefore = nullptr); 206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 207 Align Align, AtomicOrdering Order, SyncScope::ID SSID, 208 BasicBlock *InsertAtEnd); 209 210 /// Return true if this is a load from a volatile memory location. 211 bool isVolatile() const { return getSubclassData<VolatileField>(); } 212 213 /// Specify whether this is a volatile load or not. 214 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 215 216 /// Return the alignment of the access that is being performed. 217 /// FIXME: Remove this function once transition to Align is over. 218 /// Use getAlign() instead. 219 unsigned getAlignment() const { return getAlign().value(); } 220 221 /// Return the alignment of the access that is being performed. 222 Align getAlign() const { 223 return Align(1ULL << (getSubclassData<AlignmentField>())); 224 } 225 226 void setAlignment(Align Align) { 227 setSubclassData<AlignmentField>(Log2(Align)); 228 } 229 230 /// Returns the ordering constraint of this load instruction. 231 AtomicOrdering getOrdering() const { 232 return getSubclassData<OrderingField>(); 233 } 234 /// Sets the ordering constraint of this load instruction. May not be Release 235 /// or AcquireRelease. 236 void setOrdering(AtomicOrdering Ordering) { 237 setSubclassData<OrderingField>(Ordering); 238 } 239 240 /// Returns the synchronization scope ID of this load instruction. 241 SyncScope::ID getSyncScopeID() const { 242 return SSID; 243 } 244 245 /// Sets the synchronization scope ID of this load instruction. 246 void setSyncScopeID(SyncScope::ID SSID) { 247 this->SSID = SSID; 248 } 249 250 /// Sets the ordering constraint and the synchronization scope ID of this load 251 /// instruction. 
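/// For example, calling "LI->setAtomic(AtomicOrdering::Acquire)" on an
/// existing LoadInst *LI marks it as an acquire load in the default
/// (system) synchronization scope.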
252 void setAtomic(AtomicOrdering Ordering, 253 SyncScope::ID SSID = SyncScope::System) { 254 setOrdering(Ordering); 255 setSyncScopeID(SSID); 256 } 257 258 bool isSimple() const { return !isAtomic() && !isVolatile(); } 259 260 bool isUnordered() const { 261 return (getOrdering() == AtomicOrdering::NotAtomic || 262 getOrdering() == AtomicOrdering::Unordered) && 263 !isVolatile(); 264 } 265 266 Value *getPointerOperand() { return getOperand(0); } 267 const Value *getPointerOperand() const { return getOperand(0); } 268 static unsigned getPointerOperandIndex() { return 0U; } 269 Type *getPointerOperandType() const { return getPointerOperand()->getType(); } 270 271 /// Returns the address space of the pointer operand. 272 unsigned getPointerAddressSpace() const { 273 return getPointerOperandType()->getPointerAddressSpace(); 274 } 275 276 // Methods for support type inquiry through isa, cast, and dyn_cast: 277 static bool classof(const Instruction *I) { 278 return I->getOpcode() == Instruction::Load; 279 } 280 static bool classof(const Value *V) { 281 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 282 } 283 284 private: 285 // Shadow Instruction::setInstructionSubclassData with a private forwarding 286 // method so that subclasses cannot accidentally use it. 287 template <typename Bitfield> 288 void setSubclassData(typename Bitfield::Type Value) { 289 Instruction::setSubclassData<Bitfield>(Value); 290 } 291 292 /// The synchronization scope ID of this load instruction. Not quite enough 293 /// room in SubClassData for everything, so synchronization scope ID gets its 294 /// own field. 295 SyncScope::ID SSID; 296 }; 297 298 //===----------------------------------------------------------------------===// 299 // StoreInst Class 300 //===----------------------------------------------------------------------===// 301 302 /// An instruction for storing to memory. 303 class StoreInst : public Instruction { 304 using VolatileField = BoolBitfieldElementT<0>; 305 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; 306 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; 307 static_assert( 308 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), 309 "Bitfields must be contiguous"); 310 311 void AssertOK(); 312 313 protected: 314 // Note: Instruction needs to be a friend here to call cloneImpl. 315 friend class Instruction; 316 317 StoreInst *cloneImpl() const; 318 319 public: 320 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); 321 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); 322 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore); 323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); 324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 325 Instruction *InsertBefore = nullptr); 326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 327 BasicBlock *InsertAtEnd); 328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 329 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, 330 Instruction *InsertBefore = nullptr); 331 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 332 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); 333 334 // allocate space for exactly two operands 335 void *operator new(size_t s) { 336 return User::operator new(s, 2); 337 } 338 339 /// Return true if this is a store to a volatile memory location. 
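/// In textual IR such a store is printed as, for example,
/// "store volatile i32 %v, i32* %p".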
340 bool isVolatile() const { return getSubclassData<VolatileField>(); } 341 342 /// Specify whether this is a volatile store or not. 343 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 344 345 /// Transparently provide more efficient getOperand methods. 346 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 347 348 /// Return the alignment of the access that is being performed 349 /// FIXME: Remove this function once transition to Align is over. 350 /// Use getAlign() instead. 351 unsigned getAlignment() const { return getAlign().value(); } 352 353 Align getAlign() const { 354 return Align(1ULL << (getSubclassData<AlignmentField>())); 355 } 356 357 void setAlignment(Align Align) { 358 setSubclassData<AlignmentField>(Log2(Align)); 359 } 360 361 /// Returns the ordering constraint of this store instruction. 362 AtomicOrdering getOrdering() const { 363 return getSubclassData<OrderingField>(); 364 } 365 366 /// Sets the ordering constraint of this store instruction. May not be 367 /// Acquire or AcquireRelease. 368 void setOrdering(AtomicOrdering Ordering) { 369 setSubclassData<OrderingField>(Ordering); 370 } 371 372 /// Returns the synchronization scope ID of this store instruction. 373 SyncScope::ID getSyncScopeID() const { 374 return SSID; 375 } 376 377 /// Sets the synchronization scope ID of this store instruction. 378 void setSyncScopeID(SyncScope::ID SSID) { 379 this->SSID = SSID; 380 } 381 382 /// Sets the ordering constraint and the synchronization scope ID of this 383 /// store instruction. 384 void setAtomic(AtomicOrdering Ordering, 385 SyncScope::ID SSID = SyncScope::System) { 386 setOrdering(Ordering); 387 setSyncScopeID(SSID); 388 } 389 390 bool isSimple() const { return !isAtomic() && !isVolatile(); } 391 392 bool isUnordered() const { 393 return (getOrdering() == AtomicOrdering::NotAtomic || 394 getOrdering() == AtomicOrdering::Unordered) && 395 !isVolatile(); 396 } 397 398 Value *getValueOperand() { return getOperand(0); } 399 const Value *getValueOperand() const { return getOperand(0); } 400 401 Value *getPointerOperand() { return getOperand(1); } 402 const Value *getPointerOperand() const { return getOperand(1); } 403 static unsigned getPointerOperandIndex() { return 1U; } 404 Type *getPointerOperandType() const { return getPointerOperand()->getType(); } 405 406 /// Returns the address space of the pointer operand. 407 unsigned getPointerAddressSpace() const { 408 return getPointerOperandType()->getPointerAddressSpace(); 409 } 410 411 // Methods for support type inquiry through isa, cast, and dyn_cast: 412 static bool classof(const Instruction *I) { 413 return I->getOpcode() == Instruction::Store; 414 } 415 static bool classof(const Value *V) { 416 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 417 } 418 419 private: 420 // Shadow Instruction::setInstructionSubclassData with a private forwarding 421 // method so that subclasses cannot accidentally use it. 422 template <typename Bitfield> 423 void setSubclassData(typename Bitfield::Type Value) { 424 Instruction::setSubclassData<Bitfield>(Value); 425 } 426 427 /// The synchronization scope ID of this store instruction. Not quite enough 428 /// room in SubClassData for everything, so synchronization scope ID gets its 429 /// own field. 
430 SyncScope::ID SSID; 431 }; 432 433 template <> 434 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { 435 }; 436 437 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) 438 439 //===----------------------------------------------------------------------===// 440 // FenceInst Class 441 //===----------------------------------------------------------------------===// 442 443 /// An instruction for ordering other memory operations. 444 class FenceInst : public Instruction { 445 using OrderingField = AtomicOrderingBitfieldElementT<0>; 446 447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID); 448 449 protected: 450 // Note: Instruction needs to be a friend here to call cloneImpl. 451 friend class Instruction; 452 453 FenceInst *cloneImpl() const; 454 455 public: 456 // Ordering may only be Acquire, Release, AcquireRelease, or 457 // SequentiallyConsistent. 458 FenceInst(LLVMContext &C, AtomicOrdering Ordering, 459 SyncScope::ID SSID = SyncScope::System, 460 Instruction *InsertBefore = nullptr); 461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, 462 BasicBlock *InsertAtEnd); 463 464 // allocate space for exactly zero operands 465 void *operator new(size_t s) { 466 return User::operator new(s, 0); 467 } 468 469 /// Returns the ordering constraint of this fence instruction. 470 AtomicOrdering getOrdering() const { 471 return getSubclassData<OrderingField>(); 472 } 473 474 /// Sets the ordering constraint of this fence instruction. May only be 475 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. 476 void setOrdering(AtomicOrdering Ordering) { 477 setSubclassData<OrderingField>(Ordering); 478 } 479 480 /// Returns the synchronization scope ID of this fence instruction. 481 SyncScope::ID getSyncScopeID() const { 482 return SSID; 483 } 484 485 /// Sets the synchronization scope ID of this fence instruction. 486 void setSyncScopeID(SyncScope::ID SSID) { 487 this->SSID = SSID; 488 } 489 490 // Methods for support type inquiry through isa, cast, and dyn_cast: 491 static bool classof(const Instruction *I) { 492 return I->getOpcode() == Instruction::Fence; 493 } 494 static bool classof(const Value *V) { 495 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 496 } 497 498 private: 499 // Shadow Instruction::setInstructionSubclassData with a private forwarding 500 // method so that subclasses cannot accidentally use it. 501 template <typename Bitfield> 502 void setSubclassData(typename Bitfield::Type Value) { 503 Instruction::setSubclassData<Bitfield>(Value); 504 } 505 506 /// The synchronization scope ID of this fence instruction. Not quite enough 507 /// room in SubClassData for everything, so synchronization scope ID gets its 508 /// own field. 509 SyncScope::ID SSID; 510 }; 511 512 //===----------------------------------------------------------------------===// 513 // AtomicCmpXchgInst Class 514 //===----------------------------------------------------------------------===// 515 516 /// An instruction that atomically checks whether a 517 /// specified value is in a memory location, and, if it is, stores a new value 518 /// there. The value returned by this instruction is a pair containing the 519 /// original value as first element, and an i1 indicating success (true) or 520 /// failure (false) as second element. 
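/// An illustrative textual IR form (typed-pointer syntax):
/// \code
///   %pair    = cmpxchg i32* %ptr, i32 %expected, i32 %new seq_cst seq_cst
///   %old     = extractvalue { i32, i1 } %pair, 0
///   %success = extractvalue { i32, i1 } %pair, 1
/// \endcode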
521 /// 522 class AtomicCmpXchgInst : public Instruction { 523 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align, 524 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, 525 SyncScope::ID SSID); 526 527 template <unsigned Offset> 528 using AtomicOrderingBitfieldElement = 529 typename Bitfield::Element<AtomicOrdering, Offset, 3, 530 AtomicOrdering::LAST>; 531 532 protected: 533 // Note: Instruction needs to be a friend here to call cloneImpl. 534 friend class Instruction; 535 536 AtomicCmpXchgInst *cloneImpl() const; 537 538 public: 539 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, 540 AtomicOrdering SuccessOrdering, 541 AtomicOrdering FailureOrdering, SyncScope::ID SSID, 542 Instruction *InsertBefore = nullptr); 543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, 544 AtomicOrdering SuccessOrdering, 545 AtomicOrdering FailureOrdering, SyncScope::ID SSID, 546 BasicBlock *InsertAtEnd); 547 548 // allocate space for exactly three operands 549 void *operator new(size_t s) { 550 return User::operator new(s, 3); 551 } 552 553 using VolatileField = BoolBitfieldElementT<0>; 554 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>; 555 using SuccessOrderingField = 556 AtomicOrderingBitfieldElementT<WeakField::NextBit>; 557 using FailureOrderingField = 558 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>; 559 using AlignmentField = 560 AlignmentBitfieldElementT<FailureOrderingField::NextBit>; 561 static_assert( 562 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField, 563 FailureOrderingField, AlignmentField>(), 564 "Bitfields must be contiguous"); 565 566 /// Return the alignment of the memory that is being allocated by the 567 /// instruction. 568 Align getAlign() const { 569 return Align(1ULL << getSubclassData<AlignmentField>()); 570 } 571 572 void setAlignment(Align Align) { 573 setSubclassData<AlignmentField>(Log2(Align)); 574 } 575 576 /// Return true if this is a cmpxchg from a volatile memory 577 /// location. 578 /// 579 bool isVolatile() const { return getSubclassData<VolatileField>(); } 580 581 /// Specify whether this is a volatile cmpxchg. 582 /// 583 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 584 585 /// Return true if this cmpxchg may spuriously fail. 586 bool isWeak() const { return getSubclassData<WeakField>(); } 587 588 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); } 589 590 /// Transparently provide more efficient getOperand methods. 591 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 592 593 /// Returns the success ordering constraint of this cmpxchg instruction. 594 AtomicOrdering getSuccessOrdering() const { 595 return getSubclassData<SuccessOrderingField>(); 596 } 597 598 /// Sets the success ordering constraint of this cmpxchg instruction. 599 void setSuccessOrdering(AtomicOrdering Ordering) { 600 assert(Ordering != AtomicOrdering::NotAtomic && 601 "CmpXchg instructions can only be atomic."); 602 setSubclassData<SuccessOrderingField>(Ordering); 603 } 604 605 /// Returns the failure ordering constraint of this cmpxchg instruction. 606 AtomicOrdering getFailureOrdering() const { 607 return getSubclassData<FailureOrderingField>(); 608 } 609 610 /// Sets the failure ordering constraint of this cmpxchg instruction. 
611 void setFailureOrdering(AtomicOrdering Ordering) { 612 assert(Ordering != AtomicOrdering::NotAtomic && 613 "CmpXchg instructions can only be atomic."); 614 setSubclassData<FailureOrderingField>(Ordering); 615 } 616 617 /// Returns the synchronization scope ID of this cmpxchg instruction. 618 SyncScope::ID getSyncScopeID() const { 619 return SSID; 620 } 621 622 /// Sets the synchronization scope ID of this cmpxchg instruction. 623 void setSyncScopeID(SyncScope::ID SSID) { 624 this->SSID = SSID; 625 } 626 627 Value *getPointerOperand() { return getOperand(0); } 628 const Value *getPointerOperand() const { return getOperand(0); } 629 static unsigned getPointerOperandIndex() { return 0U; } 630 631 Value *getCompareOperand() { return getOperand(1); } 632 const Value *getCompareOperand() const { return getOperand(1); } 633 634 Value *getNewValOperand() { return getOperand(2); } 635 const Value *getNewValOperand() const { return getOperand(2); } 636 637 /// Returns the address space of the pointer operand. 638 unsigned getPointerAddressSpace() const { 639 return getPointerOperand()->getType()->getPointerAddressSpace(); 640 } 641 642 /// Returns the strongest permitted ordering on failure, given the 643 /// desired ordering on success. 644 /// 645 /// If the comparison in a cmpxchg operation fails, there is no atomic store 646 /// so release semantics cannot be provided. So this function drops explicit 647 /// Release requests from the AtomicOrdering. A SequentiallyConsistent 648 /// operation would remain SequentiallyConsistent. 649 static AtomicOrdering 650 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { 651 switch (SuccessOrdering) { 652 default: 653 llvm_unreachable("invalid cmpxchg success ordering"); 654 case AtomicOrdering::Release: 655 case AtomicOrdering::Monotonic: 656 return AtomicOrdering::Monotonic; 657 case AtomicOrdering::AcquireRelease: 658 case AtomicOrdering::Acquire: 659 return AtomicOrdering::Acquire; 660 case AtomicOrdering::SequentiallyConsistent: 661 return AtomicOrdering::SequentiallyConsistent; 662 } 663 } 664 665 // Methods for support type inquiry through isa, cast, and dyn_cast: 666 static bool classof(const Instruction *I) { 667 return I->getOpcode() == Instruction::AtomicCmpXchg; 668 } 669 static bool classof(const Value *V) { 670 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 671 } 672 673 private: 674 // Shadow Instruction::setInstructionSubclassData with a private forwarding 675 // method so that subclasses cannot accidentally use it. 676 template <typename Bitfield> 677 void setSubclassData(typename Bitfield::Type Value) { 678 Instruction::setSubclassData<Bitfield>(Value); 679 } 680 681 /// The synchronization scope ID of this cmpxchg instruction. Not quite 682 /// enough room in SubClassData for everything, so synchronization scope ID 683 /// gets its own field. 684 SyncScope::ID SSID; 685 }; 686 687 template <> 688 struct OperandTraits<AtomicCmpXchgInst> : 689 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { 690 }; 691 692 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value) 693 694 //===----------------------------------------------------------------------===// 695 // AtomicRMWInst Class 696 //===----------------------------------------------------------------------===// 697 698 /// an instruction that atomically reads a memory location, 699 /// combines it with another value, and then stores the result back. Returns 700 /// the old value. 
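/// An illustrative textual IR form (typed-pointer syntax):
/// \code
///   %old = atomicrmw add i32* %counter, i32 1 seq_cst
/// \endcode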
701 /// 702 class AtomicRMWInst : public Instruction { 703 protected: 704 // Note: Instruction needs to be a friend here to call cloneImpl. 705 friend class Instruction; 706 707 AtomicRMWInst *cloneImpl() const; 708 709 public: 710 /// This enumeration lists the possible modifications atomicrmw can make. In 711 /// the descriptions, 'p' is the pointer to the instruction's memory location, 712 /// 'old' is the initial value of *p, and 'v' is the other value passed to the 713 /// instruction. These instructions always return 'old'. 714 enum BinOp : unsigned { 715 /// *p = v 716 Xchg, 717 /// *p = old + v 718 Add, 719 /// *p = old - v 720 Sub, 721 /// *p = old & v 722 And, 723 /// *p = ~(old & v) 724 Nand, 725 /// *p = old | v 726 Or, 727 /// *p = old ^ v 728 Xor, 729 /// *p = old >signed v ? old : v 730 Max, 731 /// *p = old <signed v ? old : v 732 Min, 733 /// *p = old >unsigned v ? old : v 734 UMax, 735 /// *p = old <unsigned v ? old : v 736 UMin, 737 738 /// *p = old + v 739 FAdd, 740 741 /// *p = old - v 742 FSub, 743 744 FIRST_BINOP = Xchg, 745 LAST_BINOP = FSub, 746 BAD_BINOP 747 }; 748 749 private: 750 template <unsigned Offset> 751 using AtomicOrderingBitfieldElement = 752 typename Bitfield::Element<AtomicOrdering, Offset, 3, 753 AtomicOrdering::LAST>; 754 755 template <unsigned Offset> 756 using BinOpBitfieldElement = 757 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>; 758 759 public: 760 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, 761 AtomicOrdering Ordering, SyncScope::ID SSID, 762 Instruction *InsertBefore = nullptr); 763 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, 764 AtomicOrdering Ordering, SyncScope::ID SSID, 765 BasicBlock *InsertAtEnd); 766 767 // allocate space for exactly two operands 768 void *operator new(size_t s) { 769 return User::operator new(s, 2); 770 } 771 772 using VolatileField = BoolBitfieldElementT<0>; 773 using AtomicOrderingField = 774 AtomicOrderingBitfieldElementT<VolatileField::NextBit>; 775 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>; 776 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>; 777 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField, 778 OperationField, AlignmentField>(), 779 "Bitfields must be contiguous"); 780 781 BinOp getOperation() const { return getSubclassData<OperationField>(); } 782 783 static StringRef getOperationName(BinOp Op); 784 785 static bool isFPOperation(BinOp Op) { 786 switch (Op) { 787 case AtomicRMWInst::FAdd: 788 case AtomicRMWInst::FSub: 789 return true; 790 default: 791 return false; 792 } 793 } 794 795 void setOperation(BinOp Operation) { 796 setSubclassData<OperationField>(Operation); 797 } 798 799 /// Return the alignment of the memory that is being allocated by the 800 /// instruction. 801 Align getAlign() const { 802 return Align(1ULL << getSubclassData<AlignmentField>()); 803 } 804 805 void setAlignment(Align Align) { 806 setSubclassData<AlignmentField>(Log2(Align)); 807 } 808 809 /// Return true if this is a RMW on a volatile memory location. 810 /// 811 bool isVolatile() const { return getSubclassData<VolatileField>(); } 812 813 /// Specify whether this is a volatile RMW or not. 814 /// 815 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 816 817 /// Transparently provide more efficient getOperand methods. 818 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 819 820 /// Returns the ordering constraint of this rmw instruction. 
821 AtomicOrdering getOrdering() const { 822 return getSubclassData<AtomicOrderingField>(); 823 } 824 825 /// Sets the ordering constraint of this rmw instruction. 826 void setOrdering(AtomicOrdering Ordering) { 827 assert(Ordering != AtomicOrdering::NotAtomic && 828 "atomicrmw instructions can only be atomic."); 829 setSubclassData<AtomicOrderingField>(Ordering); 830 } 831 832 /// Returns the synchronization scope ID of this rmw instruction. 833 SyncScope::ID getSyncScopeID() const { 834 return SSID; 835 } 836 837 /// Sets the synchronization scope ID of this rmw instruction. 838 void setSyncScopeID(SyncScope::ID SSID) { 839 this->SSID = SSID; 840 } 841 842 Value *getPointerOperand() { return getOperand(0); } 843 const Value *getPointerOperand() const { return getOperand(0); } 844 static unsigned getPointerOperandIndex() { return 0U; } 845 846 Value *getValOperand() { return getOperand(1); } 847 const Value *getValOperand() const { return getOperand(1); } 848 849 /// Returns the address space of the pointer operand. 850 unsigned getPointerAddressSpace() const { 851 return getPointerOperand()->getType()->getPointerAddressSpace(); 852 } 853 854 bool isFloatingPointOperation() const { 855 return isFPOperation(getOperation()); 856 } 857 858 // Methods for support type inquiry through isa, cast, and dyn_cast: 859 static bool classof(const Instruction *I) { 860 return I->getOpcode() == Instruction::AtomicRMW; 861 } 862 static bool classof(const Value *V) { 863 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 864 } 865 866 private: 867 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align, 868 AtomicOrdering Ordering, SyncScope::ID SSID); 869 870 // Shadow Instruction::setInstructionSubclassData with a private forwarding 871 // method so that subclasses cannot accidentally use it. 872 template <typename Bitfield> 873 void setSubclassData(typename Bitfield::Type Value) { 874 Instruction::setSubclassData<Bitfield>(Value); 875 } 876 877 /// The synchronization scope ID of this rmw instruction. Not quite enough 878 /// room in SubClassData for everything, so synchronization scope ID gets its 879 /// own field. 880 SyncScope::ID SSID; 881 }; 882 883 template <> 884 struct OperandTraits<AtomicRMWInst> 885 : public FixedNumOperandTraits<AtomicRMWInst,2> { 886 }; 887 888 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value) 889 890 //===----------------------------------------------------------------------===// 891 // GetElementPtrInst Class 892 //===----------------------------------------------------------------------===// 893 894 // checkGEPType - Simple wrapper function to give a better assertion failure 895 // message on bad indexes for a gep instruction. 896 // 897 inline Type *checkGEPType(Type *Ty) { 898 assert(Ty && "Invalid GetElementPtrInst indices for type!"); 899 return Ty; 900 } 901 902 /// an instruction for type-safe pointer arithmetic to 903 /// access elements of arrays and structs 904 /// 905 class GetElementPtrInst : public Instruction { 906 Type *SourceElementType; 907 Type *ResultElementType; 908 909 GetElementPtrInst(const GetElementPtrInst &GEPI); 910 911 /// Constructors - Create a getelementptr instruction with a base pointer an 912 /// list of indices. The first ctor can optionally insert before an existing 913 /// instruction, the second appends the new instruction to the specified 914 /// BasicBlock. 
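//
// A hedged usage sketch for the public factories declared below (illustrative
// only; PointeeType, Ptr, Int32Ty, and InsertBefore are assumed to exist and
// are not defined in this header):
//
//   Value *Idxs[] = {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, 4)};
//   GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
//       PointeeType, Ptr, Idxs, "elt.addr", InsertBefore);
//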
915 inline GetElementPtrInst(Type *PointeeType, Value *Ptr, 916 ArrayRef<Value *> IdxList, unsigned Values, 917 const Twine &NameStr, Instruction *InsertBefore); 918 inline GetElementPtrInst(Type *PointeeType, Value *Ptr, 919 ArrayRef<Value *> IdxList, unsigned Values, 920 const Twine &NameStr, BasicBlock *InsertAtEnd); 921 922 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); 923 924 protected: 925 // Note: Instruction needs to be a friend here to call cloneImpl. 926 friend class Instruction; 927 928 GetElementPtrInst *cloneImpl() const; 929 930 public: 931 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, 932 ArrayRef<Value *> IdxList, 933 const Twine &NameStr = "", 934 Instruction *InsertBefore = nullptr) { 935 unsigned Values = 1 + unsigned(IdxList.size()); 936 if (!PointeeType) 937 PointeeType = 938 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); 939 else 940 assert( 941 PointeeType == 942 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()); 943 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, 944 NameStr, InsertBefore); 945 } 946 947 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, 948 ArrayRef<Value *> IdxList, 949 const Twine &NameStr, 950 BasicBlock *InsertAtEnd) { 951 unsigned Values = 1 + unsigned(IdxList.size()); 952 if (!PointeeType) 953 PointeeType = 954 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); 955 else 956 assert( 957 PointeeType == 958 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()); 959 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, 960 NameStr, InsertAtEnd); 961 } 962 963 /// Create an "inbounds" getelementptr. See the documentation for the 964 /// "inbounds" flag in LangRef.html for details. 965 static GetElementPtrInst *CreateInBounds(Value *Ptr, 966 ArrayRef<Value *> IdxList, 967 const Twine &NameStr = "", 968 Instruction *InsertBefore = nullptr){ 969 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore); 970 } 971 972 static GetElementPtrInst * 973 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, 974 const Twine &NameStr = "", 975 Instruction *InsertBefore = nullptr) { 976 GetElementPtrInst *GEP = 977 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); 978 GEP->setIsInBounds(true); 979 return GEP; 980 } 981 982 static GetElementPtrInst *CreateInBounds(Value *Ptr, 983 ArrayRef<Value *> IdxList, 984 const Twine &NameStr, 985 BasicBlock *InsertAtEnd) { 986 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd); 987 } 988 989 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, 990 ArrayRef<Value *> IdxList, 991 const Twine &NameStr, 992 BasicBlock *InsertAtEnd) { 993 GetElementPtrInst *GEP = 994 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); 995 GEP->setIsInBounds(true); 996 return GEP; 997 } 998 999 /// Transparently provide more efficient getOperand methods. 1000 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1001 1002 Type *getSourceElementType() const { return SourceElementType; } 1003 1004 void setSourceElementType(Type *Ty) { SourceElementType = Ty; } 1005 void setResultElementType(Type *Ty) { ResultElementType = Ty; } 1006 1007 Type *getResultElementType() const { 1008 assert(ResultElementType == 1009 cast<PointerType>(getType()->getScalarType())->getElementType()); 1010 return ResultElementType; 1011 } 1012 1013 /// Returns the address space of this instruction's pointer type. 
1014 unsigned getAddressSpace() const { 1015 // Note that this is always the same as the pointer operand's address space 1016 // and that is cheaper to compute, so cheat here. 1017 return getPointerAddressSpace(); 1018 } 1019 1020 /// Returns the result type of a getelementptr with the given source 1021 /// element type and indexes. 1022 /// 1023 /// Null is returned if the indices are invalid for the specified 1024 /// source element type. 1025 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList); 1026 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList); 1027 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList); 1028 1029 /// Return the type of the element at the given index of an indexable 1030 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})". 1031 /// 1032 /// Returns null if the type can't be indexed, or the given index is not 1033 /// legal for the given type. 1034 static Type *getTypeAtIndex(Type *Ty, Value *Idx); 1035 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx); 1036 1037 inline op_iterator idx_begin() { return op_begin()+1; } 1038 inline const_op_iterator idx_begin() const { return op_begin()+1; } 1039 inline op_iterator idx_end() { return op_end(); } 1040 inline const_op_iterator idx_end() const { return op_end(); } 1041 1042 inline iterator_range<op_iterator> indices() { 1043 return make_range(idx_begin(), idx_end()); 1044 } 1045 1046 inline iterator_range<const_op_iterator> indices() const { 1047 return make_range(idx_begin(), idx_end()); 1048 } 1049 1050 Value *getPointerOperand() { 1051 return getOperand(0); 1052 } 1053 const Value *getPointerOperand() const { 1054 return getOperand(0); 1055 } 1056 static unsigned getPointerOperandIndex() { 1057 return 0U; // get index for modifying correct operand. 1058 } 1059 1060 /// Method to return the pointer operand as a 1061 /// PointerType. 1062 Type *getPointerOperandType() const { 1063 return getPointerOperand()->getType(); 1064 } 1065 1066 /// Returns the address space of the pointer operand. 1067 unsigned getPointerAddressSpace() const { 1068 return getPointerOperandType()->getPointerAddressSpace(); 1069 } 1070 1071 /// Returns the pointer type returned by the GEP 1072 /// instruction, which may be a vector of pointers. 1073 static Type *getGEPReturnType(Type *ElTy, Value *Ptr, 1074 ArrayRef<Value *> IdxList) { 1075 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)), 1076 Ptr->getType()->getPointerAddressSpace()); 1077 // Vector GEP 1078 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) { 1079 ElementCount EltCount = PtrVTy->getElementCount(); 1080 return VectorType::get(PtrTy, EltCount); 1081 } 1082 for (Value *Index : IdxList) 1083 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) { 1084 ElementCount EltCount = IndexVTy->getElementCount(); 1085 return VectorType::get(PtrTy, EltCount); 1086 } 1087 // Scalar GEP 1088 return PtrTy; 1089 } 1090 1091 unsigned getNumIndices() const { // Note: always non-negative 1092 return getNumOperands() - 1; 1093 } 1094 1095 bool hasIndices() const { 1096 return getNumOperands() > 1; 1097 } 1098 1099 /// Return true if all of the indices of this GEP are 1100 /// zeros. If so, the result pointer and the first operand have the same 1101 /// value, just potentially different types. 1102 bool hasAllZeroIndices() const; 1103 1104 /// Return true if all of the indices of this GEP are 1105 /// constant integers. 
If so, the result pointer and the first operand have 1106 /// a constant offset between them. 1107 bool hasAllConstantIndices() const; 1108 1109 /// Set or clear the inbounds flag on this GEP instruction. 1110 /// See LangRef.html for the meaning of inbounds on a getelementptr. 1111 void setIsInBounds(bool b = true); 1112 1113 /// Determine whether the GEP has the inbounds flag. 1114 bool isInBounds() const; 1115 1116 /// Accumulate the constant address offset of this GEP if possible. 1117 /// 1118 /// This routine accepts an APInt into which it will accumulate the constant 1119 /// offset of this GEP if the GEP is in fact constant. If the GEP is not 1120 /// all-constant, it returns false and the value of the offset APInt is 1121 /// undefined (it is *not* preserved!). The APInt passed into this routine 1122 /// must be at least as wide as the IntPtr type for the address space of 1123 /// the base GEP pointer. 1124 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; 1125 1126 // Methods for support type inquiry through isa, cast, and dyn_cast: 1127 static bool classof(const Instruction *I) { 1128 return (I->getOpcode() == Instruction::GetElementPtr); 1129 } 1130 static bool classof(const Value *V) { 1131 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1132 } 1133 }; 1134 1135 template <> 1136 struct OperandTraits<GetElementPtrInst> : 1137 public VariadicOperandTraits<GetElementPtrInst, 1> { 1138 }; 1139 1140 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, 1141 ArrayRef<Value *> IdxList, unsigned Values, 1142 const Twine &NameStr, 1143 Instruction *InsertBefore) 1144 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, 1145 OperandTraits<GetElementPtrInst>::op_end(this) - Values, 1146 Values, InsertBefore), 1147 SourceElementType(PointeeType), 1148 ResultElementType(getIndexedType(PointeeType, IdxList)) { 1149 assert(ResultElementType == 1150 cast<PointerType>(getType()->getScalarType())->getElementType()); 1151 init(Ptr, IdxList, NameStr); 1152 } 1153 1154 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, 1155 ArrayRef<Value *> IdxList, unsigned Values, 1156 const Twine &NameStr, 1157 BasicBlock *InsertAtEnd) 1158 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, 1159 OperandTraits<GetElementPtrInst>::op_end(this) - Values, 1160 Values, InsertAtEnd), 1161 SourceElementType(PointeeType), 1162 ResultElementType(getIndexedType(PointeeType, IdxList)) { 1163 assert(ResultElementType == 1164 cast<PointerType>(getType()->getScalarType())->getElementType()); 1165 init(Ptr, IdxList, NameStr); 1166 } 1167 1168 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) 1169 1170 //===----------------------------------------------------------------------===// 1171 // ICmpInst Class 1172 //===----------------------------------------------------------------------===// 1173 1174 /// This instruction compares its operands according to the predicate given 1175 /// to the constructor. It only operates on integers or pointers. The operands 1176 /// must be identical types. 1177 /// Represent an integer comparison operator. 
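/// A minimal construction sketch (illustrative; LHS and RHS are assumed to be
/// existing integer values of identical type):
/// \code
///   ICmpInst *Cmp = new ICmpInst(ICmpInst::ICMP_SLT, LHS, RHS, "cmp");
/// \endcode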
1178 class ICmpInst: public CmpInst { 1179 void AssertOK() { 1180 assert(isIntPredicate() && 1181 "Invalid ICmp predicate value"); 1182 assert(getOperand(0)->getType() == getOperand(1)->getType() && 1183 "Both operands to ICmp instruction are not of the same type!"); 1184 // Check that the operands are the right type 1185 assert((getOperand(0)->getType()->isIntOrIntVectorTy() || 1186 getOperand(0)->getType()->isPtrOrPtrVectorTy()) && 1187 "Invalid operand types for ICmp instruction"); 1188 } 1189 1190 protected: 1191 // Note: Instruction needs to be a friend here to call cloneImpl. 1192 friend class Instruction; 1193 1194 /// Clone an identical ICmpInst 1195 ICmpInst *cloneImpl() const; 1196 1197 public: 1198 /// Constructor with insert-before-instruction semantics. 1199 ICmpInst( 1200 Instruction *InsertBefore, ///< Where to insert 1201 Predicate pred, ///< The predicate to use for the comparison 1202 Value *LHS, ///< The left-hand-side of the expression 1203 Value *RHS, ///< The right-hand-side of the expression 1204 const Twine &NameStr = "" ///< Name of the instruction 1205 ) : CmpInst(makeCmpResultType(LHS->getType()), 1206 Instruction::ICmp, pred, LHS, RHS, NameStr, 1207 InsertBefore) { 1208 #ifndef NDEBUG 1209 AssertOK(); 1210 #endif 1211 } 1212 1213 /// Constructor with insert-at-end semantics. 1214 ICmpInst( 1215 BasicBlock &InsertAtEnd, ///< Block to insert into. 1216 Predicate pred, ///< The predicate to use for the comparison 1217 Value *LHS, ///< The left-hand-side of the expression 1218 Value *RHS, ///< The right-hand-side of the expression 1219 const Twine &NameStr = "" ///< Name of the instruction 1220 ) : CmpInst(makeCmpResultType(LHS->getType()), 1221 Instruction::ICmp, pred, LHS, RHS, NameStr, 1222 &InsertAtEnd) { 1223 #ifndef NDEBUG 1224 AssertOK(); 1225 #endif 1226 } 1227 1228 /// Constructor with no-insertion semantics 1229 ICmpInst( 1230 Predicate pred, ///< The predicate to use for the comparison 1231 Value *LHS, ///< The left-hand-side of the expression 1232 Value *RHS, ///< The right-hand-side of the expression 1233 const Twine &NameStr = "" ///< Name of the instruction 1234 ) : CmpInst(makeCmpResultType(LHS->getType()), 1235 Instruction::ICmp, pred, LHS, RHS, NameStr) { 1236 #ifndef NDEBUG 1237 AssertOK(); 1238 #endif 1239 } 1240 1241 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. 1242 /// @returns the predicate that would be the result if the operand were 1243 /// regarded as signed. 1244 /// Return the signed version of the predicate 1245 Predicate getSignedPredicate() const { 1246 return getSignedPredicate(getPredicate()); 1247 } 1248 1249 /// This is a static version that you can use without an instruction. 1250 /// Return the signed version of the predicate. 1251 static Predicate getSignedPredicate(Predicate pred); 1252 1253 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. 1254 /// @returns the predicate that would be the result if the operand were 1255 /// regarded as unsigned. 1256 /// Return the unsigned version of the predicate 1257 Predicate getUnsignedPredicate() const { 1258 return getUnsignedPredicate(getPredicate()); 1259 } 1260 1261 /// This is a static version that you can use without an instruction. 1262 /// Return the unsigned version of the predicate. 1263 static Predicate getUnsignedPredicate(Predicate pred); 1264 1265 /// Return true if this predicate is either EQ or NE. This also 1266 /// tests for commutativity. 
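/// For example, isEquality(ICMP_EQ) and isEquality(ICMP_NE) return true,
/// while relational predicates such as ICMP_SLT or ICMP_UGE do not.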
1267 static bool isEquality(Predicate P) { 1268 return P == ICMP_EQ || P == ICMP_NE; 1269 } 1270 1271 /// Return true if this predicate is either EQ or NE. This also 1272 /// tests for commutativity. 1273 bool isEquality() const { 1274 return isEquality(getPredicate()); 1275 } 1276 1277 /// @returns true if the predicate of this ICmpInst is commutative 1278 /// Determine if this relation is commutative. 1279 bool isCommutative() const { return isEquality(); } 1280 1281 /// Return true if the predicate is relational (not EQ or NE). 1282 /// 1283 bool isRelational() const { 1284 return !isEquality(); 1285 } 1286 1287 /// Return true if the predicate is relational (not EQ or NE). 1288 /// 1289 static bool isRelational(Predicate P) { 1290 return !isEquality(P); 1291 } 1292 1293 /// Return true if the predicate is SGT or UGT. 1294 /// 1295 static bool isGT(Predicate P) { 1296 return P == ICMP_SGT || P == ICMP_UGT; 1297 } 1298 1299 /// Return true if the predicate is SLT or ULT. 1300 /// 1301 static bool isLT(Predicate P) { 1302 return P == ICMP_SLT || P == ICMP_ULT; 1303 } 1304 1305 /// Return true if the predicate is SGE or UGE. 1306 /// 1307 static bool isGE(Predicate P) { 1308 return P == ICMP_SGE || P == ICMP_UGE; 1309 } 1310 1311 /// Return true if the predicate is SLE or ULE. 1312 /// 1313 static bool isLE(Predicate P) { 1314 return P == ICMP_SLE || P == ICMP_ULE; 1315 } 1316 1317 /// Exchange the two operands to this instruction in such a way that it does 1318 /// not modify the semantics of the instruction. The predicate value may be 1319 /// changed to retain the same result if the predicate is order dependent 1320 /// (e.g. ult). 1321 /// Swap operands and adjust predicate. 1322 void swapOperands() { 1323 setPredicate(getSwappedPredicate()); 1324 Op<0>().swap(Op<1>()); 1325 } 1326 1327 // Methods for support type inquiry through isa, cast, and dyn_cast: 1328 static bool classof(const Instruction *I) { 1329 return I->getOpcode() == Instruction::ICmp; 1330 } 1331 static bool classof(const Value *V) { 1332 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1333 } 1334 }; 1335 1336 //===----------------------------------------------------------------------===// 1337 // FCmpInst Class 1338 //===----------------------------------------------------------------------===// 1339 1340 /// This instruction compares its operands according to the predicate given 1341 /// to the constructor. It only operates on floating point values or packed 1342 /// vectors of floating point values. The operands must be identical types. 1343 /// Represents a floating point comparison operator. 1344 class FCmpInst: public CmpInst { 1345 void AssertOK() { 1346 assert(isFPPredicate() && "Invalid FCmp predicate value"); 1347 assert(getOperand(0)->getType() == getOperand(1)->getType() && 1348 "Both operands to FCmp instruction are not of the same type!"); 1349 // Check that the operands are the right type 1350 assert(getOperand(0)->getType()->isFPOrFPVectorTy() && 1351 "Invalid operand types for FCmp instruction"); 1352 } 1353 1354 protected: 1355 // Note: Instruction needs to be a friend here to call cloneImpl. 1356 friend class Instruction; 1357 1358 /// Clone an identical FCmpInst 1359 FCmpInst *cloneImpl() const; 1360 1361 public: 1362 /// Constructor with insert-before-instruction semantics. 
1363 FCmpInst( 1364 Instruction *InsertBefore, ///< Where to insert 1365 Predicate pred, ///< The predicate to use for the comparison 1366 Value *LHS, ///< The left-hand-side of the expression 1367 Value *RHS, ///< The right-hand-side of the expression 1368 const Twine &NameStr = "" ///< Name of the instruction 1369 ) : CmpInst(makeCmpResultType(LHS->getType()), 1370 Instruction::FCmp, pred, LHS, RHS, NameStr, 1371 InsertBefore) { 1372 AssertOK(); 1373 } 1374 1375 /// Constructor with insert-at-end semantics. 1376 FCmpInst( 1377 BasicBlock &InsertAtEnd, ///< Block to insert into. 1378 Predicate pred, ///< The predicate to use for the comparison 1379 Value *LHS, ///< The left-hand-side of the expression 1380 Value *RHS, ///< The right-hand-side of the expression 1381 const Twine &NameStr = "" ///< Name of the instruction 1382 ) : CmpInst(makeCmpResultType(LHS->getType()), 1383 Instruction::FCmp, pred, LHS, RHS, NameStr, 1384 &InsertAtEnd) { 1385 AssertOK(); 1386 } 1387 1388 /// Constructor with no-insertion semantics 1389 FCmpInst( 1390 Predicate Pred, ///< The predicate to use for the comparison 1391 Value *LHS, ///< The left-hand-side of the expression 1392 Value *RHS, ///< The right-hand-side of the expression 1393 const Twine &NameStr = "", ///< Name of the instruction 1394 Instruction *FlagsSource = nullptr 1395 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS, 1396 RHS, NameStr, nullptr, FlagsSource) { 1397 AssertOK(); 1398 } 1399 1400 /// @returns true if the predicate of this instruction is EQ or NE. 1401 /// Determine if this is an equality predicate. 1402 static bool isEquality(Predicate Pred) { 1403 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || 1404 Pred == FCMP_UNE; 1405 } 1406 1407 /// @returns true if the predicate of this instruction is EQ or NE. 1408 /// Determine if this is an equality predicate. 1409 bool isEquality() const { return isEquality(getPredicate()); } 1410 1411 /// @returns true if the predicate of this instruction is commutative. 1412 /// Determine if this is a commutative predicate. 1413 bool isCommutative() const { 1414 return isEquality() || 1415 getPredicate() == FCMP_FALSE || 1416 getPredicate() == FCMP_TRUE || 1417 getPredicate() == FCMP_ORD || 1418 getPredicate() == FCMP_UNO; 1419 } 1420 1421 /// @returns true if the predicate is relational (not EQ or NE). 1422 /// Determine if this a relational predicate. 1423 bool isRelational() const { return !isEquality(); } 1424 1425 /// Exchange the two operands to this instruction in such a way that it does 1426 /// not modify the semantics of the instruction. The predicate value may be 1427 /// changed to retain the same result if the predicate is order dependent 1428 /// (e.g. ult). 1429 /// Swap operands and adjust predicate. 1430 void swapOperands() { 1431 setPredicate(getSwappedPredicate()); 1432 Op<0>().swap(Op<1>()); 1433 } 1434 1435 /// Methods for support type inquiry through isa, cast, and dyn_cast: 1436 static bool classof(const Instruction *I) { 1437 return I->getOpcode() == Instruction::FCmp; 1438 } 1439 static bool classof(const Value *V) { 1440 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1441 } 1442 }; 1443 1444 //===----------------------------------------------------------------------===// 1445 /// This class represents a function call, abstracting a target 1446 /// machine's calling convention. This class uses low bit of the SubClassData 1447 /// field to indicate whether or not this is a tail call. 
The rest of the bits 1448 /// hold the calling convention of the call. 1449 /// 1450 class CallInst : public CallBase { 1451 CallInst(const CallInst &CI); 1452 1453 /// Construct a CallInst given a range of arguments. 1454 /// Construct a CallInst from a range of arguments 1455 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1456 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1457 Instruction *InsertBefore); 1458 1459 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1460 const Twine &NameStr, Instruction *InsertBefore) 1461 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {} 1462 1463 /// Construct a CallInst given a range of arguments. 1464 /// Construct a CallInst from a range of arguments 1465 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1466 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1467 BasicBlock *InsertAtEnd); 1468 1469 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, 1470 Instruction *InsertBefore); 1471 1472 CallInst(FunctionType *ty, Value *F, const Twine &NameStr, 1473 BasicBlock *InsertAtEnd); 1474 1475 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, 1476 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 1477 void init(FunctionType *FTy, Value *Func, const Twine &NameStr); 1478 1479 /// Compute the number of operands to allocate. 1480 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { 1481 // We need one operand for the called function, plus the input operand 1482 // counts provided. 1483 return 1 + NumArgs + NumBundleInputs; 1484 } 1485 1486 protected: 1487 // Note: Instruction needs to be a friend here to call cloneImpl. 1488 friend class Instruction; 1489 1490 CallInst *cloneImpl() const; 1491 1492 public: 1493 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", 1494 Instruction *InsertBefore = nullptr) { 1495 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); 1496 } 1497 1498 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1499 const Twine &NameStr, 1500 Instruction *InsertBefore = nullptr) { 1501 return new (ComputeNumOperands(Args.size())) 1502 CallInst(Ty, Func, Args, None, NameStr, InsertBefore); 1503 } 1504 1505 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1506 ArrayRef<OperandBundleDef> Bundles = None, 1507 const Twine &NameStr = "", 1508 Instruction *InsertBefore = nullptr) { 1509 const int NumOperands = 1510 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 1511 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 1512 1513 return new (NumOperands, DescriptorBytes) 1514 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); 1515 } 1516 1517 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, 1518 BasicBlock *InsertAtEnd) { 1519 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd); 1520 } 1521 1522 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1523 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1524 return new (ComputeNumOperands(Args.size())) 1525 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd); 1526 } 1527 1528 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1529 ArrayRef<OperandBundleDef> Bundles, 1530 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1531 const int NumOperands = 1532 ComputeNumOperands(Args.size(), 
        CountBundleInputs(Bundles));
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical to \p CI in every way except
  /// that the operand bundles for the new instruction are set to the operand
  /// bundles in \p Bundles.
  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
                          Instruction *InsertPt = nullptr);

  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical to \p CI in every way except
  /// that the operand bundle for the new instruction is set to the operand
  /// bundle in \p Bundle.
  static CallInst *CreateWithReplacedBundle(CallInst *CI,
                                            OperandBundleDef Bundle,
                                            Instruction *InsertPt = nullptr);

  /// Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
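  ///
  /// A minimal usage sketch; InsertPt, FooTy, DL and Ctx are illustrative
  /// names from the caller's context, not part of this API:
  /// \code
  ///   Type *IntPtrTy = DL.getIntPtrType(Ctx);
  ///   Constant *AllocSize = ConstantExpr::getSizeOf(FooTy);
  ///   AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, IntPtrTy);
  ///   // Emits the size computation, the call to malloc, and a bitcast of
  ///   // the result to FooTy* immediately before InsertPt.
  ///   Instruction *Malloc = CallInst::CreateMalloc(
  ///       InsertPt, IntPtrTy, FooTy, AllocSize, /*ArraySize=*/nullptr,
  ///       /*MallocF=*/nullptr, "obj");
  /// \endcode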
1604 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, 1605 Type *AllocTy, Value *AllocSize, 1606 Value *ArraySize = nullptr, 1607 Function *MallocF = nullptr, 1608 const Twine &Name = ""); 1609 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, 1610 Type *AllocTy, Value *AllocSize, 1611 Value *ArraySize = nullptr, 1612 Function *MallocF = nullptr, 1613 const Twine &Name = ""); 1614 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, 1615 Type *AllocTy, Value *AllocSize, 1616 Value *ArraySize = nullptr, 1617 ArrayRef<OperandBundleDef> Bundles = None, 1618 Function *MallocF = nullptr, 1619 const Twine &Name = ""); 1620 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, 1621 Type *AllocTy, Value *AllocSize, 1622 Value *ArraySize = nullptr, 1623 ArrayRef<OperandBundleDef> Bundles = None, 1624 Function *MallocF = nullptr, 1625 const Twine &Name = ""); 1626 /// Generate the IR for a call to the builtin free function. 1627 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore); 1628 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd); 1629 static Instruction *CreateFree(Value *Source, 1630 ArrayRef<OperandBundleDef> Bundles, 1631 Instruction *InsertBefore); 1632 static Instruction *CreateFree(Value *Source, 1633 ArrayRef<OperandBundleDef> Bundles, 1634 BasicBlock *InsertAtEnd); 1635 1636 // Note that 'musttail' implies 'tail'. 1637 enum TailCallKind : unsigned { 1638 TCK_None = 0, 1639 TCK_Tail = 1, 1640 TCK_MustTail = 2, 1641 TCK_NoTail = 3, 1642 TCK_LAST = TCK_NoTail 1643 }; 1644 1645 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>; 1646 static_assert( 1647 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(), 1648 "Bitfields must be contiguous"); 1649 1650 TailCallKind getTailCallKind() const { 1651 return getSubclassData<TailCallKindField>(); 1652 } 1653 1654 bool isTailCall() const { 1655 TailCallKind Kind = getTailCallKind(); 1656 return Kind == TCK_Tail || Kind == TCK_MustTail; 1657 } 1658 1659 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; } 1660 1661 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; } 1662 1663 void setTailCallKind(TailCallKind TCK) { 1664 setSubclassData<TailCallKindField>(TCK); 1665 } 1666 1667 void setTailCall(bool IsTc = true) { 1668 setTailCallKind(IsTc ? TCK_Tail : TCK_None); 1669 } 1670 1671 /// Return true if the call can return twice 1672 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } 1673 void setCanReturnTwice() { 1674 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice); 1675 } 1676 1677 // Methods for support type inquiry through isa, cast, and dyn_cast: 1678 static bool classof(const Instruction *I) { 1679 return I->getOpcode() == Instruction::Call; 1680 } 1681 static bool classof(const Value *V) { 1682 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1683 } 1684 1685 /// Updates profile metadata by scaling it by \p S / \p T. 1686 void updateProfWeight(uint64_t S, uint64_t T); 1687 1688 private: 1689 // Shadow Instruction::setInstructionSubclassData with a private forwarding 1690 // method so that subclasses cannot accidentally use it. 
1691 template <typename Bitfield> 1692 void setSubclassData(typename Bitfield::Type Value) { 1693 Instruction::setSubclassData<Bitfield>(Value); 1694 } 1695 }; 1696 1697 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1698 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1699 BasicBlock *InsertAtEnd) 1700 : CallBase(Ty->getReturnType(), Instruction::Call, 1701 OperandTraits<CallBase>::op_end(this) - 1702 (Args.size() + CountBundleInputs(Bundles) + 1), 1703 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1704 InsertAtEnd) { 1705 init(Ty, Func, Args, Bundles, NameStr); 1706 } 1707 1708 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1709 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1710 Instruction *InsertBefore) 1711 : CallBase(Ty->getReturnType(), Instruction::Call, 1712 OperandTraits<CallBase>::op_end(this) - 1713 (Args.size() + CountBundleInputs(Bundles) + 1), 1714 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1715 InsertBefore) { 1716 init(Ty, Func, Args, Bundles, NameStr); 1717 } 1718 1719 //===----------------------------------------------------------------------===// 1720 // SelectInst Class 1721 //===----------------------------------------------------------------------===// 1722 1723 /// This class represents the LLVM 'select' instruction. 1724 /// 1725 class SelectInst : public Instruction { 1726 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1727 Instruction *InsertBefore) 1728 : Instruction(S1->getType(), Instruction::Select, 1729 &Op<0>(), 3, InsertBefore) { 1730 init(C, S1, S2); 1731 setName(NameStr); 1732 } 1733 1734 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1735 BasicBlock *InsertAtEnd) 1736 : Instruction(S1->getType(), Instruction::Select, 1737 &Op<0>(), 3, InsertAtEnd) { 1738 init(C, S1, S2); 1739 setName(NameStr); 1740 } 1741 1742 void init(Value *C, Value *S1, Value *S2) { 1743 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select"); 1744 Op<0>() = C; 1745 Op<1>() = S1; 1746 Op<2>() = S2; 1747 } 1748 1749 protected: 1750 // Note: Instruction needs to be a friend here to call cloneImpl. 1751 friend class Instruction; 1752 1753 SelectInst *cloneImpl() const; 1754 1755 public: 1756 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1757 const Twine &NameStr = "", 1758 Instruction *InsertBefore = nullptr, 1759 Instruction *MDFrom = nullptr) { 1760 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); 1761 if (MDFrom) 1762 Sel->copyMetadata(*MDFrom); 1763 return Sel; 1764 } 1765 1766 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1767 const Twine &NameStr, 1768 BasicBlock *InsertAtEnd) { 1769 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); 1770 } 1771 1772 const Value *getCondition() const { return Op<0>(); } 1773 const Value *getTrueValue() const { return Op<1>(); } 1774 const Value *getFalseValue() const { return Op<2>(); } 1775 Value *getCondition() { return Op<0>(); } 1776 Value *getTrueValue() { return Op<1>(); } 1777 Value *getFalseValue() { return Op<2>(); } 1778 1779 void setCondition(Value *V) { Op<0>() = V; } 1780 void setTrueValue(Value *V) { Op<1>() = V; } 1781 void setFalseValue(Value *V) { Op<2>() = V; } 1782 1783 /// Swap the true and false values of the select instruction. 1784 /// This doesn't swap prof metadata. 
1785 void swapValues() { Op<1>().swap(Op<2>()); } 1786 1787 /// Return a string if the specified operands are invalid 1788 /// for a select operation, otherwise return null. 1789 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); 1790 1791 /// Transparently provide more efficient getOperand methods. 1792 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1793 1794 OtherOps getOpcode() const { 1795 return static_cast<OtherOps>(Instruction::getOpcode()); 1796 } 1797 1798 // Methods for support type inquiry through isa, cast, and dyn_cast: 1799 static bool classof(const Instruction *I) { 1800 return I->getOpcode() == Instruction::Select; 1801 } 1802 static bool classof(const Value *V) { 1803 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1804 } 1805 }; 1806 1807 template <> 1808 struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { 1809 }; 1810 1811 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value) 1812 1813 //===----------------------------------------------------------------------===// 1814 // VAArgInst Class 1815 //===----------------------------------------------------------------------===// 1816 1817 /// This class represents the va_arg llvm instruction, which returns 1818 /// an argument of the specified type given a va_list and increments that list 1819 /// 1820 class VAArgInst : public UnaryInstruction { 1821 protected: 1822 // Note: Instruction needs to be a friend here to call cloneImpl. 1823 friend class Instruction; 1824 1825 VAArgInst *cloneImpl() const; 1826 1827 public: 1828 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", 1829 Instruction *InsertBefore = nullptr) 1830 : UnaryInstruction(Ty, VAArg, List, InsertBefore) { 1831 setName(NameStr); 1832 } 1833 1834 VAArgInst(Value *List, Type *Ty, const Twine &NameStr, 1835 BasicBlock *InsertAtEnd) 1836 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { 1837 setName(NameStr); 1838 } 1839 1840 Value *getPointerOperand() { return getOperand(0); } 1841 const Value *getPointerOperand() const { return getOperand(0); } 1842 static unsigned getPointerOperandIndex() { return 0U; } 1843 1844 // Methods for support type inquiry through isa, cast, and dyn_cast: 1845 static bool classof(const Instruction *I) { 1846 return I->getOpcode() == VAArg; 1847 } 1848 static bool classof(const Value *V) { 1849 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1850 } 1851 }; 1852 1853 //===----------------------------------------------------------------------===// 1854 // ExtractElementInst Class 1855 //===----------------------------------------------------------------------===// 1856 1857 /// This instruction extracts a single (scalar) 1858 /// element from a VectorType value 1859 /// 1860 class ExtractElementInst : public Instruction { 1861 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", 1862 Instruction *InsertBefore = nullptr); 1863 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, 1864 BasicBlock *InsertAtEnd); 1865 1866 protected: 1867 // Note: Instruction needs to be a friend here to call cloneImpl. 
1868 friend class Instruction; 1869 1870 ExtractElementInst *cloneImpl() const; 1871 1872 public: 1873 static ExtractElementInst *Create(Value *Vec, Value *Idx, 1874 const Twine &NameStr = "", 1875 Instruction *InsertBefore = nullptr) { 1876 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); 1877 } 1878 1879 static ExtractElementInst *Create(Value *Vec, Value *Idx, 1880 const Twine &NameStr, 1881 BasicBlock *InsertAtEnd) { 1882 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); 1883 } 1884 1885 /// Return true if an extractelement instruction can be 1886 /// formed with the specified operands. 1887 static bool isValidOperands(const Value *Vec, const Value *Idx); 1888 1889 Value *getVectorOperand() { return Op<0>(); } 1890 Value *getIndexOperand() { return Op<1>(); } 1891 const Value *getVectorOperand() const { return Op<0>(); } 1892 const Value *getIndexOperand() const { return Op<1>(); } 1893 1894 VectorType *getVectorOperandType() const { 1895 return cast<VectorType>(getVectorOperand()->getType()); 1896 } 1897 1898 /// Transparently provide more efficient getOperand methods. 1899 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1900 1901 // Methods for support type inquiry through isa, cast, and dyn_cast: 1902 static bool classof(const Instruction *I) { 1903 return I->getOpcode() == Instruction::ExtractElement; 1904 } 1905 static bool classof(const Value *V) { 1906 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1907 } 1908 }; 1909 1910 template <> 1911 struct OperandTraits<ExtractElementInst> : 1912 public FixedNumOperandTraits<ExtractElementInst, 2> { 1913 }; 1914 1915 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value) 1916 1917 //===----------------------------------------------------------------------===// 1918 // InsertElementInst Class 1919 //===----------------------------------------------------------------------===// 1920 1921 /// This instruction inserts a single (scalar) 1922 /// element into a VectorType value 1923 /// 1924 class InsertElementInst : public Instruction { 1925 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, 1926 const Twine &NameStr = "", 1927 Instruction *InsertBefore = nullptr); 1928 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, 1929 BasicBlock *InsertAtEnd); 1930 1931 protected: 1932 // Note: Instruction needs to be a friend here to call cloneImpl. 1933 friend class Instruction; 1934 1935 InsertElementInst *cloneImpl() const; 1936 1937 public: 1938 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 1939 const Twine &NameStr = "", 1940 Instruction *InsertBefore = nullptr) { 1941 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); 1942 } 1943 1944 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 1945 const Twine &NameStr, 1946 BasicBlock *InsertAtEnd) { 1947 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); 1948 } 1949 1950 /// Return true if an insertelement instruction can be 1951 /// formed with the specified operands. 1952 static bool isValidOperands(const Value *Vec, const Value *NewElt, 1953 const Value *Idx); 1954 1955 /// Overload to return most specific vector type. 1956 /// 1957 VectorType *getType() const { 1958 return cast<VectorType>(Instruction::getType()); 1959 } 1960 1961 /// Transparently provide more efficient getOperand methods. 
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<InsertElementInst> :
  public FixedNumOperandTraits<InsertElementInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)

//===----------------------------------------------------------------------===//
//                           ShuffleVectorInst Class
//===----------------------------------------------------------------------===//

constexpr int UndefMaskElem = -1;

/// This instruction constructs a fixed permutation of two
/// input vectors.
///
/// For each element of the result vector, the shuffle mask selects an element
/// from one of the input vectors to copy to the result. Non-negative elements
/// in the mask represent an index into the concatenated pair of input vectors.
/// UndefMaskElem (-1) specifies that the result element is undefined.
///
/// For scalable vectors, all the elements of the mask must be 0 or -1. This
/// requirement may be relaxed in the future.
class ShuffleVectorInst : public Instruction {
  SmallVector<int, 4> ShuffleMask;
  Constant *ShuffleMaskForBitcode;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ShuffleVectorInst *cloneImpl() const;

public:
  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);
  ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  void *operator new(size_t s) { return User::operator new(s, 2); }

  /// Swap the operands and adjust the mask to preserve the semantics
  /// of the instruction.
  void commute();

  /// Return true if a shufflevector instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *V1, const Value *V2,
                              const Value *Mask);
  static bool isValidOperands(const Value *V1, const Value *V2,
                              ArrayRef<int> Mask);

  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the shuffle mask value of this instruction for the given element
  /// index. Return UndefMaskElem if the element is undef.
  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }

  /// Convert the input shuffle mask operand to a vector of integers. Undefined
  /// elements of the mask are returned as UndefMaskElem.
  static void getShuffleMask(const Constant *Mask,
                             SmallVectorImpl<int> &Result);

  /// Return the mask for this instruction as a vector of integers.
Undefined 2050 /// elements of the mask are returned as UndefMaskElem. 2051 void getShuffleMask(SmallVectorImpl<int> &Result) const { 2052 Result.assign(ShuffleMask.begin(), ShuffleMask.end()); 2053 } 2054 2055 /// Return the mask for this instruction, for use in bitcode. 2056 /// 2057 /// TODO: This is temporary until we decide a new bitcode encoding for 2058 /// shufflevector. 2059 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; } 2060 2061 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask, 2062 Type *ResultTy); 2063 2064 void setShuffleMask(ArrayRef<int> Mask); 2065 2066 ArrayRef<int> getShuffleMask() const { return ShuffleMask; } 2067 2068 /// Return true if this shuffle returns a vector with a different number of 2069 /// elements than its source vectors. 2070 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> 2071 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> 2072 bool changesLength() const { 2073 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) 2074 ->getElementCount() 2075 .getKnownMinValue(); 2076 unsigned NumMaskElts = ShuffleMask.size(); 2077 return NumSourceElts != NumMaskElts; 2078 } 2079 2080 /// Return true if this shuffle returns a vector with a greater number of 2081 /// elements than its source vectors. 2082 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> 2083 bool increasesLength() const { 2084 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) 2085 ->getElementCount() 2086 .getKnownMinValue(); 2087 unsigned NumMaskElts = ShuffleMask.size(); 2088 return NumSourceElts < NumMaskElts; 2089 } 2090 2091 /// Return true if this shuffle mask chooses elements from exactly one source 2092 /// vector. 2093 /// Example: <7,5,undef,7> 2094 /// This assumes that vector operands are the same length as the mask. 2095 static bool isSingleSourceMask(ArrayRef<int> Mask); 2096 static bool isSingleSourceMask(const Constant *Mask) { 2097 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2098 SmallVector<int, 16> MaskAsInts; 2099 getShuffleMask(Mask, MaskAsInts); 2100 return isSingleSourceMask(MaskAsInts); 2101 } 2102 2103 /// Return true if this shuffle chooses elements from exactly one source 2104 /// vector without changing the length of that vector. 2105 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> 2106 /// TODO: Optionally allow length-changing shuffles. 2107 bool isSingleSource() const { 2108 return !changesLength() && isSingleSourceMask(ShuffleMask); 2109 } 2110 2111 /// Return true if this shuffle mask chooses elements from exactly one source 2112 /// vector without lane crossings. A shuffle using this mask is not 2113 /// necessarily a no-op because it may change the number of elements from its 2114 /// input vectors or it may provide demanded bits knowledge via undef lanes. 2115 /// Example: <undef,undef,2,3> 2116 static bool isIdentityMask(ArrayRef<int> Mask); 2117 static bool isIdentityMask(const Constant *Mask) { 2118 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2119 SmallVector<int, 16> MaskAsInts; 2120 getShuffleMask(Mask, MaskAsInts); 2121 return isIdentityMask(MaskAsInts); 2122 } 2123 2124 /// Return true if this shuffle chooses elements from exactly one source 2125 /// vector without lane crossings and does not change the number of elements 2126 /// from its input vectors. 
  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
  bool isIdentity() const {
    return !changesLength() && isIdentityMask(ShuffleMask);
  }

  /// Return true if this shuffle lengthens exactly one source vector with
  /// undefs in the high elements.
  bool isIdentityWithPadding() const;

  /// Return true if this shuffle extracts the first N elements of exactly one
  /// source vector.
  bool isIdentityWithExtract() const;

  /// Return true if this shuffle concatenates its two source vectors. This
  /// returns false if either input is undefined. In that case, the shuffle is
  /// better classified as an identity with padding operation.
  bool isConcat() const;

  /// Return true if this shuffle mask chooses elements from its source vectors
  /// without lane crossings. A shuffle using this mask would be
  /// equivalent to a vector select with a constant condition operand.
  /// Example: <4,1,6,undef>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// This assumes that vector operands are the same length as the mask
  /// (a length-changing shuffle can never be equivalent to a vector select).
  static bool isSelectMask(ArrayRef<int> Mask);
  static bool isSelectMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSelectMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from its source vectors
  /// without lane crossings and all operands have the same number of elements.
  /// In other words, this shuffle is equivalent to a vector select with a
  /// constant condition operand.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// TODO: Optionally allow length-changing shuffles.
  bool isSelect() const {
    return !changesLength() && isSelectMask(ShuffleMask);
  }

  /// Return true if this shuffle mask swaps the order of elements from exactly
  /// one source vector.
  /// Example: <7,6,undef,4>
  /// This assumes that vector operands are the same length as the mask.
  static bool isReverseMask(ArrayRef<int> Mask);
  static bool isReverseMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReverseMask(MaskAsInts);
  }

  /// Return true if this shuffle swaps the order of elements from exactly
  /// one source vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
  /// TODO: Optionally allow length-changing shuffles.
  bool isReverse() const {
    return !changesLength() && isReverseMask(ShuffleMask);
  }

  /// Return true if this shuffle mask chooses all elements with the same value
  /// as the first element of exactly one source vector.
  /// Example: <4,undef,undef,4>
  /// This assumes that vector operands are the same length as the mask.
2197 static bool isZeroEltSplatMask(ArrayRef<int> Mask); 2198 static bool isZeroEltSplatMask(const Constant *Mask) { 2199 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2200 SmallVector<int, 16> MaskAsInts; 2201 getShuffleMask(Mask, MaskAsInts); 2202 return isZeroEltSplatMask(MaskAsInts); 2203 } 2204 2205 /// Return true if all elements of this shuffle are the same value as the 2206 /// first element of exactly one source vector without changing the length 2207 /// of that vector. 2208 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> 2209 /// TODO: Optionally allow length-changing shuffles. 2210 /// TODO: Optionally allow splats from other elements. 2211 bool isZeroEltSplat() const { 2212 return !changesLength() && isZeroEltSplatMask(ShuffleMask); 2213 } 2214 2215 /// Return true if this shuffle mask is a transpose mask. 2216 /// Transpose vector masks transpose a 2xn matrix. They read corresponding 2217 /// even- or odd-numbered vector elements from two n-dimensional source 2218 /// vectors and write each result into consecutive elements of an 2219 /// n-dimensional destination vector. Two shuffles are necessary to complete 2220 /// the transpose, one for the even elements and another for the odd elements. 2221 /// This description closely follows how the TRN1 and TRN2 AArch64 2222 /// instructions operate. 2223 /// 2224 /// For example, a simple 2x2 matrix can be transposed with: 2225 /// 2226 /// ; Original matrix 2227 /// m0 = < a, b > 2228 /// m1 = < c, d > 2229 /// 2230 /// ; Transposed matrix 2231 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > 2232 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > 2233 /// 2234 /// For matrices having greater than n columns, the resulting nx2 transposed 2235 /// matrix is stored in two result vectors such that one vector contains 2236 /// interleaved elements from all the even-numbered rows and the other vector 2237 /// contains interleaved elements from all the odd-numbered rows. For example, 2238 /// a 2x4 matrix can be transposed with: 2239 /// 2240 /// ; Original matrix 2241 /// m0 = < a, b, c, d > 2242 /// m1 = < e, f, g, h > 2243 /// 2244 /// ; Transposed matrix 2245 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > 2246 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > 2247 static bool isTransposeMask(ArrayRef<int> Mask); 2248 static bool isTransposeMask(const Constant *Mask) { 2249 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2250 SmallVector<int, 16> MaskAsInts; 2251 getShuffleMask(Mask, MaskAsInts); 2252 return isTransposeMask(MaskAsInts); 2253 } 2254 2255 /// Return true if this shuffle transposes the elements of its inputs without 2256 /// changing the length of the vectors. This operation may also be known as a 2257 /// merge or interleave. See the description for isTransposeMask() for the 2258 /// exact specification. 2259 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> 2260 bool isTranspose() const { 2261 return !changesLength() && isTransposeMask(ShuffleMask); 2262 } 2263 2264 /// Return true if this shuffle mask is an extract subvector mask. 2265 /// A valid extract subvector mask returns a smaller vector from a single 2266 /// source operand. The base extraction index is returned as well. 
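  /// Example: with 4 source elements, the mask <2,3> extracts the upper half
  /// of the first source vector, and the base extraction index is 2.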
  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index);
  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
                                     int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  bool isExtractSubvectorMask(int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
  }

  /// Change values in a shuffle permute mask assuming the two vector operands
  /// of length InVecNumElts have swapped position.
  static void commuteShuffleMask(MutableArrayRef<int> Mask,
                                 unsigned InVecNumElts) {
    for (int &Idx : Mask) {
      if (Idx == -1)
        continue;
      Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
             "shufflevector mask index out of range");
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ShuffleVector;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ShuffleVectorInst>
    : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)

//===----------------------------------------------------------------------===//
//                                ExtractValueInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
  SmallVector<unsigned, 4> Indices;

  ExtractValueInst(const ExtractValueInst &EVI);

  /// Constructors - Create an extractvalue instruction with a base aggregate
  /// value and a list of indices. The first ctor can optionally insert before
  /// an existing instruction, the second appends the new instruction to the
  /// specified BasicBlock.
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr,
                          Instruction *InsertBefore);
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractValueInst *cloneImpl() const;

public:
  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr = "",
                                  Instruction *InsertBefore = nullptr) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
  }

  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr,
                                  BasicBlock *InsertAtEnd) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
  }

  /// Returns the type of the element that would be extracted
  /// with an extractvalue instruction with the specified parameters.
  ///
  /// Null is returned if the indices are invalid for the specified type.
  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   Instruction *InsertBefore)
    : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                       ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}

ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd)
    : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                       ExtractValue, Agg, InsertAtEnd) {
  init(Idxs, NameStr);
}

//===----------------------------------------------------------------------===//
//                                InsertValueInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a struct field or array element
/// value into an aggregate value.
///
class InsertValueInst : public Instruction {
  SmallVector<unsigned, 4> Indices;

  InsertValueInst(const InsertValueInst &IVI);

  /// Constructors - Create an insertvalue instruction with a base aggregate
  /// value, a value to insert, and a list of indices. The first ctor can
  /// optionally insert before an existing instruction, the second appends
  /// the new instruction to the specified BasicBlock.
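  /// For example (illustrative), with an aggregate of type {i32, {float, float}},
  /// the index list {1, 0} names the first float of the nested struct.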
2448 inline InsertValueInst(Value *Agg, Value *Val, 2449 ArrayRef<unsigned> Idxs, 2450 const Twine &NameStr, 2451 Instruction *InsertBefore); 2452 inline InsertValueInst(Value *Agg, Value *Val, 2453 ArrayRef<unsigned> Idxs, 2454 const Twine &NameStr, BasicBlock *InsertAtEnd); 2455 2456 /// Constructors - These two constructors are convenience methods because one 2457 /// and two index insertvalue instructions are so common. 2458 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, 2459 const Twine &NameStr = "", 2460 Instruction *InsertBefore = nullptr); 2461 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, 2462 BasicBlock *InsertAtEnd); 2463 2464 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 2465 const Twine &NameStr); 2466 2467 protected: 2468 // Note: Instruction needs to be a friend here to call cloneImpl. 2469 friend class Instruction; 2470 2471 InsertValueInst *cloneImpl() const; 2472 2473 public: 2474 // allocate space for exactly two operands 2475 void *operator new(size_t s) { 2476 return User::operator new(s, 2); 2477 } 2478 2479 static InsertValueInst *Create(Value *Agg, Value *Val, 2480 ArrayRef<unsigned> Idxs, 2481 const Twine &NameStr = "", 2482 Instruction *InsertBefore = nullptr) { 2483 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); 2484 } 2485 2486 static InsertValueInst *Create(Value *Agg, Value *Val, 2487 ArrayRef<unsigned> Idxs, 2488 const Twine &NameStr, 2489 BasicBlock *InsertAtEnd) { 2490 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); 2491 } 2492 2493 /// Transparently provide more efficient getOperand methods. 2494 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2495 2496 using idx_iterator = const unsigned*; 2497 2498 inline idx_iterator idx_begin() const { return Indices.begin(); } 2499 inline idx_iterator idx_end() const { return Indices.end(); } 2500 inline iterator_range<idx_iterator> indices() const { 2501 return make_range(idx_begin(), idx_end()); 2502 } 2503 2504 Value *getAggregateOperand() { 2505 return getOperand(0); 2506 } 2507 const Value *getAggregateOperand() const { 2508 return getOperand(0); 2509 } 2510 static unsigned getAggregateOperandIndex() { 2511 return 0U; // get index for modifying correct operand 2512 } 2513 2514 Value *getInsertedValueOperand() { 2515 return getOperand(1); 2516 } 2517 const Value *getInsertedValueOperand() const { 2518 return getOperand(1); 2519 } 2520 static unsigned getInsertedValueOperandIndex() { 2521 return 1U; // get index for modifying correct operand 2522 } 2523 2524 ArrayRef<unsigned> getIndices() const { 2525 return Indices; 2526 } 2527 2528 unsigned getNumIndices() const { 2529 return (unsigned)Indices.size(); 2530 } 2531 2532 bool hasIndices() const { 2533 return true; 2534 } 2535 2536 // Methods for support type inquiry through isa, cast, and dyn_cast: 2537 static bool classof(const Instruction *I) { 2538 return I->getOpcode() == Instruction::InsertValue; 2539 } 2540 static bool classof(const Value *V) { 2541 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2542 } 2543 }; 2544 2545 template <> 2546 struct OperandTraits<InsertValueInst> : 2547 public FixedNumOperandTraits<InsertValueInst, 2> { 2548 }; 2549 2550 InsertValueInst::InsertValueInst(Value *Agg, 2551 Value *Val, 2552 ArrayRef<unsigned> Idxs, 2553 const Twine &NameStr, 2554 Instruction *InsertBefore) 2555 : Instruction(Agg->getType(), InsertValue, 2556 OperandTraits<InsertValueInst>::op_begin(this), 2557 2, InsertBefore) { 2558 init(Agg, Val, Idxs, NameStr); 2559 
} 2560 2561 InsertValueInst::InsertValueInst(Value *Agg, 2562 Value *Val, 2563 ArrayRef<unsigned> Idxs, 2564 const Twine &NameStr, 2565 BasicBlock *InsertAtEnd) 2566 : Instruction(Agg->getType(), InsertValue, 2567 OperandTraits<InsertValueInst>::op_begin(this), 2568 2, InsertAtEnd) { 2569 init(Agg, Val, Idxs, NameStr); 2570 } 2571 2572 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value) 2573 2574 //===----------------------------------------------------------------------===// 2575 // PHINode Class 2576 //===----------------------------------------------------------------------===// 2577 2578 // PHINode - The PHINode class is used to represent the magical mystical PHI 2579 // node, that can not exist in nature, but can be synthesized in a computer 2580 // scientist's overactive imagination. 2581 // 2582 class PHINode : public Instruction { 2583 /// The number of operands actually allocated. NumOperands is 2584 /// the number actually in use. 2585 unsigned ReservedSpace; 2586 2587 PHINode(const PHINode &PN); 2588 2589 explicit PHINode(Type *Ty, unsigned NumReservedValues, 2590 const Twine &NameStr = "", 2591 Instruction *InsertBefore = nullptr) 2592 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), 2593 ReservedSpace(NumReservedValues) { 2594 setName(NameStr); 2595 allocHungoffUses(ReservedSpace); 2596 } 2597 2598 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, 2599 BasicBlock *InsertAtEnd) 2600 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), 2601 ReservedSpace(NumReservedValues) { 2602 setName(NameStr); 2603 allocHungoffUses(ReservedSpace); 2604 } 2605 2606 protected: 2607 // Note: Instruction needs to be a friend here to call cloneImpl. 2608 friend class Instruction; 2609 2610 PHINode *cloneImpl() const; 2611 2612 // allocHungoffUses - this is more complicated than the generic 2613 // User::allocHungoffUses, because we have to allocate Uses for the incoming 2614 // values and pointers to the incoming blocks, all in one allocation. 2615 void allocHungoffUses(unsigned N) { 2616 User::allocHungoffUses(N, /* IsPhi */ true); 2617 } 2618 2619 public: 2620 /// Constructors - NumReservedValues is a hint for the number of incoming 2621 /// edges that this phi node will have (use 0 if you really have no idea). 2622 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 2623 const Twine &NameStr = "", 2624 Instruction *InsertBefore = nullptr) { 2625 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); 2626 } 2627 2628 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 2629 const Twine &NameStr, BasicBlock *InsertAtEnd) { 2630 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); 2631 } 2632 2633 /// Provide fast operand accessors 2634 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2635 2636 // Block iterator interface. This provides access to the list of incoming 2637 // basic blocks, which parallels the list of incoming values. 
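  //
  // A short illustrative sketch (PN is assumed to be an existing PHINode *);
  // the two lists can be walked in parallel by index:
  //
  //   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
  //     Value *V = PN->getIncomingValue(i);
  //     BasicBlock *BB = PN->getIncomingBlock(i);
  //     // ... use V and BB ...
  //   }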
2638 2639 using block_iterator = BasicBlock **; 2640 using const_block_iterator = BasicBlock * const *; 2641 2642 block_iterator block_begin() { 2643 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace); 2644 } 2645 2646 const_block_iterator block_begin() const { 2647 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); 2648 } 2649 2650 block_iterator block_end() { 2651 return block_begin() + getNumOperands(); 2652 } 2653 2654 const_block_iterator block_end() const { 2655 return block_begin() + getNumOperands(); 2656 } 2657 2658 iterator_range<block_iterator> blocks() { 2659 return make_range(block_begin(), block_end()); 2660 } 2661 2662 iterator_range<const_block_iterator> blocks() const { 2663 return make_range(block_begin(), block_end()); 2664 } 2665 2666 op_range incoming_values() { return operands(); } 2667 2668 const_op_range incoming_values() const { return operands(); } 2669 2670 /// Return the number of incoming edges 2671 /// 2672 unsigned getNumIncomingValues() const { return getNumOperands(); } 2673 2674 /// Return incoming value number x 2675 /// 2676 Value *getIncomingValue(unsigned i) const { 2677 return getOperand(i); 2678 } 2679 void setIncomingValue(unsigned i, Value *V) { 2680 assert(V && "PHI node got a null value!"); 2681 assert(getType() == V->getType() && 2682 "All operands to PHI node must be the same type as the PHI node!"); 2683 setOperand(i, V); 2684 } 2685 2686 static unsigned getOperandNumForIncomingValue(unsigned i) { 2687 return i; 2688 } 2689 2690 static unsigned getIncomingValueNumForOperand(unsigned i) { 2691 return i; 2692 } 2693 2694 /// Return incoming basic block number @p i. 2695 /// 2696 BasicBlock *getIncomingBlock(unsigned i) const { 2697 return block_begin()[i]; 2698 } 2699 2700 /// Return incoming basic block corresponding 2701 /// to an operand of the PHI. 2702 /// 2703 BasicBlock *getIncomingBlock(const Use &U) const { 2704 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); 2705 return getIncomingBlock(unsigned(&U - op_begin())); 2706 } 2707 2708 /// Return incoming basic block corresponding 2709 /// to value use iterator. 2710 /// 2711 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { 2712 return getIncomingBlock(I.getUse()); 2713 } 2714 2715 void setIncomingBlock(unsigned i, BasicBlock *BB) { 2716 assert(BB && "PHI node got a null basic block!"); 2717 block_begin()[i] = BB; 2718 } 2719 2720 /// Replace every incoming basic block \p Old to basic block \p New. 2721 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { 2722 assert(New && Old && "PHI node got a null basic block!"); 2723 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 2724 if (getIncomingBlock(Op) == Old) 2725 setIncomingBlock(Op, New); 2726 } 2727 2728 /// Add an incoming value to the end of the PHI list 2729 /// 2730 void addIncoming(Value *V, BasicBlock *BB) { 2731 if (getNumOperands() == ReservedSpace) 2732 growOperands(); // Get more space! 2733 // Initialize some new operands. 2734 setNumHungOffUseOperands(getNumOperands() + 1); 2735 setIncomingValue(getNumOperands() - 1, V); 2736 setIncomingBlock(getNumOperands() - 1, BB); 2737 } 2738 2739 /// Remove an incoming value. This is useful if a 2740 /// predecessor basic block is deleted. The value removed is returned. 2741 /// 2742 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty 2743 /// is true), the PHI node is destroyed and any uses of it are replaced with 2744 /// dummy values. 
The only time there should be zero incoming values to a PHI 2745 /// node is when the block is dead, so this strategy is sound. 2746 /// 2747 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); 2748 2749 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { 2750 int Idx = getBasicBlockIndex(BB); 2751 assert(Idx >= 0 && "Invalid basic block argument to remove!"); 2752 return removeIncomingValue(Idx, DeletePHIIfEmpty); 2753 } 2754 2755 /// Return the first index of the specified basic 2756 /// block in the value list for this PHI. Returns -1 if no instance. 2757 /// 2758 int getBasicBlockIndex(const BasicBlock *BB) const { 2759 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) 2760 if (block_begin()[i] == BB) 2761 return i; 2762 return -1; 2763 } 2764 2765 Value *getIncomingValueForBlock(const BasicBlock *BB) const { 2766 int Idx = getBasicBlockIndex(BB); 2767 assert(Idx >= 0 && "Invalid basic block argument!"); 2768 return getIncomingValue(Idx); 2769 } 2770 2771 /// Set every incoming value(s) for block \p BB to \p V. 2772 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { 2773 assert(BB && "PHI node got a null basic block!"); 2774 bool Found = false; 2775 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 2776 if (getIncomingBlock(Op) == BB) { 2777 Found = true; 2778 setIncomingValue(Op, V); 2779 } 2780 (void)Found; 2781 assert(Found && "Invalid basic block argument to set!"); 2782 } 2783 2784 /// If the specified PHI node always merges together the 2785 /// same value, return the value, otherwise return null. 2786 Value *hasConstantValue() const; 2787 2788 /// Whether the specified PHI node always merges 2789 /// together the same value, assuming undefs are equal to a unique 2790 /// non-undef value. 2791 bool hasConstantOrUndefValue() const; 2792 2793 /// If the PHI node is complete which means all of its parent's predecessors 2794 /// have incoming value in this PHI, return true, otherwise return false. 2795 bool isComplete() const { 2796 return llvm::all_of(predecessors(getParent()), 2797 [this](const BasicBlock *Pred) { 2798 return getBasicBlockIndex(Pred) >= 0; 2799 }); 2800 } 2801 2802 /// Methods for support type inquiry through isa, cast, and dyn_cast: 2803 static bool classof(const Instruction *I) { 2804 return I->getOpcode() == Instruction::PHI; 2805 } 2806 static bool classof(const Value *V) { 2807 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2808 } 2809 2810 private: 2811 void growOperands(); 2812 }; 2813 2814 template <> 2815 struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { 2816 }; 2817 2818 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value) 2819 2820 //===----------------------------------------------------------------------===// 2821 // LandingPadInst Class 2822 //===----------------------------------------------------------------------===// 2823 2824 //===--------------------------------------------------------------------------- 2825 /// The landingpad instruction holds all of the information 2826 /// necessary to generate correct exception handling. The landingpad instruction 2827 /// cannot be moved from the top of a landing pad block, which itself is 2828 /// accessible only from the 'unwind' edge of an invoke. This uses the 2829 /// SubclassData field in Value to store whether or not the landingpad is a 2830 /// cleanup. 
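/// For example (illustrative IR), a landingpad that catches a typed exception
/// and also acts as a cleanup might look like:
/// \code
///   %lp = landingpad { i8*, i32 }
///           catch i8* bitcast (i8** @_ZTIi to i8*)
///           cleanup
/// \endcode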
///
class LandingPadInst : public Instruction {
  using CleanupField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated. NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  LandingPadInst(const LandingPadInst &LP);

public:
  enum ClauseType { Catch, Filter };

private:
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, Instruction *InsertBefore);
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Allocate space for exactly zero operands.
  void *operator new(size_t s) {
    return User::operator new(s);
  }

  void growOperands(unsigned Size);
  void init(unsigned NumReservedValues, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LandingPadInst *cloneImpl() const;

public:
  /// Constructors - NumReservedClauses is a hint for the number of incoming
  /// clauses that this landingpad will have (use 0 if you really have no idea).
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr = "",
                                Instruction *InsertBefore = nullptr);
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return 'true' if this landingpad instruction is a
  /// cleanup. I.e., it should be run when unwinding even if its landing pad
  /// doesn't catch the exception.
  bool isCleanup() const { return getSubclassData<CleanupField>(); }

  /// Indicate that this landingpad instruction is a cleanup.
  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }

  /// Add a catch or filter clause to the landing pad.
  void addClause(Constant *ClauseVal);

  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
  /// determine what type of clause this is.
  Constant *getClause(unsigned Idx) const {
    return cast<Constant>(getOperandList()[Idx]);
  }

  /// Return 'true' if the clause at index Idx is a catch clause.
  bool isCatch(unsigned Idx) const {
    return !isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Return 'true' if the clause at index Idx is a filter clause.
  bool isFilter(unsigned Idx) const {
    return isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Get the number of clauses for this landing pad.
  unsigned getNumClauses() const { return getNumOperands(); }

  /// Grow the size of the operand list to accommodate the new
  /// number of clauses.
2908 void reserveClauses(unsigned Size) { growOperands(Size); } 2909 2910 // Methods for support type inquiry through isa, cast, and dyn_cast: 2911 static bool classof(const Instruction *I) { 2912 return I->getOpcode() == Instruction::LandingPad; 2913 } 2914 static bool classof(const Value *V) { 2915 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2916 } 2917 }; 2918 2919 template <> 2920 struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> { 2921 }; 2922 2923 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value) 2924 2925 //===----------------------------------------------------------------------===// 2926 // ReturnInst Class 2927 //===----------------------------------------------------------------------===// 2928 2929 //===--------------------------------------------------------------------------- 2930 /// Return a value (possibly void), from a function. Execution 2931 /// does not continue in this function any longer. 2932 /// 2933 class ReturnInst : public Instruction { 2934 ReturnInst(const ReturnInst &RI); 2935 2936 private: 2937 // ReturnInst constructors: 2938 // ReturnInst() - 'ret void' instruction 2939 // ReturnInst( null) - 'ret void' instruction 2940 // ReturnInst(Value* X) - 'ret X' instruction 2941 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I 2942 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I 2943 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B 2944 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B 2945 // 2946 // NOTE: If the Value* passed is of type void then the constructor behaves as 2947 // if it was passed NULL. 2948 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr, 2949 Instruction *InsertBefore = nullptr); 2950 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd); 2951 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd); 2952 2953 protected: 2954 // Note: Instruction needs to be a friend here to call cloneImpl. 2955 friend class Instruction; 2956 2957 ReturnInst *cloneImpl() const; 2958 2959 public: 2960 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr, 2961 Instruction *InsertBefore = nullptr) { 2962 return new(!!retVal) ReturnInst(C, retVal, InsertBefore); 2963 } 2964 2965 static ReturnInst* Create(LLVMContext &C, Value *retVal, 2966 BasicBlock *InsertAtEnd) { 2967 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd); 2968 } 2969 2970 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) { 2971 return new(0) ReturnInst(C, InsertAtEnd); 2972 } 2973 2974 /// Provide fast operand accessors 2975 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2976 2977 /// Convenience accessor. Returns null if there is no return value. 2978 Value *getReturnValue() const { 2979 return getNumOperands() != 0 ? 
getOperand(0) : nullptr; 2980 } 2981 2982 unsigned getNumSuccessors() const { return 0; } 2983 2984 // Methods for support type inquiry through isa, cast, and dyn_cast: 2985 static bool classof(const Instruction *I) { 2986 return (I->getOpcode() == Instruction::Ret); 2987 } 2988 static bool classof(const Value *V) { 2989 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2990 } 2991 2992 private: 2993 BasicBlock *getSuccessor(unsigned idx) const { 2994 llvm_unreachable("ReturnInst has no successors!"); 2995 } 2996 2997 void setSuccessor(unsigned idx, BasicBlock *B) { 2998 llvm_unreachable("ReturnInst has no successors!"); 2999 } 3000 }; 3001 3002 template <> 3003 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> { 3004 }; 3005 3006 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value) 3007 3008 //===----------------------------------------------------------------------===// 3009 // BranchInst Class 3010 //===----------------------------------------------------------------------===// 3011 3012 //===--------------------------------------------------------------------------- 3013 /// Conditional or Unconditional Branch instruction. 3014 /// 3015 class BranchInst : public Instruction { 3016 /// Ops list - Branches are strange. The operands are ordered: 3017 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because 3018 /// they don't have to check for cond/uncond branchness. These are mostly 3019 /// accessed relative from op_end(). 3020 BranchInst(const BranchInst &BI); 3021 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition): 3022 // BranchInst(BB *B) - 'br B' 3023 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F' 3024 // BranchInst(BB* B, Inst *I) - 'br B' insert before I 3025 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I 3026 // BranchInst(BB* B, BB *I) - 'br B' insert at end 3027 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end 3028 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr); 3029 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 3030 Instruction *InsertBefore = nullptr); 3031 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd); 3032 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 3033 BasicBlock *InsertAtEnd); 3034 3035 void AssertOK(); 3036 3037 protected: 3038 // Note: Instruction needs to be a friend here to call cloneImpl. 3039 friend class Instruction; 3040 3041 BranchInst *cloneImpl() const; 3042 3043 public: 3044 /// Iterator type that casts an operand to a basic block. 3045 /// 3046 /// This only makes sense because the successors are stored as adjacent 3047 /// operands for branch instructions. 3048 struct succ_op_iterator 3049 : iterator_adaptor_base<succ_op_iterator, value_op_iterator, 3050 std::random_access_iterator_tag, BasicBlock *, 3051 ptrdiff_t, BasicBlock *, BasicBlock *> { 3052 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} 3053 3054 BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3055 BasicBlock *operator->() const { return operator*(); } 3056 }; 3057 3058 /// The const version of `succ_op_iterator`. 
3059 struct const_succ_op_iterator 3060 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, 3061 std::random_access_iterator_tag, 3062 const BasicBlock *, ptrdiff_t, const BasicBlock *, 3063 const BasicBlock *> { 3064 explicit const_succ_op_iterator(const_value_op_iterator I) 3065 : iterator_adaptor_base(I) {} 3066 3067 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3068 const BasicBlock *operator->() const { return operator*(); } 3069 }; 3070 3071 static BranchInst *Create(BasicBlock *IfTrue, 3072 Instruction *InsertBefore = nullptr) { 3073 return new(1) BranchInst(IfTrue, InsertBefore); 3074 } 3075 3076 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3077 Value *Cond, Instruction *InsertBefore = nullptr) { 3078 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); 3079 } 3080 3081 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) { 3082 return new(1) BranchInst(IfTrue, InsertAtEnd); 3083 } 3084 3085 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3086 Value *Cond, BasicBlock *InsertAtEnd) { 3087 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd); 3088 } 3089 3090 /// Transparently provide more efficient getOperand methods. 3091 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3092 3093 bool isUnconditional() const { return getNumOperands() == 1; } 3094 bool isConditional() const { return getNumOperands() == 3; } 3095 3096 Value *getCondition() const { 3097 assert(isConditional() && "Cannot get condition of an uncond branch!"); 3098 return Op<-3>(); 3099 } 3100 3101 void setCondition(Value *V) { 3102 assert(isConditional() && "Cannot set condition of unconditional branch!"); 3103 Op<-3>() = V; 3104 } 3105 3106 unsigned getNumSuccessors() const { return 1+isConditional(); } 3107 3108 BasicBlock *getSuccessor(unsigned i) const { 3109 assert(i < getNumSuccessors() && "Successor # out of range for Branch!"); 3110 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get()); 3111 } 3112 3113 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 3114 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!"); 3115 *(&Op<-1>() - idx) = NewSucc; 3116 } 3117 3118 /// Swap the successors of this branch instruction. 3119 /// 3120 /// Swaps the successors of the branch instruction. This also swaps any 3121 /// branch weight metadata associated with the instruction so that it 3122 /// continues to map correctly to each operand. 3123 void swapSuccessors(); 3124 3125 iterator_range<succ_op_iterator> successors() { 3126 return make_range( 3127 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)), 3128 succ_op_iterator(value_op_end())); 3129 } 3130 3131 iterator_range<const_succ_op_iterator> successors() const { 3132 return make_range(const_succ_op_iterator( 3133 std::next(value_op_begin(), isConditional() ? 
1 : 0)), 3134 const_succ_op_iterator(value_op_end())); 3135 } 3136 3137 // Methods for support type inquiry through isa, cast, and dyn_cast: 3138 static bool classof(const Instruction *I) { 3139 return (I->getOpcode() == Instruction::Br); 3140 } 3141 static bool classof(const Value *V) { 3142 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3143 } 3144 }; 3145 3146 template <> 3147 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> { 3148 }; 3149 3150 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value) 3151 3152 //===----------------------------------------------------------------------===// 3153 // SwitchInst Class 3154 //===----------------------------------------------------------------------===// 3155 3156 //===--------------------------------------------------------------------------- 3157 /// Multiway switch 3158 /// 3159 class SwitchInst : public Instruction { 3160 unsigned ReservedSpace; 3161 3162 // Operand[0] = Value to switch on 3163 // Operand[1] = Default basic block destination 3164 // Operand[2n ] = Value to match 3165 // Operand[2n+1] = BasicBlock to go to on match 3166 SwitchInst(const SwitchInst &SI); 3167 3168 /// Create a new switch instruction, specifying a value to switch on and a 3169 /// default destination. The number of additional cases can be specified here 3170 /// to make memory allocation more efficient. This constructor can also 3171 /// auto-insert before another instruction. 3172 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3173 Instruction *InsertBefore); 3174 3175 /// Create a new switch instruction, specifying a value to switch on and a 3176 /// default destination. The number of additional cases can be specified here 3177 /// to make memory allocation more efficient. This constructor also 3178 /// auto-inserts at the end of the specified BasicBlock. 3179 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3180 BasicBlock *InsertAtEnd); 3181 3182 // allocate space for exactly zero operands 3183 void *operator new(size_t s) { 3184 return User::operator new(s); 3185 } 3186 3187 void init(Value *Value, BasicBlock *Default, unsigned NumReserved); 3188 void growOperands(); 3189 3190 protected: 3191 // Note: Instruction needs to be a friend here to call cloneImpl. 3192 friend class Instruction; 3193 3194 SwitchInst *cloneImpl() const; 3195 3196 public: 3197 // -2 3198 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); 3199 3200 template <typename CaseHandleT> class CaseIteratorImpl; 3201 3202 /// A handle to a particular switch case. It exposes a convenient interface 3203 /// to both the case value and the successor block. 3204 /// 3205 /// We define this as a template and instantiate it to form both a const and 3206 /// non-const handle. 3207 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> 3208 class CaseHandleImpl { 3209 // Directly befriend both const and non-const iterators. 3210 friend class SwitchInst::CaseIteratorImpl< 3211 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; 3212 3213 protected: 3214 // Expose the switch type we're parameterized with to the iterator. 3215 using SwitchInstType = SwitchInstT; 3216 3217 SwitchInstT *SI; 3218 ptrdiff_t Index; 3219 3220 CaseHandleImpl() = default; 3221 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} 3222 3223 public: 3224 /// Resolves case value for current case. 
3225 ConstantIntT *getCaseValue() const { 3226 assert((unsigned)Index < SI->getNumCases() && 3227 "Index out the number of cases."); 3228 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); 3229 } 3230 3231 /// Resolves successor for current case. 3232 BasicBlockT *getCaseSuccessor() const { 3233 assert(((unsigned)Index < SI->getNumCases() || 3234 (unsigned)Index == DefaultPseudoIndex) && 3235 "Index out the number of cases."); 3236 return SI->getSuccessor(getSuccessorIndex()); 3237 } 3238 3239 /// Returns number of current case. 3240 unsigned getCaseIndex() const { return Index; } 3241 3242 /// Returns successor index for current case successor. 3243 unsigned getSuccessorIndex() const { 3244 assert(((unsigned)Index == DefaultPseudoIndex || 3245 (unsigned)Index < SI->getNumCases()) && 3246 "Index out the number of cases."); 3247 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0; 3248 } 3249 3250 bool operator==(const CaseHandleImpl &RHS) const { 3251 assert(SI == RHS.SI && "Incompatible operators."); 3252 return Index == RHS.Index; 3253 } 3254 }; 3255 3256 using ConstCaseHandle = 3257 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; 3258 3259 class CaseHandle 3260 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> { 3261 friend class SwitchInst::CaseIteratorImpl<CaseHandle>; 3262 3263 public: 3264 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {} 3265 3266 /// Sets the new value for current case. 3267 void setValue(ConstantInt *V) { 3268 assert((unsigned)Index < SI->getNumCases() && 3269 "Index out the number of cases."); 3270 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V)); 3271 } 3272 3273 /// Sets the new successor for current case. 3274 void setSuccessor(BasicBlock *S) { 3275 SI->setSuccessor(getSuccessorIndex(), S); 3276 } 3277 }; 3278 3279 template <typename CaseHandleT> 3280 class CaseIteratorImpl 3281 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, 3282 std::random_access_iterator_tag, 3283 CaseHandleT> { 3284 using SwitchInstT = typename CaseHandleT::SwitchInstType; 3285 3286 CaseHandleT Case; 3287 3288 public: 3289 /// Default constructed iterator is in an invalid state until assigned to 3290 /// a case for a particular switch. 3291 CaseIteratorImpl() = default; 3292 3293 /// Initializes case iterator for given SwitchInst and for given 3294 /// case number. 3295 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {} 3296 3297 /// Initializes case iterator for given SwitchInst and for given 3298 /// successor index. 3299 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, 3300 unsigned SuccessorIndex) { 3301 assert(SuccessorIndex < SI->getNumSuccessors() && 3302 "Successor index # out of range!"); 3303 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1) 3304 : CaseIteratorImpl(SI, DefaultPseudoIndex); 3305 } 3306 3307 /// Support converting to the const variant. This will be a no-op for const 3308 /// variant. 3309 operator CaseIteratorImpl<ConstCaseHandle>() const { 3310 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index); 3311 } 3312 3313 CaseIteratorImpl &operator+=(ptrdiff_t N) { 3314 // Check index correctness after addition. 3315 // Note: Index == getNumCases() means end(). 
3316 assert(Case.Index + N >= 0 && 3317 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && 3318 "Case.Index out the number of cases."); 3319 Case.Index += N; 3320 return *this; 3321 } 3322 CaseIteratorImpl &operator-=(ptrdiff_t N) { 3323 // Check index correctness after subtraction. 3324 // Note: Case.Index == getNumCases() means end(). 3325 assert(Case.Index - N >= 0 && 3326 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && 3327 "Case.Index out the number of cases."); 3328 Case.Index -= N; 3329 return *this; 3330 } 3331 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { 3332 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3333 return Case.Index - RHS.Case.Index; 3334 } 3335 bool operator==(const CaseIteratorImpl &RHS) const { 3336 return Case == RHS.Case; 3337 } 3338 bool operator<(const CaseIteratorImpl &RHS) const { 3339 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3340 return Case.Index < RHS.Case.Index; 3341 } 3342 CaseHandleT &operator*() { return Case; } 3343 const CaseHandleT &operator*() const { return Case; } 3344 }; 3345 3346 using CaseIt = CaseIteratorImpl<CaseHandle>; 3347 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; 3348 3349 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3350 unsigned NumCases, 3351 Instruction *InsertBefore = nullptr) { 3352 return new SwitchInst(Value, Default, NumCases, InsertBefore); 3353 } 3354 3355 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3356 unsigned NumCases, BasicBlock *InsertAtEnd) { 3357 return new SwitchInst(Value, Default, NumCases, InsertAtEnd); 3358 } 3359 3360 /// Provide fast operand accessors 3361 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3362 3363 // Accessor Methods for Switch stmt 3364 Value *getCondition() const { return getOperand(0); } 3365 void setCondition(Value *V) { setOperand(0, V); } 3366 3367 BasicBlock *getDefaultDest() const { 3368 return cast<BasicBlock>(getOperand(1)); 3369 } 3370 3371 void setDefaultDest(BasicBlock *DefaultCase) { 3372 setOperand(1, reinterpret_cast<Value*>(DefaultCase)); 3373 } 3374 3375 /// Return the number of 'cases' in this switch instruction, excluding the 3376 /// default case. 3377 unsigned getNumCases() const { 3378 return getNumOperands()/2 - 1; 3379 } 3380 3381 /// Returns a read/write iterator that points to the first case in the 3382 /// SwitchInst. 3383 CaseIt case_begin() { 3384 return CaseIt(this, 0); 3385 } 3386 3387 /// Returns a read-only iterator that points to the first case in the 3388 /// SwitchInst. 3389 ConstCaseIt case_begin() const { 3390 return ConstCaseIt(this, 0); 3391 } 3392 3393 /// Returns a read/write iterator that points one past the last in the 3394 /// SwitchInst. 3395 CaseIt case_end() { 3396 return CaseIt(this, getNumCases()); 3397 } 3398 3399 /// Returns a read-only iterator that points one past the last in the 3400 /// SwitchInst. 3401 ConstCaseIt case_end() const { 3402 return ConstCaseIt(this, getNumCases()); 3403 } 3404 3405 /// Iteration adapter for range-for loops. 3406 iterator_range<CaseIt> cases() { 3407 return make_range(case_begin(), case_end()); 3408 } 3409 3410 /// Constant iteration adapter for range-for loops. 3411 iterator_range<ConstCaseIt> cases() const { 3412 return make_range(case_begin(), case_end()); 3413 } 3414 3415 /// Returns an iterator that points to the default case. 3416 /// Note: this iterator allows to resolve successor only. Attempt 3417 /// to resolve case value causes an assertion. 
3418 /// Also note, that increment and decrement also causes an assertion and 3419 /// makes iterator invalid. 3420 CaseIt case_default() { 3421 return CaseIt(this, DefaultPseudoIndex); 3422 } 3423 ConstCaseIt case_default() const { 3424 return ConstCaseIt(this, DefaultPseudoIndex); 3425 } 3426 3427 /// Search all of the case values for the specified constant. If it is 3428 /// explicitly handled, return the case iterator of it, otherwise return 3429 /// default case iterator to indicate that it is handled by the default 3430 /// handler. 3431 CaseIt findCaseValue(const ConstantInt *C) { 3432 CaseIt I = llvm::find_if( 3433 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; }); 3434 if (I != case_end()) 3435 return I; 3436 3437 return case_default(); 3438 } 3439 ConstCaseIt findCaseValue(const ConstantInt *C) const { 3440 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) { 3441 return Case.getCaseValue() == C; 3442 }); 3443 if (I != case_end()) 3444 return I; 3445 3446 return case_default(); 3447 } 3448 3449 /// Finds the unique case value for a given successor. Returns null if the 3450 /// successor is not found, not unique, or is the default case. 3451 ConstantInt *findCaseDest(BasicBlock *BB) { 3452 if (BB == getDefaultDest()) 3453 return nullptr; 3454 3455 ConstantInt *CI = nullptr; 3456 for (auto Case : cases()) { 3457 if (Case.getCaseSuccessor() != BB) 3458 continue; 3459 3460 if (CI) 3461 return nullptr; // Multiple cases lead to BB. 3462 3463 CI = Case.getCaseValue(); 3464 } 3465 3466 return CI; 3467 } 3468 3469 /// Add an entry to the switch instruction. 3470 /// Note: 3471 /// This action invalidates case_end(). Old case_end() iterator will 3472 /// point to the added case. 3473 void addCase(ConstantInt *OnVal, BasicBlock *Dest); 3474 3475 /// This method removes the specified case and its successor from the switch 3476 /// instruction. Note that this operation may reorder the remaining cases at 3477 /// index idx and above. 3478 /// Note: 3479 /// This action invalidates iterators for all cases following the one removed, 3480 /// including the case_end() iterator. It returns an iterator for the next 3481 /// case. 3482 CaseIt removeCase(CaseIt I); 3483 3484 unsigned getNumSuccessors() const { return getNumOperands()/2; } 3485 BasicBlock *getSuccessor(unsigned idx) const { 3486 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!"); 3487 return cast<BasicBlock>(getOperand(idx*2+1)); 3488 } 3489 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 3490 assert(idx < getNumSuccessors() && "Successor # out of range for switch!"); 3491 setOperand(idx * 2 + 1, NewSucc); 3492 } 3493 3494 // Methods for support type inquiry through isa, cast, and dyn_cast: 3495 static bool classof(const Instruction *I) { 3496 return I->getOpcode() == Instruction::Switch; 3497 } 3498 static bool classof(const Value *V) { 3499 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3500 } 3501 }; 3502 3503 /// A wrapper class to simplify modification of SwitchInst cases along with 3504 /// their prof branch_weights metadata. 
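///
/// Example (illustrative sketch): assuming an existing i32 value `Cond`,
/// blocks `DefaultBB` and `CaseBB`, and an insertion block `BB`, a switch can
/// be built and extended through the wrapper; any weight changes are flushed
/// back to !prof metadata when the wrapper is destroyed:
///
///   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, BB);
///   {
///     SwitchInstProfUpdateWrapper SIW(*SI);
///     SIW.addCase(ConstantInt::get(Type::getInt32Ty(BB->getContext()), 7),
///                 CaseBB, /*W=*/10);
///   } // ~SwitchInstProfUpdateWrapper writes the updated branch_weights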
3505 class SwitchInstProfUpdateWrapper { 3506 SwitchInst &SI; 3507 Optional<SmallVector<uint32_t, 8> > Weights = None; 3508 bool Changed = false; 3509 3510 protected: 3511 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI); 3512 3513 MDNode *buildProfBranchWeightsMD(); 3514 3515 void init(); 3516 3517 public: 3518 using CaseWeightOpt = Optional<uint32_t>; 3519 SwitchInst *operator->() { return &SI; } 3520 SwitchInst &operator*() { return SI; } 3521 operator SwitchInst *() { return &SI; } 3522 3523 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); } 3524 3525 ~SwitchInstProfUpdateWrapper() { 3526 if (Changed) 3527 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); 3528 } 3529 3530 /// Delegate the call to the underlying SwitchInst::removeCase() and remove 3531 /// correspondent branch weight. 3532 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I); 3533 3534 /// Delegate the call to the underlying SwitchInst::addCase() and set the 3535 /// specified branch weight for the added case. 3536 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W); 3537 3538 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark 3539 /// this object to not touch the underlying SwitchInst in destructor. 3540 SymbolTableList<Instruction>::iterator eraseFromParent(); 3541 3542 void setSuccessorWeight(unsigned idx, CaseWeightOpt W); 3543 CaseWeightOpt getSuccessorWeight(unsigned idx); 3544 3545 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx); 3546 }; 3547 3548 template <> 3549 struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> { 3550 }; 3551 3552 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value) 3553 3554 //===----------------------------------------------------------------------===// 3555 // IndirectBrInst Class 3556 //===----------------------------------------------------------------------===// 3557 3558 //===--------------------------------------------------------------------------- 3559 /// Indirect Branch Instruction. 3560 /// 3561 class IndirectBrInst : public Instruction { 3562 unsigned ReservedSpace; 3563 3564 // Operand[0] = Address to jump to 3565 // Operand[n+1] = n-th destination 3566 IndirectBrInst(const IndirectBrInst &IBI); 3567 3568 /// Create a new indirectbr instruction, specifying an 3569 /// Address to jump to. The number of expected destinations can be specified 3570 /// here to make memory allocation more efficient. This constructor can also 3571 /// autoinsert before another instruction. 3572 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore); 3573 3574 /// Create a new indirectbr instruction, specifying an 3575 /// Address to jump to. The number of expected destinations can be specified 3576 /// here to make memory allocation more efficient. This constructor also 3577 /// autoinserts at the end of the specified BasicBlock. 3578 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd); 3579 3580 // allocate space for exactly zero operands 3581 void *operator new(size_t s) { 3582 return User::operator new(s); 3583 } 3584 3585 void init(Value *Address, unsigned NumDests); 3586 void growOperands(); 3587 3588 protected: 3589 // Note: Instruction needs to be a friend here to call cloneImpl. 3590 friend class Instruction; 3591 3592 IndirectBrInst *cloneImpl() const; 3593 3594 public: 3595 /// Iterator type that casts an operand to a basic block. 
3596 /// 3597 /// This only makes sense because the successors are stored as adjacent 3598 /// operands for indirectbr instructions. 3599 struct succ_op_iterator 3600 : iterator_adaptor_base<succ_op_iterator, value_op_iterator, 3601 std::random_access_iterator_tag, BasicBlock *, 3602 ptrdiff_t, BasicBlock *, BasicBlock *> { 3603 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} 3604 3605 BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3606 BasicBlock *operator->() const { return operator*(); } 3607 }; 3608 3609 /// The const version of `succ_op_iterator`. 3610 struct const_succ_op_iterator 3611 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, 3612 std::random_access_iterator_tag, 3613 const BasicBlock *, ptrdiff_t, const BasicBlock *, 3614 const BasicBlock *> { 3615 explicit const_succ_op_iterator(const_value_op_iterator I) 3616 : iterator_adaptor_base(I) {} 3617 3618 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3619 const BasicBlock *operator->() const { return operator*(); } 3620 }; 3621 3622 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 3623 Instruction *InsertBefore = nullptr) { 3624 return new IndirectBrInst(Address, NumDests, InsertBefore); 3625 } 3626 3627 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 3628 BasicBlock *InsertAtEnd) { 3629 return new IndirectBrInst(Address, NumDests, InsertAtEnd); 3630 } 3631 3632 /// Provide fast operand accessors. 3633 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3634 3635 // Accessor Methods for IndirectBrInst instruction. 3636 Value *getAddress() { return getOperand(0); } 3637 const Value *getAddress() const { return getOperand(0); } 3638 void setAddress(Value *V) { setOperand(0, V); } 3639 3640 /// return the number of possible destinations in this 3641 /// indirectbr instruction. 3642 unsigned getNumDestinations() const { return getNumOperands()-1; } 3643 3644 /// Return the specified destination. 3645 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); } 3646 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); } 3647 3648 /// Add a destination. 3649 /// 3650 void addDestination(BasicBlock *Dest); 3651 3652 /// This method removes the specified successor from the 3653 /// indirectbr instruction. 
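///
/// Example (illustrative sketch): assuming existing blocks `BB0` (which the
/// new terminator is appended to), `BB1`, and `BB2`, an indirectbr can be
/// populated with destinations and later pruned again:
///
///   Value *Addr = BlockAddress::get(BB1);   // a possible target address
///   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/2, BB0);
///   IBI->addDestination(BB1);
///   IBI->addDestination(BB2);
///   IBI->removeDestination(1);              // drops BB2 again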
3654 void removeDestination(unsigned i); 3655 3656 unsigned getNumSuccessors() const { return getNumOperands()-1; } 3657 BasicBlock *getSuccessor(unsigned i) const { 3658 return cast<BasicBlock>(getOperand(i+1)); 3659 } 3660 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 3661 setOperand(i + 1, NewSucc); 3662 } 3663 3664 iterator_range<succ_op_iterator> successors() { 3665 return make_range(succ_op_iterator(std::next(value_op_begin())), 3666 succ_op_iterator(value_op_end())); 3667 } 3668 3669 iterator_range<const_succ_op_iterator> successors() const { 3670 return make_range(const_succ_op_iterator(std::next(value_op_begin())), 3671 const_succ_op_iterator(value_op_end())); 3672 } 3673 3674 // Methods for support type inquiry through isa, cast, and dyn_cast: 3675 static bool classof(const Instruction *I) { 3676 return I->getOpcode() == Instruction::IndirectBr; 3677 } 3678 static bool classof(const Value *V) { 3679 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3680 } 3681 }; 3682 3683 template <> 3684 struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> { 3685 }; 3686 3687 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value) 3688 3689 //===----------------------------------------------------------------------===// 3690 // InvokeInst Class 3691 //===----------------------------------------------------------------------===// 3692 3693 /// Invoke instruction. The SubclassData field is used to hold the 3694 /// calling convention of the call. 3695 /// 3696 class InvokeInst : public CallBase { 3697 /// The number of operands for this call beyond the called function, 3698 /// arguments, and operand bundles. 3699 static constexpr int NumExtraOperands = 2; 3700 3701 /// The index from the end of the operand array to the normal destination. 3702 static constexpr int NormalDestOpEndIdx = -3; 3703 3704 /// The index from the end of the operand array to the unwind destination. 3705 static constexpr int UnwindDestOpEndIdx = -2; 3706 3707 InvokeInst(const InvokeInst &BI); 3708 3709 /// Construct an InvokeInst given a range of arguments. 3710 /// 3711 /// Construct an InvokeInst from a range of arguments 3712 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3713 BasicBlock *IfException, ArrayRef<Value *> Args, 3714 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3715 const Twine &NameStr, Instruction *InsertBefore); 3716 3717 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3718 BasicBlock *IfException, ArrayRef<Value *> Args, 3719 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3720 const Twine &NameStr, BasicBlock *InsertAtEnd); 3721 3722 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3723 BasicBlock *IfException, ArrayRef<Value *> Args, 3724 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 3725 3726 /// Compute the number of operands to allocate. 3727 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { 3728 // We need one operand for the called function, plus our extra operands and 3729 // the input operand counts provided. 3730 return 1 + NumExtraOperands + NumArgs + NumBundleInputs; 3731 } 3732 3733 protected: 3734 // Note: Instruction needs to be a friend here to call cloneImpl. 
3735 friend class Instruction; 3736 3737 InvokeInst *cloneImpl() const; 3738 3739 public: 3740 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3741 BasicBlock *IfException, ArrayRef<Value *> Args, 3742 const Twine &NameStr, 3743 Instruction *InsertBefore = nullptr) { 3744 int NumOperands = ComputeNumOperands(Args.size()); 3745 return new (NumOperands) 3746 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, 3747 NameStr, InsertBefore); 3748 } 3749 3750 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3751 BasicBlock *IfException, ArrayRef<Value *> Args, 3752 ArrayRef<OperandBundleDef> Bundles = None, 3753 const Twine &NameStr = "", 3754 Instruction *InsertBefore = nullptr) { 3755 int NumOperands = 3756 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 3757 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 3758 3759 return new (NumOperands, DescriptorBytes) 3760 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 3761 NameStr, InsertBefore); 3762 } 3763 3764 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3765 BasicBlock *IfException, ArrayRef<Value *> Args, 3766 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3767 int NumOperands = ComputeNumOperands(Args.size()); 3768 return new (NumOperands) 3769 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, 3770 NameStr, InsertAtEnd); 3771 } 3772 3773 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3774 BasicBlock *IfException, ArrayRef<Value *> Args, 3775 ArrayRef<OperandBundleDef> Bundles, 3776 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3777 int NumOperands = 3778 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 3779 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 3780 3781 return new (NumOperands, DescriptorBytes) 3782 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 3783 NameStr, InsertAtEnd); 3784 } 3785 3786 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3787 BasicBlock *IfException, ArrayRef<Value *> Args, 3788 const Twine &NameStr, 3789 Instruction *InsertBefore = nullptr) { 3790 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3791 IfException, Args, None, NameStr, InsertBefore); 3792 } 3793 3794 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3795 BasicBlock *IfException, ArrayRef<Value *> Args, 3796 ArrayRef<OperandBundleDef> Bundles = None, 3797 const Twine &NameStr = "", 3798 Instruction *InsertBefore = nullptr) { 3799 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3800 IfException, Args, Bundles, NameStr, InsertBefore); 3801 } 3802 3803 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3804 BasicBlock *IfException, ArrayRef<Value *> Args, 3805 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3806 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3807 IfException, Args, NameStr, InsertAtEnd); 3808 } 3809 3810 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3811 BasicBlock *IfException, ArrayRef<Value *> Args, 3812 ArrayRef<OperandBundleDef> Bundles, 3813 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3814 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3815 IfException, Args, Bundles, NameStr, InsertAtEnd); 3816 } 3817 3818 /// Create a clone of \p II with a different set of operand bundles and 3819 /// insert it before \p 
InsertPt. 3820 /// 3821 /// The returned invoke instruction is identical to \p II in every way except 3822 /// that the operand bundles for the new instruction are set to the operand 3823 /// bundles in \p Bundles. 3824 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, 3825 Instruction *InsertPt = nullptr); 3826 3827 /// Create a clone of \p II with a different set of operand bundles and 3828 /// insert it before \p InsertPt. 3829 /// 3830 /// The returned invoke instruction is identical to \p II in every way except 3831 /// that the operand bundle for the new instruction is set to the operand 3832 /// bundle in \p Bundle. 3833 static InvokeInst *CreateWithReplacedBundle(InvokeInst *II, 3834 OperandBundleDef Bundles, 3835 Instruction *InsertPt = nullptr); 3836 3837 // get*Dest - Return the destination basic blocks... 3838 BasicBlock *getNormalDest() const { 3839 return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); 3840 } 3841 BasicBlock *getUnwindDest() const { 3842 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); 3843 } 3844 void setNormalDest(BasicBlock *B) { 3845 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); 3846 } 3847 void setUnwindDest(BasicBlock *B) { 3848 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); 3849 } 3850 3851 /// Get the landingpad instruction from the landing pad 3852 /// block (the unwind destination). 3853 LandingPadInst *getLandingPadInst() const; 3854 3855 BasicBlock *getSuccessor(unsigned i) const { 3856 assert(i < 2 && "Successor # out of range for invoke!"); 3857 return i == 0 ? getNormalDest() : getUnwindDest(); 3858 } 3859 3860 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 3861 assert(i < 2 && "Successor # out of range for invoke!"); 3862 if (i == 0) 3863 setNormalDest(NewSucc); 3864 else 3865 setUnwindDest(NewSucc); 3866 } 3867 3868 unsigned getNumSuccessors() const { return 2; } 3869 3870 // Methods for support type inquiry through isa, cast, and dyn_cast: 3871 static bool classof(const Instruction *I) { 3872 return (I->getOpcode() == Instruction::Invoke); 3873 } 3874 static bool classof(const Value *V) { 3875 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3876 } 3877 3878 private: 3879 // Shadow Instruction::setInstructionSubclassData with a private forwarding 3880 // method so that subclasses cannot accidentally use it. 
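//
// Example (illustrative sketch): assuming an existing FunctionCallee `Callee`,
// an argument value `Arg`, destination blocks `NormalBB` and `UnwindBB`, and
// an insertion block `BB`:
//
//   InvokeInst *II = InvokeInst::Create(Callee, NormalBB, UnwindBB, {Arg},
//                                       "res", BB);
//   BasicBlock *N = II->getNormalDest(); // == NormalBB
//   BasicBlock *U = II->getUnwindDest(); // == UnwindBB
//
// With one argument and no bundles the operands are laid out as
// [Arg, NormalBB, UnwindBB, Callee], matching NormalDestOpEndIdx (-3),
// UnwindDestOpEndIdx (-2) and the callee at Op<-1>().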
3881 template <typename Bitfield> 3882 void setSubclassData(typename Bitfield::Type Value) { 3883 Instruction::setSubclassData<Bitfield>(Value); 3884 } 3885 }; 3886 3887 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3888 BasicBlock *IfException, ArrayRef<Value *> Args, 3889 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3890 const Twine &NameStr, Instruction *InsertBefore) 3891 : CallBase(Ty->getReturnType(), Instruction::Invoke, 3892 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 3893 InsertBefore) { 3894 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 3895 } 3896 3897 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3898 BasicBlock *IfException, ArrayRef<Value *> Args, 3899 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3900 const Twine &NameStr, BasicBlock *InsertAtEnd) 3901 : CallBase(Ty->getReturnType(), Instruction::Invoke, 3902 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 3903 InsertAtEnd) { 3904 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 3905 } 3906 3907 //===----------------------------------------------------------------------===// 3908 // CallBrInst Class 3909 //===----------------------------------------------------------------------===// 3910 3911 /// CallBr instruction, tracking function calls that may not return control but 3912 /// instead transfer it to a third location. The SubclassData field is used to 3913 /// hold the calling convention of the call. 3914 /// 3915 class CallBrInst : public CallBase { 3916 3917 unsigned NumIndirectDests; 3918 3919 CallBrInst(const CallBrInst &BI); 3920 3921 /// Construct a CallBrInst given a range of arguments. 3922 /// 3923 /// Construct a CallBrInst from a range of arguments 3924 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 3925 ArrayRef<BasicBlock *> IndirectDests, 3926 ArrayRef<Value *> Args, 3927 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3928 const Twine &NameStr, Instruction *InsertBefore); 3929 3930 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 3931 ArrayRef<BasicBlock *> IndirectDests, 3932 ArrayRef<Value *> Args, 3933 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3934 const Twine &NameStr, BasicBlock *InsertAtEnd); 3935 3936 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, 3937 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, 3938 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 3939 3940 /// Should the Indirect Destinations change, scan + update the Arg list. 3941 void updateArgBlockAddresses(unsigned i, BasicBlock *B); 3942 3943 /// Compute the number of operands to allocate. 3944 static int ComputeNumOperands(int NumArgs, int NumIndirectDests, 3945 int NumBundleInputs = 0) { 3946 // We need one operand for the called function, plus our extra operands and 3947 // the input operand counts provided. 3948 return 2 + NumIndirectDests + NumArgs + NumBundleInputs; 3949 } 3950 3951 protected: 3952 // Note: Instruction needs to be a friend here to call cloneImpl. 
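//
// Layout note (a sketch derived from ComputeNumOperands and the accessors
// below): a callbr with one argument, two indirect destinations and no
// bundles gets 2 + 2 + 1 = 5 operands, laid out as
// [Arg, DefaultDest, IndirectDest0, IndirectDest1, Callee]; the callee is
// Op<-1>() and the default destination sits just before the indirect ones.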
3953 friend class Instruction; 3954 3955 CallBrInst *cloneImpl() const; 3956 3957 public: 3958 static CallBrInst *Create(FunctionType *Ty, Value *Func, 3959 BasicBlock *DefaultDest, 3960 ArrayRef<BasicBlock *> IndirectDests, 3961 ArrayRef<Value *> Args, const Twine &NameStr, 3962 Instruction *InsertBefore = nullptr) { 3963 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 3964 return new (NumOperands) 3965 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, 3966 NumOperands, NameStr, InsertBefore); 3967 } 3968 3969 static CallBrInst *Create(FunctionType *Ty, Value *Func, 3970 BasicBlock *DefaultDest, 3971 ArrayRef<BasicBlock *> IndirectDests, 3972 ArrayRef<Value *> Args, 3973 ArrayRef<OperandBundleDef> Bundles = None, 3974 const Twine &NameStr = "", 3975 Instruction *InsertBefore = nullptr) { 3976 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), 3977 CountBundleInputs(Bundles)); 3978 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 3979 3980 return new (NumOperands, DescriptorBytes) 3981 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 3982 NumOperands, NameStr, InsertBefore); 3983 } 3984 3985 static CallBrInst *Create(FunctionType *Ty, Value *Func, 3986 BasicBlock *DefaultDest, 3987 ArrayRef<BasicBlock *> IndirectDests, 3988 ArrayRef<Value *> Args, const Twine &NameStr, 3989 BasicBlock *InsertAtEnd) { 3990 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 3991 return new (NumOperands) 3992 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, 3993 NumOperands, NameStr, InsertAtEnd); 3994 } 3995 3996 static CallBrInst *Create(FunctionType *Ty, Value *Func, 3997 BasicBlock *DefaultDest, 3998 ArrayRef<BasicBlock *> IndirectDests, 3999 ArrayRef<Value *> Args, 4000 ArrayRef<OperandBundleDef> Bundles, 4001 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4002 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), 4003 CountBundleInputs(Bundles)); 4004 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4005 4006 return new (NumOperands, DescriptorBytes) 4007 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 4008 NumOperands, NameStr, InsertAtEnd); 4009 } 4010 4011 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4012 ArrayRef<BasicBlock *> IndirectDests, 4013 ArrayRef<Value *> Args, const Twine &NameStr, 4014 Instruction *InsertBefore = nullptr) { 4015 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4016 IndirectDests, Args, NameStr, InsertBefore); 4017 } 4018 4019 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4020 ArrayRef<BasicBlock *> IndirectDests, 4021 ArrayRef<Value *> Args, 4022 ArrayRef<OperandBundleDef> Bundles = None, 4023 const Twine &NameStr = "", 4024 Instruction *InsertBefore = nullptr) { 4025 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4026 IndirectDests, Args, Bundles, NameStr, InsertBefore); 4027 } 4028 4029 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4030 ArrayRef<BasicBlock *> IndirectDests, 4031 ArrayRef<Value *> Args, const Twine &NameStr, 4032 BasicBlock *InsertAtEnd) { 4033 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4034 IndirectDests, Args, NameStr, InsertAtEnd); 4035 } 4036 4037 static CallBrInst *Create(FunctionCallee Func, 4038 BasicBlock *DefaultDest, 4039 ArrayRef<BasicBlock *> IndirectDests, 4040 ArrayRef<Value *> Args, 4041 ArrayRef<OperandBundleDef> Bundles, 
4042 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4043 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4044 IndirectDests, Args, Bundles, NameStr, InsertAtEnd); 4045 } 4046 4047 /// Create a clone of \p CBI with a different set of operand bundles and 4048 /// insert it before \p InsertPt. 4049 /// 4050 /// The returned callbr instruction is identical to \p CBI in every way 4051 /// except that the operand bundles for the new instruction are set to the 4052 /// operand bundles in \p Bundles. 4053 static CallBrInst *Create(CallBrInst *CBI, 4054 ArrayRef<OperandBundleDef> Bundles, 4055 Instruction *InsertPt = nullptr); 4056 4057 /// Return the number of callbr indirect dest labels. 4058 /// 4059 unsigned getNumIndirectDests() const { return NumIndirectDests; } 4060 4061 /// getIndirectDestLabel - Return the i-th indirect dest label. 4062 /// 4063 Value *getIndirectDestLabel(unsigned i) const { 4064 assert(i < getNumIndirectDests() && "Out of bounds!"); 4065 return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() + 4066 1); 4067 } 4068 4069 Value *getIndirectDestLabelUse(unsigned i) const { 4070 assert(i < getNumIndirectDests() && "Out of bounds!"); 4071 return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() + 4072 1); 4073 } 4074 4075 // Return the destination basic blocks... 4076 BasicBlock *getDefaultDest() const { 4077 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); 4078 } 4079 BasicBlock *getIndirectDest(unsigned i) const { 4080 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); 4081 } 4082 SmallVector<BasicBlock *, 16> getIndirectDests() const { 4083 SmallVector<BasicBlock *, 16> IndirectDests; 4084 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) 4085 IndirectDests.push_back(getIndirectDest(i)); 4086 return IndirectDests; 4087 } 4088 void setDefaultDest(BasicBlock *B) { 4089 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); 4090 } 4091 void setIndirectDest(unsigned i, BasicBlock *B) { 4092 updateArgBlockAddresses(i, B); 4093 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); 4094 } 4095 4096 BasicBlock *getSuccessor(unsigned i) const { 4097 assert(i < getNumSuccessors() + 1 && 4098 "Successor # out of range for callbr!"); 4099 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); 4100 } 4101 4102 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4103 assert(i < getNumIndirectDests() + 1 && 4104 "Successor # out of range for callbr!"); 4105 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); 4106 } 4107 4108 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } 4109 4110 // Methods for support type inquiry through isa, cast, and dyn_cast: 4111 static bool classof(const Instruction *I) { 4112 return (I->getOpcode() == Instruction::CallBr); 4113 } 4114 static bool classof(const Value *V) { 4115 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4116 } 4117 4118 private: 4119 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4120 // method so that subclasses cannot accidentally use it. 
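//
// Example (illustrative sketch): assuming an existing FunctionCallee `Callee`
// (for callbr this is typically inline asm), blocks `Fallthrough` and
// `Handler`, and an insertion block `BB`:
//
//   CallBrInst *CBI = CallBrInst::Create(Callee, Fallthrough, {Handler},
//                                        /*Args=*/{}, "", BB);
//   unsigned NumSucc = CBI->getNumSuccessors(); // 2: default + 1 indirect
//   BasicBlock *D = CBI->getDefaultDest();      // == Fallthrough
//   BasicBlock *Ind = CBI->getIndirectDest(0);  // == Handler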
4121 template <typename Bitfield> 4122 void setSubclassData(typename Bitfield::Type Value) { 4123 Instruction::setSubclassData<Bitfield>(Value); 4124 } 4125 }; 4126 4127 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4128 ArrayRef<BasicBlock *> IndirectDests, 4129 ArrayRef<Value *> Args, 4130 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4131 const Twine &NameStr, Instruction *InsertBefore) 4132 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4133 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4134 InsertBefore) { 4135 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4136 } 4137 4138 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4139 ArrayRef<BasicBlock *> IndirectDests, 4140 ArrayRef<Value *> Args, 4141 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4142 const Twine &NameStr, BasicBlock *InsertAtEnd) 4143 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4144 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4145 InsertAtEnd) { 4146 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4147 } 4148 4149 //===----------------------------------------------------------------------===// 4150 // ResumeInst Class 4151 //===----------------------------------------------------------------------===// 4152 4153 //===--------------------------------------------------------------------------- 4154 /// Resume the propagation of an exception. 4155 /// 4156 class ResumeInst : public Instruction { 4157 ResumeInst(const ResumeInst &RI); 4158 4159 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); 4160 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); 4161 4162 protected: 4163 // Note: Instruction needs to be a friend here to call cloneImpl. 4164 friend class Instruction; 4165 4166 ResumeInst *cloneImpl() const; 4167 4168 public: 4169 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { 4170 return new(1) ResumeInst(Exn, InsertBefore); 4171 } 4172 4173 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { 4174 return new(1) ResumeInst(Exn, InsertAtEnd); 4175 } 4176 4177 /// Provide fast operand accessors 4178 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4179 4180 /// Convenience accessor. 4181 Value *getValue() const { return Op<0>(); } 4182 4183 unsigned getNumSuccessors() const { return 0; } 4184 4185 // Methods for support type inquiry through isa, cast, and dyn_cast: 4186 static bool classof(const Instruction *I) { 4187 return I->getOpcode() == Instruction::Resume; 4188 } 4189 static bool classof(const Value *V) { 4190 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4191 } 4192 4193 private: 4194 BasicBlock *getSuccessor(unsigned idx) const { 4195 llvm_unreachable("ResumeInst has no successors!"); 4196 } 4197 4198 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 4199 llvm_unreachable("ResumeInst has no successors!"); 4200 } 4201 }; 4202 4203 template <> 4204 struct OperandTraits<ResumeInst> : 4205 public FixedNumOperandTraits<ResumeInst, 1> { 4206 }; 4207 4208 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value) 4209 4210 //===----------------------------------------------------------------------===// 4211 // CatchSwitchInst Class 4212 //===----------------------------------------------------------------------===// 4213 class CatchSwitchInst : public Instruction { 4214 using UnwindDestField = BoolBitfieldElementT<0>; 4215 4216 /// The number of operands actually allocated. 
NumOperands is 4217 /// the number actually in use. 4218 unsigned ReservedSpace; 4219 4220 // Operand[0] = Outer scope 4221 // Operand[1] = Unwind block destination 4222 // Operand[n] = BasicBlock to go to on match 4223 CatchSwitchInst(const CatchSwitchInst &CSI); 4224 4225 /// Create a new switch instruction, specifying a 4226 /// default destination. The number of additional handlers can be specified 4227 /// here to make memory allocation more efficient. 4228 /// This constructor can also autoinsert before another instruction. 4229 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4230 unsigned NumHandlers, const Twine &NameStr, 4231 Instruction *InsertBefore); 4232 4233 /// Create a new switch instruction, specifying a 4234 /// default destination. The number of additional handlers can be specified 4235 /// here to make memory allocation more efficient. 4236 /// This constructor also autoinserts at the end of the specified BasicBlock. 4237 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4238 unsigned NumHandlers, const Twine &NameStr, 4239 BasicBlock *InsertAtEnd); 4240 4241 // allocate space for exactly zero operands 4242 void *operator new(size_t s) { return User::operator new(s); } 4243 4244 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); 4245 void growOperands(unsigned Size); 4246 4247 protected: 4248 // Note: Instruction needs to be a friend here to call cloneImpl. 4249 friend class Instruction; 4250 4251 CatchSwitchInst *cloneImpl() const; 4252 4253 public: 4254 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4255 unsigned NumHandlers, 4256 const Twine &NameStr = "", 4257 Instruction *InsertBefore = nullptr) { 4258 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4259 InsertBefore); 4260 } 4261 4262 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4263 unsigned NumHandlers, const Twine &NameStr, 4264 BasicBlock *InsertAtEnd) { 4265 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4266 InsertAtEnd); 4267 } 4268 4269 /// Provide fast operand accessors 4270 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4271 4272 // Accessor Methods for CatchSwitch stmt 4273 Value *getParentPad() const { return getOperand(0); } 4274 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } 4275 4276 // Accessor Methods for CatchSwitch stmt 4277 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } 4278 bool unwindsToCaller() const { return !hasUnwindDest(); } 4279 BasicBlock *getUnwindDest() const { 4280 if (hasUnwindDest()) 4281 return cast<BasicBlock>(getOperand(1)); 4282 return nullptr; 4283 } 4284 void setUnwindDest(BasicBlock *UnwindDest) { 4285 assert(UnwindDest); 4286 assert(hasUnwindDest()); 4287 setOperand(1, UnwindDest); 4288 } 4289 4290 /// return the number of 'handlers' in this catchswitch 4291 /// instruction, except the default handler 4292 unsigned getNumHandlers() const { 4293 if (hasUnwindDest()) 4294 return getNumOperands() - 2; 4295 return getNumOperands() - 1; 4296 } 4297 4298 private: 4299 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } 4300 static const BasicBlock *handler_helper(const Value *V) { 4301 return cast<BasicBlock>(V); 4302 } 4303 4304 public: 4305 using DerefFnTy = BasicBlock *(*)(Value *); 4306 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; 4307 using handler_range = iterator_range<handler_iterator>; 4308 using ConstDerefFnTy = const BasicBlock 
*(*)(const Value *); 4309 using const_handler_iterator = 4310 mapped_iterator<const_op_iterator, ConstDerefFnTy>; 4311 using const_handler_range = iterator_range<const_handler_iterator>; 4312 4313 /// Returns an iterator that points to the first handler in CatchSwitchInst. 4314 handler_iterator handler_begin() { 4315 op_iterator It = op_begin() + 1; 4316 if (hasUnwindDest()) 4317 ++It; 4318 return handler_iterator(It, DerefFnTy(handler_helper)); 4319 } 4320 4321 /// Returns an iterator that points to the first handler in the 4322 /// CatchSwitchInst. 4323 const_handler_iterator handler_begin() const { 4324 const_op_iterator It = op_begin() + 1; 4325 if (hasUnwindDest()) 4326 ++It; 4327 return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); 4328 } 4329 4330 /// Returns a read-only iterator that points one past the last 4331 /// handler in the CatchSwitchInst. 4332 handler_iterator handler_end() { 4333 return handler_iterator(op_end(), DerefFnTy(handler_helper)); 4334 } 4335 4336 /// Returns an iterator that points one past the last handler in the 4337 /// CatchSwitchInst. 4338 const_handler_iterator handler_end() const { 4339 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); 4340 } 4341 4342 /// iteration adapter for range-for loops. 4343 handler_range handlers() { 4344 return make_range(handler_begin(), handler_end()); 4345 } 4346 4347 /// iteration adapter for range-for loops. 4348 const_handler_range handlers() const { 4349 return make_range(handler_begin(), handler_end()); 4350 } 4351 4352 /// Add an entry to the switch instruction... 4353 /// Note: 4354 /// This action invalidates handler_end(). Old handler_end() iterator will 4355 /// point to the added handler. 4356 void addHandler(BasicBlock *Dest); 4357 4358 void removeHandler(handler_iterator HI); 4359 4360 unsigned getNumSuccessors() const { return getNumOperands() - 1; } 4361 BasicBlock *getSuccessor(unsigned Idx) const { 4362 assert(Idx < getNumSuccessors() && 4363 "Successor # out of range for catchswitch!"); 4364 return cast<BasicBlock>(getOperand(Idx + 1)); 4365 } 4366 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { 4367 assert(Idx < getNumSuccessors() && 4368 "Successor # out of range for catchswitch!"); 4369 setOperand(Idx + 1, NewSucc); 4370 } 4371 4372 // Methods for support type inquiry through isa, cast, and dyn_cast: 4373 static bool classof(const Instruction *I) { 4374 return I->getOpcode() == Instruction::CatchSwitch; 4375 } 4376 static bool classof(const Value *V) { 4377 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4378 } 4379 }; 4380 4381 template <> 4382 struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; 4383 4384 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value) 4385 4386 //===----------------------------------------------------------------------===// 4387 // CleanupPadInst Class 4388 //===----------------------------------------------------------------------===// 4389 class CleanupPadInst : public FuncletPadInst { 4390 private: 4391 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4392 unsigned Values, const Twine &NameStr, 4393 Instruction *InsertBefore) 4394 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4395 NameStr, InsertBefore) {} 4396 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4397 unsigned Values, const Twine &NameStr, 4398 BasicBlock *InsertAtEnd) 4399 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4400 NameStr, InsertAtEnd) {} 4401 
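//
// Example (illustrative sketch): assuming a parent pad token `ParentPad`
// (e.g. ConstantTokenNone::get(Ctx) when there is no enclosing funclet, with
// `Ctx` an assumed LLVMContext), a handler block `HandlerBB`, and an
// insertion block `BB`, a catchswitch that unwinds to the caller can be
// built and its handlers visited:
//
//   auto *CS = CatchSwitchInst::Create(ParentPad, /*UnwindDest=*/nullptr,
//                                      /*NumHandlers=*/1, "cs", BB);
//   CS->addHandler(HandlerBB);
//   for (BasicBlock *H : CS->handlers())
//     H->getName(); // each handler block; just HandlerBB here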
4402 public: 4403 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None, 4404 const Twine &NameStr = "", 4405 Instruction *InsertBefore = nullptr) { 4406 unsigned Values = 1 + Args.size(); 4407 return new (Values) 4408 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); 4409 } 4410 4411 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, 4412 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4413 unsigned Values = 1 + Args.size(); 4414 return new (Values) 4415 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); 4416 } 4417 4418 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4419 static bool classof(const Instruction *I) { 4420 return I->getOpcode() == Instruction::CleanupPad; 4421 } 4422 static bool classof(const Value *V) { 4423 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4424 } 4425 }; 4426 4427 //===----------------------------------------------------------------------===// 4428 // CatchPadInst Class 4429 //===----------------------------------------------------------------------===// 4430 class CatchPadInst : public FuncletPadInst { 4431 private: 4432 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 4433 unsigned Values, const Twine &NameStr, 4434 Instruction *InsertBefore) 4435 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 4436 NameStr, InsertBefore) {} 4437 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 4438 unsigned Values, const Twine &NameStr, 4439 BasicBlock *InsertAtEnd) 4440 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 4441 NameStr, InsertAtEnd) {} 4442 4443 public: 4444 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 4445 const Twine &NameStr = "", 4446 Instruction *InsertBefore = nullptr) { 4447 unsigned Values = 1 + Args.size(); 4448 return new (Values) 4449 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); 4450 } 4451 4452 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 4453 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4454 unsigned Values = 1 + Args.size(); 4455 return new (Values) 4456 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); 4457 } 4458 4459 /// Convenience accessors 4460 CatchSwitchInst *getCatchSwitch() const { 4461 return cast<CatchSwitchInst>(Op<-1>()); 4462 } 4463 void setCatchSwitch(Value *CatchSwitch) { 4464 assert(CatchSwitch); 4465 Op<-1>() = CatchSwitch; 4466 } 4467 4468 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4469 static bool classof(const Instruction *I) { 4470 return I->getOpcode() == Instruction::CatchPad; 4471 } 4472 static bool classof(const Value *V) { 4473 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4474 } 4475 }; 4476 4477 //===----------------------------------------------------------------------===// 4478 // CatchReturnInst Class 4479 //===----------------------------------------------------------------------===// 4480 4481 class CatchReturnInst : public Instruction { 4482 CatchReturnInst(const CatchReturnInst &RI); 4483 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); 4484 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); 4485 4486 void init(Value *CatchPad, BasicBlock *BB); 4487 4488 protected: 4489 // Note: Instruction needs to be a friend here to call cloneImpl. 
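//
// Example (illustrative sketch): assuming an existing `CatchSwitchInst *CS`,
// a pad block `PadBB` (which the new instructions are appended to), and a
// continuation block `ContBB`:
//
//   CatchPadInst *CP = CatchPadInst::Create(CS, /*Args=*/{}, "pad", PadBB);
//   CatchReturnInst::Create(CP, ContBB, PadBB); // leave the funclet to ContBB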
4490 friend class Instruction; 4491 4492 CatchReturnInst *cloneImpl() const; 4493 4494 public: 4495 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 4496 Instruction *InsertBefore = nullptr) { 4497 assert(CatchPad); 4498 assert(BB); 4499 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); 4500 } 4501 4502 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 4503 BasicBlock *InsertAtEnd) { 4504 assert(CatchPad); 4505 assert(BB); 4506 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); 4507 } 4508 4509 /// Provide fast operand accessors 4510 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4511 4512 /// Convenience accessors. 4513 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } 4514 void setCatchPad(CatchPadInst *CatchPad) { 4515 assert(CatchPad); 4516 Op<0>() = CatchPad; 4517 } 4518 4519 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } 4520 void setSuccessor(BasicBlock *NewSucc) { 4521 assert(NewSucc); 4522 Op<1>() = NewSucc; 4523 } 4524 unsigned getNumSuccessors() const { return 1; } 4525 4526 /// Get the parentPad of this catchret's catchpad's catchswitch. 4527 /// The successor block is implicitly a member of this funclet. 4528 Value *getCatchSwitchParentPad() const { 4529 return getCatchPad()->getCatchSwitch()->getParentPad(); 4530 } 4531 4532 // Methods for support type inquiry through isa, cast, and dyn_cast: 4533 static bool classof(const Instruction *I) { 4534 return (I->getOpcode() == Instruction::CatchRet); 4535 } 4536 static bool classof(const Value *V) { 4537 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4538 } 4539 4540 private: 4541 BasicBlock *getSuccessor(unsigned Idx) const { 4542 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 4543 return getSuccessor(); 4544 } 4545 4546 void setSuccessor(unsigned Idx, BasicBlock *B) { 4547 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 4548 setSuccessor(B); 4549 } 4550 }; 4551 4552 template <> 4553 struct OperandTraits<CatchReturnInst> 4554 : public FixedNumOperandTraits<CatchReturnInst, 2> {}; 4555 4556 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value) 4557 4558 //===----------------------------------------------------------------------===// 4559 // CleanupReturnInst Class 4560 //===----------------------------------------------------------------------===// 4561 4562 class CleanupReturnInst : public Instruction { 4563 using UnwindDestField = BoolBitfieldElementT<0>; 4564 4565 private: 4566 CleanupReturnInst(const CleanupReturnInst &RI); 4567 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 4568 Instruction *InsertBefore = nullptr); 4569 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 4570 BasicBlock *InsertAtEnd); 4571 4572 void init(Value *CleanupPad, BasicBlock *UnwindBB); 4573 4574 protected: 4575 // Note: Instruction needs to be a friend here to call cloneImpl. 
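//
// Example (illustrative sketch): assuming a parent pad token `ParentPad`, a
// cleanup block `CleanupBB` (which the new instructions are appended to), and
// an optional unwind target `UnwindBB` (may be null to unwind to the caller):
//
//   CleanupPadInst *CLP = CleanupPadInst::Create(ParentPad, /*Args=*/{},
//                                                "cleanup", CleanupBB);
//   CleanupReturnInst::Create(CLP, UnwindBB, CleanupBB);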

//===----------------------------------------------------------------------===//
// CleanupReturnInst Class
//===----------------------------------------------------------------------===//

class CleanupReturnInst : public Instruction {
  using UnwindDestField = BoolBitfieldElementT<0>;

private:
  CleanupReturnInst(const CleanupReturnInst &RI);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    Instruction *InsertBefore = nullptr);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    BasicBlock *InsertAtEnd);

  void init(Value *CleanupPad, BasicBlock *UnwindBB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CleanupReturnInst *cloneImpl() const;

public:
  static CleanupReturnInst *Create(Value *CleanupPad,
                                   BasicBlock *UnwindBB = nullptr,
                                   Instruction *InsertBefore = nullptr) {
    assert(CleanupPad);
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
  }

  static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
                                   BasicBlock *InsertAtEnd) {
    assert(CleanupPad);
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }

  /// Convenience accessor.
  CleanupPadInst *getCleanupPad() const {
    return cast<CleanupPadInst>(Op<0>());
  }
  void setCleanupPad(CleanupPadInst *CleanupPad) {
    assert(CleanupPad);
    Op<0>() = CleanupPad;
  }

  unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }

  BasicBlock *getUnwindDest() const {
    return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
  }
  void setUnwindDest(BasicBlock *NewDest) {
    assert(NewDest);
    assert(hasUnwindDest());
    Op<1>() = NewDest;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CleanupRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx == 0);
    return getUnwindDest();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx == 0);
    setUnwindDest(B);
  }

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

template <>
struct OperandTraits<CleanupReturnInst>
    : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
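
// Example (illustrative sketch; Ctx and CleanupBB are assumed to exist and are
// not defined in this header): a cleanup funclet pairs a cleanuppad with a
// cleanupret; passing a null unwind destination makes the cleanupret unwind to
// the caller.
//
//   auto *CLP = CleanupPadInst::Create(ConstantTokenNone::get(Ctx), None,
//                                      "cleanup", CleanupBB);
//   CleanupReturnInst::Create(CLP, /*UnwindBB=*/nullptr, CleanupBB);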

//===----------------------------------------------------------------------===//
// UnreachableInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// This instruction has undefined behavior. In particular, the
/// presence of this instruction indicates some higher level knowledge that the
/// end of the block cannot be reached.
///
class UnreachableInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  UnreachableInst *cloneImpl() const;

public:
  explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
  explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t s) {
    return User::operator new(s, 0);
  }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Unreachable;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("UnreachableInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("UnreachableInst has no successors!");
  }
};

//===----------------------------------------------------------------------===//
// TruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of integer types.
class TruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical TruncInst
  TruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  TruncInst(
    Value *S, ///< The value to be truncated
    Type *Ty, ///< The (smaller) type to truncate to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  TruncInst(
    Value *S, ///< The value to be truncated
    Type *Ty, ///< The (smaller) type to truncate to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Trunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
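
// Example (illustrative sketch; Ctx, an i32 Value *V, and an insertion point
// InsertPt are assumed and not defined in this header): TruncInst, and the
// ZExt/SExt and floating-point cast classes that follow, can be constructed
// directly or through the generic CastInst::Create factory.
//
//   Value *Narrow = new TruncInst(V, Type::getInt8Ty(Ctx), "narrow", InsertPt);
//   Value *Wide = CastInst::Create(Instruction::ZExt, Narrow,
//                                  Type::getInt32Ty(Ctx), "wide", InsertPt);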

//===----------------------------------------------------------------------===//
// ZExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents zero extension of integer types.
class ZExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ZExtInst
  ZExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  ZExtInst(
    Value *S, ///< The value to be zero extended
    Type *Ty, ///< The type to zero extend to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  ZExtInst(
    Value *S, ///< The value to be zero extended
    Type *Ty, ///< The type to zero extend to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == ZExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// SExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SExtInst
  SExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SExtInst(
    Value *S, ///< The value to be sign extended
    Type *Ty, ///< The type to sign extend to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SExtInst(
    Value *S, ///< The value to be sign extended
    Type *Ty, ///< The type to sign extend to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPTruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPTruncInst
  FPTruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPTruncInst(
    Value *S, ///< The value to be truncated
    Type *Ty, ///< The type to truncate to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPTruncInst(
    Value *S, ///< The value to be truncated
    Type *Ty, ///< The type to truncate to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPTrunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPExtInst
  FPExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPExtInst(
    Value *S, ///< The value to be extended
    Type *Ty, ///< The type to extend to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPExtInst(
    Value *S, ///< The value to be extended
    Type *Ty, ///< The type to extend to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// UIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical UIToFPInst
  UIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  UIToFPInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  UIToFPInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == UIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// SIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SIToFPInst
  SIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SIToFPInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SIToFPInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPToUIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to unsigned integer.
class FPToUIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToUIInst
  FPToUIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToUIInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToUIInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToUI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPToSIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to signed integer.
class FPToSIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToSIInst
  FPToSIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToSIInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToSIInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToSI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// IntToPtrInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Constructor with insert-before-instruction semantics
  IntToPtrInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  IntToPtrInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Clone an identical IntToPtrInst.
  IntToPtrInst *cloneImpl() const;

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == IntToPtr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// PtrToIntInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from a pointer to an integer.
class PtrToIntInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical PtrToIntInst.
  PtrToIntInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  PtrToIntInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  PtrToIntInst(
    Value *S, ///< The value to be converted
    Type *Ty, ///< The type to convert to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  /// Gets the pointer operand.
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand.
  const Value *getPointerOperand() const { return getOperand(0); }
  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == PtrToInt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
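
// Example (illustrative sketch; Ptr is an assumed pointer-typed Value, DL an
// assumed DataLayout, and InsertPt an assumed insertion point): a ptrtoint /
// inttoptr round trip, querying the address space from the new instruction.
//
//   auto *P2I = new PtrToIntInst(Ptr, DL.getIntPtrType(Ptr->getType()),
//                                "p2i", InsertPt);
//   unsigned AS = P2I->getPointerAddressSpace(); // address space of Ptr
//   auto *I2P = new IntToPtrInst(P2I, Ptr->getType(), "i2p", InsertPt);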

//===----------------------------------------------------------------------===//
// BitCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S, ///< The value to be cast
    Type *Ty, ///< The type to cast to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  BitCastInst(
    Value *S, ///< The value to be cast
    Type *Ty, ///< The type to cast to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// AddrSpaceCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S, ///< The value to be cast
    Type *Ty, ///< The type to cast to
    const Twine &NameStr = "", ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  AddrSpaceCastInst(
    Value *S, ///< The value to be cast
    Type *Ty, ///< The type to cast to
    const Twine &NameStr, ///< A name for the new instruction
    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand.
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand.
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};
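
// Usage sketch for the load/store helper functions declared immediately below
// (purely illustrative; I is an assumed const Value * that may or may not be a
// memory access):
//
//   if (const Value *Ptr = getLoadStorePointerOperand(I)) {
//     // I is a load or a store; Ptr is the address it accesses.
//     Align A = getLoadStoreAlignment(const_cast<Value *>(I));
//     (void)A;
//   }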

/// A helper function that returns the pointer operand of a load or store
/// instruction. Returns nullptr if not a load or store.
inline const Value *getLoadStorePointerOperand(const Value *V) {
  if (auto *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerOperand();
  if (auto *Store = dyn_cast<StoreInst>(V))
    return Store->getPointerOperand();
  return nullptr;
}
inline Value *getLoadStorePointerOperand(Value *V) {
  return const_cast<Value *>(
      getLoadStorePointerOperand(static_cast<const Value *>(V)));
}

/// A helper function that returns the pointer operand of a load, store
/// or GEP instruction. Returns nullptr if not a load, store, or GEP.
inline const Value *getPointerOperand(const Value *V) {
  if (auto *Ptr = getLoadStorePointerOperand(V))
    return Ptr;
  if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
    return Gep->getPointerOperand();
  return nullptr;
}
inline Value *getPointerOperand(Value *V) {
  return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
}

/// A helper function that returns the alignment of a load or store instruction.
inline Align getLoadStoreAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlign();
  return cast<StoreInst>(I)->getAlign();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
inline unsigned getLoadStoreAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

//===----------------------------------------------------------------------===//
// FreezeInst Class
//===----------------------------------------------------------------------===//

/// This class represents a freeze instruction, which returns an arbitrary but
/// fixed concrete value if its operand is either a poison value or an undef
/// value; otherwise it returns its operand.
class FreezeInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FreezeInst
  FreezeInst *cloneImpl() const;

public:
  explicit FreezeInst(Value *S,
                      const Twine &NameStr = "",
                      Instruction *InsertBefore = nullptr);
  FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Freeze;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

} // end namespace llvm

#endif // LLVM_IR_INSTRUCTIONS_H