//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes the class definitions of all of the subclasses of the
// Instruction class. This is meant to be an easy way to get access to all
// instruction subclasses.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_INSTRUCTIONS_H
#define LLVM_IR_INSTRUCTIONS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>

namespace llvm {

class APFloat;
class APInt;
class BasicBlock;
class BlockAddress;
class ConstantInt;
class DataLayout;
class StringRef;
class Type;
class Value;

//===----------------------------------------------------------------------===//
//                                AllocaInst Class
//===----------------------------------------------------------------------===//

/// an instruction to allocate memory on the stack
class AllocaInst : public UnaryInstruction {
  // The type of the object this alloca reserves space for; the instruction's
  // own type is always a pointer to this type.
  Type *AllocatedType;

  // Layout of the subclass-data bitfield: alignment (as a log2 shift), the
  // "used with inalloca" flag, and the "swifterror" flag, packed contiguously.
  using AlignmentField = AlignmentBitfieldElementT<0>;
  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
  using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
                                        SwiftErrorField>(),
                "Bitfields must be contiguous");

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  // Constructors taking an explicit array size (operand 0).
  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                      const Twine &Name, Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  // Convenience constructors for a single-element allocation.
  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
             Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  // Constructors taking both an array size and an explicit alignment.
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Return the address space for the allocation.
  unsigned getAddressSpace() const {
    return getType()->getAddressSpace();
  }

  /// Get allocation size in bits. Returns None if size can't be determined,
  /// e.g. in case of a VLA.
  Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// for use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    // Stored as a log2 shift amount to fit in the bitfield.
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment of the memory being allocated (stored as log2).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  bool isUsedWithInAlloca() const {
    return getSubclassData<UsedWithInAllocaField>();
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setSubclassData<UsedWithInAllocaField>(V);
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

//===----------------------------------------------------------------------===//
//                                LoadInst Class
//===----------------------------------------------------------------------===//

/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  // Packed subclass-data layout: volatile flag, alignment (log2), and the
  // atomic ordering, in that bit order.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return the alignment of the access that is being performed.
  Align getAlign() const {
    // Stored as a log2 shift amount to fit in the bitfield.
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  /// Set the alignment of the access that is being performed (stored as log2).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }
  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Returns true if this load is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Returns true if this load is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this load instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

//===----------------------------------------------------------------------===//
//                                StoreInst Class
//===----------------------------------------------------------------------===//

/// An instruction for storing to memory.
class StoreInst : public Instruction {
  // Packed subclass-data layout: volatile flag, alignment (log2), and the
  // atomic ordering, in that bit order (mirrors LoadInst).
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the alignment of the access that is being performed.
  Align getAlign() const {
    // Stored as a log2 shift amount to fit in the bitfield.
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  /// Set the alignment of the access that is being performed (stored as log2).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this store instruction. May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Returns true if this store is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Returns true if this store is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this store instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)

//===----------------------------------------------------------------------===//
//                                FenceInst Class
//===----------------------------------------------------------------------===//

/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
  using OrderingField = AtomicOrderingBitfieldElementT<0>;

  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  FenceInst *cloneImpl() const;

public:
  // Ordering may only be Acquire, Release, AcquireRelease, or
  // SequentiallyConsistent.
  FenceInst(LLVMContext &C, AtomicOrdering Ordering,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Returns the ordering constraint of this fence instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this fence instruction. May only be
  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this fence instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this fence instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Fence;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this fence instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

//===----------------------------------------------------------------------===//
//                                AtomicCmpXchgInst Class
//===----------------------------------------------------------------------===//

/// An instruction that atomically checks whether a
/// specified value is in a memory location, and, if it is, stores a new value
/// there. The value returned by this instruction is a pair containing the
/// original value as first element, and an i1 indicating success (true) or
/// failure (false) as second element.
///
class AtomicCmpXchgInst : public Instruction {
  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
            AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
            SyncScope::ID SSID);

  // 3-bit ordering element; wide enough for every AtomicOrdering value.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicCmpXchgInst *cloneImpl() const;

public:
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    Instruction *InsertBefore = nullptr);
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    BasicBlock *InsertAtEnd);

  // allocate space for exactly three operands
  void *operator new(size_t S) { return User::operator new(S, 3); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Packed subclass-data layout: volatile flag, weak flag, success ordering,
  // failure ordering, and alignment (log2), in that bit order.
  using VolatileField = BoolBitfieldElementT<0>;
  using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
  using SuccessOrderingField =
      AtomicOrderingBitfieldElementT<WeakField::NextBit>;
  using FailureOrderingField =
      AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
  using AlignmentField =
      AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
                              FailureOrderingField, AlignmentField>(),
      "Bitfields must be contiguous");

  /// Return the alignment of the memory location that this cmpxchg
  /// instruction operates on.
  Align getAlign() const {
    // Stored as a log2 shift amount to fit in the bitfield.
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment of the memory location operated on (stored as log2).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a cmpxchg from a volatile memory
  /// location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile cmpxchg.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return true if this cmpxchg may spuriously fail.
  bool isWeak() const { return getSubclassData<WeakField>(); }

  /// Specify whether this cmpxchg may spuriously fail.
  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Returns true if \p Ordering is legal as a cmpxchg success ordering:
  /// anything except NotAtomic or Unordered.
  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered;
  }

  /// Returns true if \p Ordering is legal as a cmpxchg failure ordering:
  /// release semantics are additionally excluded, since a failed cmpxchg
  /// performs no store.
  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered &&
           Ordering != AtomicOrdering::AcquireRelease &&
           Ordering != AtomicOrdering::Release;
  }

  /// Returns the success ordering constraint of this cmpxchg instruction.
  AtomicOrdering getSuccessOrdering() const {
    return getSubclassData<SuccessOrderingField>();
  }

  /// Sets the success ordering constraint of this cmpxchg instruction.
  void setSuccessOrdering(AtomicOrdering Ordering) {
    assert(isValidSuccessOrdering(Ordering) &&
           "invalid CmpXchg success ordering");
    setSubclassData<SuccessOrderingField>(Ordering);
  }

  /// Returns the failure ordering constraint of this cmpxchg instruction.
  AtomicOrdering getFailureOrdering() const {
    return getSubclassData<FailureOrderingField>();
  }

  /// Sets the failure ordering constraint of this cmpxchg instruction.
  void setFailureOrdering(AtomicOrdering Ordering) {
    assert(isValidFailureOrdering(Ordering) &&
           "invalid CmpXchg failure ordering");
    setSubclassData<FailureOrderingField>(Ordering);
  }

  /// Returns a single ordering which is at least as strong as both the
  /// success and failure orderings for this cmpxchg.
  AtomicOrdering getMergedOrdering() const {
    if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
      return AtomicOrdering::SequentiallyConsistent;
    if (getFailureOrdering() == AtomicOrdering::Acquire) {
      if (getSuccessOrdering() == AtomicOrdering::Monotonic)
        return AtomicOrdering::Acquire;
      if (getSuccessOrdering() == AtomicOrdering::Release)
        return AtomicOrdering::AcquireRelease;
    }
    // In all remaining cases the success ordering is already at least as
    // strong as the failure ordering.
    return getSuccessOrdering();
  }

  /// Returns the synchronization scope ID of this cmpxchg instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this cmpxchg instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getCompareOperand() { return getOperand(1); }
  const Value *getCompareOperand() const { return getOperand(1); }

  Value *getNewValOperand() { return getOperand(2); }
  const Value *getNewValOperand() const { return getOperand(2); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the strongest permitted ordering on failure, given the
  /// desired ordering on success.
  ///
  /// If the comparison in a cmpxchg operation fails, there is no atomic store
  /// so release semantics cannot be provided. So this function drops explicit
  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
  /// operation would remain SequentiallyConsistent.
  static AtomicOrdering
  getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
    switch (SuccessOrdering) {
    default:
      llvm_unreachable("invalid cmpxchg success ordering");
    case AtomicOrdering::Release:
    case AtomicOrdering::Monotonic:
      return AtomicOrdering::Monotonic;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicCmpXchg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this cmpxchg instruction. Not quite
  /// enough room in SubClassData for everything, so synchronization scope ID
  /// gets its own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<AtomicCmpXchgInst> :
    public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)

//===----------------------------------------------------------------------===//
//                                AtomicRMWInst Class
//===----------------------------------------------------------------------===//

/// an instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back. Returns
/// the old value.
///
class AtomicRMWInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicRMWInst *cloneImpl() const;

public:
  /// This enumeration lists the possible modifications atomicrmw can make. In
  /// the descriptions, 'p' is the pointer to the instruction's memory location,
  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
  /// instruction. These instructions always return 'old'.
  enum BinOp : unsigned {
    /// *p = v
    Xchg,
    /// *p = old + v
    Add,
    /// *p = old - v
    Sub,
    /// *p = old & v
    And,
    /// *p = ~(old & v)
    Nand,
    /// *p = old | v
    Or,
    /// *p = old ^ v
    Xor,
    /// *p = old >signed v ? old : v
    Max,
    /// *p = old <signed v ? old : v
    Min,
    /// *p = old >unsigned v ? old : v
    UMax,
    /// *p = old <unsigned v ? old : v
    UMin,

    /// *p = old + v
    FAdd,

    /// *p = old - v
    FSub,

    /// *p = maxnum(old, v)
    /// \p maxnum matches the behavior of \p llvm.maxnum.*.
    FMax,

    /// *p = minnum(old, v)
    /// \p minnum matches the behavior of \p llvm.minnum.*.
    FMin,

    FIRST_BINOP = Xchg,
    LAST_BINOP = FMin,
    BAD_BINOP
  };

private:
  // 3-bit ordering element; wide enough for every AtomicOrdering value.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

  // 4-bit operation element; wide enough for every valid BinOp value.
  template <unsigned Offset>
  using BinOpBitfieldElement =
      typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;

public:
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                Instruction *InsertBefore = nullptr);
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Packed subclass-data layout: volatile flag, atomic ordering, the binary
  // operation, and alignment (log2), in that bit order.
  using VolatileField = BoolBitfieldElementT<0>;
  using AtomicOrderingField =
      AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
  using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
  static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
                                        OperationField, AlignmentField>(),
                "Bitfields must be contiguous");

  /// Returns the binary modification performed by this atomicrmw.
  BinOp getOperation() const { return getSubclassData<OperationField>(); }

  /// Returns a human-readable name (e.g. "xchg", "add") for \p Op.
  static StringRef getOperationName(BinOp Op);

  /// Returns true if \p Op is one of the floating-point operations.
  static bool isFPOperation(BinOp Op) {
    switch (Op) {
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub:
    case AtomicRMWInst::FMax:
    case AtomicRMWInst::FMin:
      return true;
    default:
      return false;
    }
  }

  /// Sets the binary modification performed by this atomicrmw.
  void setOperation(BinOp Operation) {
    setSubclassData<OperationField>(Operation);
  }

  /// Return the alignment of the memory location that this atomicrmw
  /// instruction operates on.
  Align getAlign() const {
    // Stored as a log2 shift amount to fit in the bitfield.
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment of the memory location operated on (stored as log2).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a RMW on a volatile memory location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile RMW or not.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Returns the ordering constraint of this rmw instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<AtomicOrderingField>();
  }

  /// Sets the ordering constraint of this rmw instruction.
  void setOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "atomicrmw instructions can only be atomic.");
    assert(Ordering != AtomicOrdering::Unordered &&
           "atomicrmw instructions cannot be unordered.");
    setSubclassData<AtomicOrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this rmw instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this rmw instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns true if this atomicrmw performs a floating-point operation.
  bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
            AtomicOrdering Ordering, SyncScope::ID SSID);

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this rmw instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<AtomicRMWInst>
    : public FixedNumOperandTraits<AtomicRMWInst,2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)

//===----------------------------------------------------------------------===//
//                                GetElementPtrInst Class
//===----------------------------------------------------------------------===//

// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
// Returns its argument unchanged; exists only so the assert fires with a
// useful message at the point where an invalid index list produced a null
// result type.
inline Type *checkGEPType(Type *Ty) {
  assert(Ty && "Invalid GetElementPtrInst indices for type!");
  return Ty;
}

/// an instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs
///
class GetElementPtrInst : public Instruction {
  // Cached element types; kept in sync with the (possibly opaque) pointer
  // operand and result types during the opaque-pointer transition.
  Type *SourceElementType;
  Type *ResultElementType;

  GetElementPtrInst(const GetElementPtrInst &GEPI);

  /// Constructors - Create a getelementptr instruction with a base pointer and
  /// list of indices. The first ctor can optionally insert before an existing
  /// instruction, the second appends the new instruction to the specified
  /// BasicBlock.
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, Instruction *InsertBefore);
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  GetElementPtrInst *cloneImpl() const;

public:
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    // One operand for the base pointer plus one per index; this count is
    // passed to placement-new to size the hung-off operand list.
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    assert(cast<PointerType>(Ptr->getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(PointeeType));
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertBefore);
  }

  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    // See the InsertBefore overload above for the operand-count accounting.
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    assert(cast<PointerType>(Ptr->getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(PointeeType));
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertAtEnd);
  }

  /// Create an "inbounds" getelementptr. See the documentation for the
  /// "inbounds" flag in LangRef.html for details.
  static GetElementPtrInst *
  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
                 const Twine &NameStr = "",
                 Instruction *InsertBefore = nullptr) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
    GEP->setIsInBounds(true);
    return GEP;
  }

  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr,
                                           BasicBlock *InsertAtEnd) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
    GEP->setIsInBounds(true);
    return GEP;
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  Type *getSourceElementType() const { return SourceElementType; }

  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
  void setResultElementType(Type *Ty) { ResultElementType = Ty; }

  Type *getResultElementType() const {
    // The cached result element type must agree with the instruction's
    // pointer result type (trivially true once pointers are opaque).
    assert(cast<PointerType>(getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(ResultElementType));
    return ResultElementType;
  }

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }

  /// Returns the result type of a getelementptr with the given source
  /// element type and indexes.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// source element type.
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  /// Return the type of the element at the given index of an indexable
  /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
  ///
  /// Returns null if the type can't be indexed, or the given index is not
  /// legal for the given type.
  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);

  // Operand 0 is the base pointer; operands [1, N) are the indices.
  inline op_iterator idx_begin() { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator idx_end() { return op_end(); }
  inline const_op_iterator idx_end() const { return op_end(); }

  inline iterator_range<op_iterator> indices() {
    return make_range(idx_begin(), idx_end());
  }

  inline iterator_range<const_op_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getPointerOperand() {
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }
  static unsigned getPointerOperandIndex() {
    return 0U; // get index for modifying correct operand.
  }

  /// Method to return the pointer operand as a
  /// PointerType.
  Type *getPointerOperandType() const {
    return getPointerOperand()->getType();
  }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  /// Returns the pointer type returned by the GEP
  /// instruction, which may be a vector of pointers.
  static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
                                ArrayRef<Value *> IdxList) {
    PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
    unsigned AddrSpace = OrigPtrTy->getAddressSpace();
    // Still validate the indices even for an opaque base pointer; an opaque
    // source yields an opaque result pointer in the same address space.
    Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
    Type *PtrTy = OrigPtrTy->isOpaque()
                      ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
                      : PointerType::get(ResultElemTy, AddrSpace);
    // Vector GEP
    if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
      ElementCount EltCount = PtrVTy->getElementCount();
      return VectorType::get(PtrTy, EltCount);
    }
    // A single vector index also vectorizes the result.
    for (Value *Index : IdxList)
      if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
        ElementCount EltCount = IndexVTy->getElementCount();
        return VectorType::get(PtrTy, EltCount);
      }
    // Scalar GEP
    return PtrTy;
  }

  unsigned getNumIndices() const { // Note: always non-negative
    return getNumOperands() - 1;
  }

  bool hasIndices() const {
    return getNumOperands() > 1;
  }

  /// Return true if all of the indices of this GEP are
  /// zeros. If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const;

  /// Return true if all of the indices of this GEP are
  /// constant integers. If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const;

  /// Set or clear the inbounds flag on this GEP instruction.
  /// See LangRef.html for the meaning of inbounds on a getelementptr.
  void setIsInBounds(bool b = true);

  /// Determine whether the GEP has the inbounds flag.
  bool isInBounds() const;

  /// Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
  /// Like accumulateConstantOffset, but additionally collects the
  /// contribution of each non-constant index into \p VariableOffsets as a
  /// (Value, per-element multiplier) map; \p ConstantOffset receives the
  /// remaining constant part. Both APInts use \p BitWidth bits.
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     MapVector<Value *, APInt> &VariableOffsets,
                     APInt &ConstantOffset) const;
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<GetElementPtrInst> :
  public VariadicOperandTraits<GetElementPtrInst, 1> {
};

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)

//===----------------------------------------------------------------------===// 1179 // ICmpInst Class 1180 //===----------------------------------------------------------------------===// 1181 1182 /// This instruction compares its operands according to the predicate given 1183 /// to the constructor. It only operates on integers or pointers. The operands 1184 /// must be identical types. 1185 /// Represent an integer comparison operator. 1186 class ICmpInst: public CmpInst { 1187 void AssertOK() { 1188 assert(isIntPredicate() && 1189 "Invalid ICmp predicate value"); 1190 assert(getOperand(0)->getType() == getOperand(1)->getType() && 1191 "Both operands to ICmp instruction are not of the same type!"); 1192 // Check that the operands are the right type 1193 assert((getOperand(0)->getType()->isIntOrIntVectorTy() || 1194 getOperand(0)->getType()->isPtrOrPtrVectorTy()) && 1195 "Invalid operand types for ICmp instruction"); 1196 } 1197 1198 protected: 1199 // Note: Instruction needs to be a friend here to call cloneImpl. 1200 friend class Instruction; 1201 1202 /// Clone an identical ICmpInst 1203 ICmpInst *cloneImpl() const; 1204 1205 public: 1206 /// Constructor with insert-before-instruction semantics. 1207 ICmpInst( 1208 Instruction *InsertBefore, ///< Where to insert 1209 Predicate pred, ///< The predicate to use for the comparison 1210 Value *LHS, ///< The left-hand-side of the expression 1211 Value *RHS, ///< The right-hand-side of the expression 1212 const Twine &NameStr = "" ///< Name of the instruction 1213 ) : CmpInst(makeCmpResultType(LHS->getType()), 1214 Instruction::ICmp, pred, LHS, RHS, NameStr, 1215 InsertBefore) { 1216 #ifndef NDEBUG 1217 AssertOK(); 1218 #endif 1219 } 1220 1221 /// Constructor with insert-at-end semantics. 1222 ICmpInst( 1223 BasicBlock &InsertAtEnd, ///< Block to insert into. 
1224 Predicate pred, ///< The predicate to use for the comparison 1225 Value *LHS, ///< The left-hand-side of the expression 1226 Value *RHS, ///< The right-hand-side of the expression 1227 const Twine &NameStr = "" ///< Name of the instruction 1228 ) : CmpInst(makeCmpResultType(LHS->getType()), 1229 Instruction::ICmp, pred, LHS, RHS, NameStr, 1230 &InsertAtEnd) { 1231 #ifndef NDEBUG 1232 AssertOK(); 1233 #endif 1234 } 1235 1236 /// Constructor with no-insertion semantics 1237 ICmpInst( 1238 Predicate pred, ///< The predicate to use for the comparison 1239 Value *LHS, ///< The left-hand-side of the expression 1240 Value *RHS, ///< The right-hand-side of the expression 1241 const Twine &NameStr = "" ///< Name of the instruction 1242 ) : CmpInst(makeCmpResultType(LHS->getType()), 1243 Instruction::ICmp, pred, LHS, RHS, NameStr) { 1244 #ifndef NDEBUG 1245 AssertOK(); 1246 #endif 1247 } 1248 1249 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. 1250 /// @returns the predicate that would be the result if the operand were 1251 /// regarded as signed. 1252 /// Return the signed version of the predicate 1253 Predicate getSignedPredicate() const { 1254 return getSignedPredicate(getPredicate()); 1255 } 1256 1257 /// This is a static version that you can use without an instruction. 1258 /// Return the signed version of the predicate. 1259 static Predicate getSignedPredicate(Predicate pred); 1260 1261 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. 1262 /// @returns the predicate that would be the result if the operand were 1263 /// regarded as unsigned. 1264 /// Return the unsigned version of the predicate 1265 Predicate getUnsignedPredicate() const { 1266 return getUnsignedPredicate(getPredicate()); 1267 } 1268 1269 /// This is a static version that you can use without an instruction. 1270 /// Return the unsigned version of the predicate. 1271 static Predicate getUnsignedPredicate(Predicate pred); 1272 1273 /// Return true if this predicate is either EQ or NE. 
This also 1274 /// tests for commutativity. 1275 static bool isEquality(Predicate P) { 1276 return P == ICMP_EQ || P == ICMP_NE; 1277 } 1278 1279 /// Return true if this predicate is either EQ or NE. This also 1280 /// tests for commutativity. 1281 bool isEquality() const { 1282 return isEquality(getPredicate()); 1283 } 1284 1285 /// @returns true if the predicate of this ICmpInst is commutative 1286 /// Determine if this relation is commutative. 1287 bool isCommutative() const { return isEquality(); } 1288 1289 /// Return true if the predicate is relational (not EQ or NE). 1290 /// 1291 bool isRelational() const { 1292 return !isEquality(); 1293 } 1294 1295 /// Return true if the predicate is relational (not EQ or NE). 1296 /// 1297 static bool isRelational(Predicate P) { 1298 return !isEquality(P); 1299 } 1300 1301 /// Return true if the predicate is SGT or UGT. 1302 /// 1303 static bool isGT(Predicate P) { 1304 return P == ICMP_SGT || P == ICMP_UGT; 1305 } 1306 1307 /// Return true if the predicate is SLT or ULT. 1308 /// 1309 static bool isLT(Predicate P) { 1310 return P == ICMP_SLT || P == ICMP_ULT; 1311 } 1312 1313 /// Return true if the predicate is SGE or UGE. 1314 /// 1315 static bool isGE(Predicate P) { 1316 return P == ICMP_SGE || P == ICMP_UGE; 1317 } 1318 1319 /// Return true if the predicate is SLE or ULE. 1320 /// 1321 static bool isLE(Predicate P) { 1322 return P == ICMP_SLE || P == ICMP_ULE; 1323 } 1324 1325 /// Returns the sequence of all ICmp predicates. 1326 /// 1327 static auto predicates() { return ICmpPredicates(); } 1328 1329 /// Exchange the two operands to this instruction in such a way that it does 1330 /// not modify the semantics of the instruction. The predicate value may be 1331 /// changed to retain the same result if the predicate is order dependent 1332 /// (e.g. ult). 1333 /// Swap operands and adjust predicate. 
1334 void swapOperands() { 1335 setPredicate(getSwappedPredicate()); 1336 Op<0>().swap(Op<1>()); 1337 } 1338 1339 /// Return result of `LHS Pred RHS` comparison. 1340 static bool compare(const APInt &LHS, const APInt &RHS, 1341 ICmpInst::Predicate Pred); 1342 1343 // Methods for support type inquiry through isa, cast, and dyn_cast: 1344 static bool classof(const Instruction *I) { 1345 return I->getOpcode() == Instruction::ICmp; 1346 } 1347 static bool classof(const Value *V) { 1348 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1349 } 1350 }; 1351 1352 //===----------------------------------------------------------------------===// 1353 // FCmpInst Class 1354 //===----------------------------------------------------------------------===// 1355 1356 /// This instruction compares its operands according to the predicate given 1357 /// to the constructor. It only operates on floating point values or packed 1358 /// vectors of floating point values. The operands must be identical types. 1359 /// Represents a floating point comparison operator. 1360 class FCmpInst: public CmpInst { 1361 void AssertOK() { 1362 assert(isFPPredicate() && "Invalid FCmp predicate value"); 1363 assert(getOperand(0)->getType() == getOperand(1)->getType() && 1364 "Both operands to FCmp instruction are not of the same type!"); 1365 // Check that the operands are the right type 1366 assert(getOperand(0)->getType()->isFPOrFPVectorTy() && 1367 "Invalid operand types for FCmp instruction"); 1368 } 1369 1370 protected: 1371 // Note: Instruction needs to be a friend here to call cloneImpl. 1372 friend class Instruction; 1373 1374 /// Clone an identical FCmpInst 1375 FCmpInst *cloneImpl() const; 1376 1377 public: 1378 /// Constructor with insert-before-instruction semantics. 
1379 FCmpInst( 1380 Instruction *InsertBefore, ///< Where to insert 1381 Predicate pred, ///< The predicate to use for the comparison 1382 Value *LHS, ///< The left-hand-side of the expression 1383 Value *RHS, ///< The right-hand-side of the expression 1384 const Twine &NameStr = "" ///< Name of the instruction 1385 ) : CmpInst(makeCmpResultType(LHS->getType()), 1386 Instruction::FCmp, pred, LHS, RHS, NameStr, 1387 InsertBefore) { 1388 AssertOK(); 1389 } 1390 1391 /// Constructor with insert-at-end semantics. 1392 FCmpInst( 1393 BasicBlock &InsertAtEnd, ///< Block to insert into. 1394 Predicate pred, ///< The predicate to use for the comparison 1395 Value *LHS, ///< The left-hand-side of the expression 1396 Value *RHS, ///< The right-hand-side of the expression 1397 const Twine &NameStr = "" ///< Name of the instruction 1398 ) : CmpInst(makeCmpResultType(LHS->getType()), 1399 Instruction::FCmp, pred, LHS, RHS, NameStr, 1400 &InsertAtEnd) { 1401 AssertOK(); 1402 } 1403 1404 /// Constructor with no-insertion semantics 1405 FCmpInst( 1406 Predicate Pred, ///< The predicate to use for the comparison 1407 Value *LHS, ///< The left-hand-side of the expression 1408 Value *RHS, ///< The right-hand-side of the expression 1409 const Twine &NameStr = "", ///< Name of the instruction 1410 Instruction *FlagsSource = nullptr 1411 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS, 1412 RHS, NameStr, nullptr, FlagsSource) { 1413 AssertOK(); 1414 } 1415 1416 /// @returns true if the predicate of this instruction is EQ or NE. 1417 /// Determine if this is an equality predicate. 1418 static bool isEquality(Predicate Pred) { 1419 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || 1420 Pred == FCMP_UNE; 1421 } 1422 1423 /// @returns true if the predicate of this instruction is EQ or NE. 1424 /// Determine if this is an equality predicate. 
1425 bool isEquality() const { return isEquality(getPredicate()); } 1426 1427 /// @returns true if the predicate of this instruction is commutative. 1428 /// Determine if this is a commutative predicate. 1429 bool isCommutative() const { 1430 return isEquality() || 1431 getPredicate() == FCMP_FALSE || 1432 getPredicate() == FCMP_TRUE || 1433 getPredicate() == FCMP_ORD || 1434 getPredicate() == FCMP_UNO; 1435 } 1436 1437 /// @returns true if the predicate is relational (not EQ or NE). 1438 /// Determine if this a relational predicate. 1439 bool isRelational() const { return !isEquality(); } 1440 1441 /// Exchange the two operands to this instruction in such a way that it does 1442 /// not modify the semantics of the instruction. The predicate value may be 1443 /// changed to retain the same result if the predicate is order dependent 1444 /// (e.g. ult). 1445 /// Swap operands and adjust predicate. 1446 void swapOperands() { 1447 setPredicate(getSwappedPredicate()); 1448 Op<0>().swap(Op<1>()); 1449 } 1450 1451 /// Returns the sequence of all FCmp predicates. 1452 /// 1453 static auto predicates() { return FCmpPredicates(); } 1454 1455 /// Return result of `LHS Pred RHS` comparison. 1456 static bool compare(const APFloat &LHS, const APFloat &RHS, 1457 FCmpInst::Predicate Pred); 1458 1459 /// Methods for support type inquiry through isa, cast, and dyn_cast: 1460 static bool classof(const Instruction *I) { 1461 return I->getOpcode() == Instruction::FCmp; 1462 } 1463 static bool classof(const Value *V) { 1464 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1465 } 1466 }; 1467 1468 //===----------------------------------------------------------------------===// 1469 /// This class represents a function call, abstracting a target 1470 /// machine's calling convention. This class uses low bit of the SubClassData 1471 /// field to indicate whether or not this is a tail call. The rest of the bits 1472 /// hold the calling convention of the call. 
///
class CallInst : public CallBase {
  CallInst(const CallInst &CI);

  /// Construct a CallInst given a range of arguments.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  Instruction *InsertBefore);

  // Convenience delegate: same as above with no operand bundles.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  const Twine &NameStr, Instruction *InsertBefore)
      : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}

  /// Construct a CallInst given a range of arguments.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  // Zero-argument call constructors.
  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
                    Instruction *InsertBefore);

  CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
           BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus the input operand
    // counts provided.
    return 1 + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallInst *cloneImpl() const;

public:
  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    // Extra trailing storage for the per-bundle descriptors (see CallBase).
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    // Extra trailing storage for the per-bundle descriptors (see CallBase).
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
  }

  // FunctionCallee overloads: forward to the (FunctionType, Value*) factories.
  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical \p CI in every way except that
  /// the operand bundles for the new instruction are set to the operand bundles
  /// in \p Bundles.
  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
                          Instruction *InsertPt = nullptr);

  /// Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  /// Generate the IR for a call to the builtin free function.
  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 BasicBlock *InsertAtEnd);

  // Note that 'musttail' implies 'tail'.
  enum TailCallKind : unsigned {
    TCK_None = 0,
    TCK_Tail = 1,
    TCK_MustTail = 2,
    TCK_NoTail = 3,
    TCK_LAST = TCK_NoTail
  };

  // Two low bits of SubclassData hold the tail-call kind, immediately below
  // the calling-convention bits inherited from CallBase.
  using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
  static_assert(
      Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
      "Bitfields must be contiguous");

  TailCallKind getTailCallKind() const {
    return getSubclassData<TailCallKindField>();
  }

  bool isTailCall() const {
    TailCallKind Kind = getTailCallKind();
    return Kind == TCK_Tail || Kind == TCK_MustTail;
  }

  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }

  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }

  void setTailCallKind(TailCallKind TCK) {
    setSubclassData<TailCallKindField>(TCK);
  }

  void setTailCall(bool IsTc = true) {
    setTailCallKind(IsTc ? TCK_Tail : TCK_None);
  }

  /// Return true if the call can return twice
  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
  void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Updates profile metadata by scaling it by \p S / \p T.
  void updateProfWeight(uint64_t S, uint64_t T);

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

// Out-of-line ctor bodies: operands (callee + args + bundle inputs) are
// co-allocated before `this`, so op_begin is computed back from op_end.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// This class represents the LLVM 'select' instruction.
///
class SelectInst : public Instruction {
  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             Instruction *InsertBefore)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertBefore) {
    init(C, S1, S2);
    setName(NameStr);
  }

  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             BasicBlock *InsertAtEnd)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertAtEnd) {
    init(C, S1, S2);
    setName(NameStr);
  }

  // Validate and store the three operands: condition, true value, false
  // value (in that operand order).
  void init(Value *C, Value *S1, Value *S2) {
    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
    Op<0>() = C;
    Op<1>() = S1;
    Op<2>() = S2;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SelectInst *cloneImpl() const;

public:
  /// Create a select. If \p MDFrom is provided, all metadata attached to it
  /// is copied onto the new instruction.
  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr,
                            Instruction *MDFrom = nullptr) {
    SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
    if (MDFrom)
      Sel->copyMetadata(*MDFrom);
    return Sel;
  }

  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
  }

  // Operand 0 is the condition, operand 1 the value selected when the
  // condition is true, operand 2 the value selected when it is false.
  const Value *getCondition() const { return Op<0>(); }
  const Value *getTrueValue() const { return Op<1>(); }
  const Value *getFalseValue() const { return Op<2>(); }
  Value *getCondition() { return Op<0>(); }
  Value *getTrueValue() { return Op<1>(); }
  Value *getFalseValue() { return Op<2>(); }

  void setCondition(Value *V) { Op<0>() = V; }
  void setTrueValue(Value *V) { Op<1>() = V; }
  void setFalseValue(Value *V) { Op<2>() = V; }

  /// Swap the true and false values of the select instruction.
  /// This doesn't swap prof metadata.
  void swapValues() { Op<1>().swap(Op<2>()); }

  /// Return a string if the specified operands are invalid
  /// for a select operation, otherwise return null.
  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  OtherOps getOpcode() const {
    return static_cast<OtherOps>(Instruction::getOpcode());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Select;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)

//===----------------------------------------------------------------------===//
//                                VAArgInst Class
//===----------------------------------------------------------------------===//

/// This class represents the va_arg llvm instruction, which returns
/// an argument of the specified type given a va_list and increments that list
///
class VAArgInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  VAArgInst *cloneImpl() const;

public:
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
            Instruction *InsertBefore = nullptr)
      : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
            BasicBlock *InsertAtEnd)
      : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
    setName(NameStr);
  }

  // The sole operand is the va_list being read (see the class comment).
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                           ExtractElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a single (scalar)
/// element from a VectorType value
///
class ExtractElementInst : public Instruction {
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
                     Instruction *InsertBefore = nullptr);
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
                     BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractElementInst *cloneImpl() const;

public:
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr = "",
                                    Instruction *InsertBefore = nullptr) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
  }

  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr,
                                    BasicBlock *InsertAtEnd) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an extractelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *Idx);

  // Operand 0 is the source vector, operand 1 the element index.
  Value *getVectorOperand() { return Op<0>(); }
  Value *getIndexOperand() { return Op<1>(); }
  const Value *getVectorOperand() const { return Op<0>(); }
  const Value *getIndexOperand() const { return Op<1>(); }

  VectorType *getVectorOperandType() const {
    return cast<VectorType>(getVectorOperand()->getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)

//===----------------------------------------------------------------------===//
//                           InsertElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a single (scalar)
/// element into a VectorType value
///
class InsertElementInst : public Instruction {
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
                    BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertElementInst *cloneImpl() const;

public:
  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
  }

  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an insertelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *NewElt,
                              const Value *Idx);

  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<InsertElementInst> :
  public FixedNumOperandTraits<InsertElementInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)

//===----------------------------------------------------------------------===//
//                           ShuffleVectorInst Class
//===----------------------------------------------------------------------===//

/// Sentinel mask element: the corresponding result element is undefined.
constexpr int UndefMaskElem = -1;

/// This instruction constructs a fixed permutation of two
/// input vectors.
///
/// For each element of the result vector, the shuffle mask selects an element
/// from one of the input vectors to copy to the result. Non-negative elements
/// in the mask represent an index into the concatenated pair of input vectors.
/// UndefMaskElem (-1) specifies that the result element is undefined.
///
/// For scalable vectors, all the elements of the mask must be 0 or -1. This
/// requirement may be relaxed in the future.
class ShuffleVectorInst : public Instruction {
  // The mask is stored out-of-line as integers rather than as an operand.
  SmallVector<int, 4> ShuffleMask;
  // Constant form of the mask, retained for bitcode emission only (see
  // getShuffleMaskForBitcode()).
  Constant *ShuffleMaskForBitcode;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
2014 friend class Instruction; 2015 2016 ShuffleVectorInst *cloneImpl() const; 2017 2018 public: 2019 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "", 2020 Instruction *InsertBefore = nullptr); 2021 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, 2022 BasicBlock *InsertAtEnd); 2023 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "", 2024 Instruction *InsertBefore = nullptr); 2025 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr, 2026 BasicBlock *InsertAtEnd); 2027 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2028 const Twine &NameStr = "", 2029 Instruction *InsertBefor = nullptr); 2030 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2031 const Twine &NameStr, BasicBlock *InsertAtEnd); 2032 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2033 const Twine &NameStr = "", 2034 Instruction *InsertBefor = nullptr); 2035 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2036 const Twine &NameStr, BasicBlock *InsertAtEnd); 2037 2038 void *operator new(size_t S) { return User::operator new(S, 2); } 2039 void operator delete(void *Ptr) { return User::operator delete(Ptr); } 2040 2041 /// Swap the operands and adjust the mask to preserve the semantics 2042 /// of the instruction. 2043 void commute(); 2044 2045 /// Return true if a shufflevector instruction can be 2046 /// formed with the specified operands. 2047 static bool isValidOperands(const Value *V1, const Value *V2, 2048 const Value *Mask); 2049 static bool isValidOperands(const Value *V1, const Value *V2, 2050 ArrayRef<int> Mask); 2051 2052 /// Overload to return most specific vector type. 2053 /// 2054 VectorType *getType() const { 2055 return cast<VectorType>(Instruction::getType()); 2056 } 2057 2058 /// Transparently provide more efficient getOperand methods. 
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the shuffle mask value of this instruction for the given element
  /// index. Return UndefMaskElem if the element is undef.
  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }

  /// Convert the input shuffle mask operand to a vector of integers. Undefined
  /// elements of the mask are returned as UndefMaskElem.
  static void getShuffleMask(const Constant *Mask,
                             SmallVectorImpl<int> &Result);

  /// Return the mask for this instruction as a vector of integers. Undefined
  /// elements of the mask are returned as UndefMaskElem.
  void getShuffleMask(SmallVectorImpl<int> &Result) const {
    Result.assign(ShuffleMask.begin(), ShuffleMask.end());
  }

  /// Return the mask for this instruction, for use in bitcode.
  ///
  /// TODO: This is temporary until we decide a new bitcode encoding for
  /// shufflevector.
  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }

  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                Type *ResultTy);

  void setShuffleMask(ArrayRef<int> Mask);

  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }

  /// Return true if this shuffle returns a vector with a different number of
  /// elements than its source vectors.
  /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
  ///           shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
  bool changesLength() const {
    // Result length equals the mask length; compare it against the (minimum
    // known, for scalable vectors) element count of the first source.
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts != NumMaskElts;
  }

  /// Return true if this shuffle returns a vector with a greater number of
  /// elements than its source vectors.
  /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
  bool increasesLength() const {
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts < NumMaskElts;
  }

  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector.
  /// Example: <7,5,undef,7>
  /// This assumes that vector operands are the same length as the mask.
  static bool isSingleSourceMask(ArrayRef<int> Mask);
  static bool isSingleSourceMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSingleSourceMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without changing the length of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
  /// TODO: Optionally allow length-changing shuffles.
  bool isSingleSource() const {
    return !changesLength() && isSingleSourceMask(ShuffleMask);
  }

  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector without lane crossings. A shuffle using this mask is not
  /// necessarily a no-op because it may change the number of elements from its
  /// input vectors or it may provide demanded bits knowledge via undef lanes.
  /// Example: <undef,undef,2,3>
  static bool isIdentityMask(ArrayRef<int> Mask);
  static bool isIdentityMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");

    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;

    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isIdentityMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without lane crossings and does not change the number of elements
  /// from its input vectors.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
  bool isIdentity() const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    return !changesLength() && isIdentityMask(ShuffleMask);
  }

  /// Return true if this shuffle lengthens exactly one source vector with
  /// undefs in the high elements.
  bool isIdentityWithPadding() const;

  /// Return true if this shuffle extracts the first N elements of exactly one
  /// source vector.
  bool isIdentityWithExtract() const;

  /// Return true if this shuffle concatenates its 2 source vectors. This
  /// returns false if either input is undefined. In that case, the shuffle is
  /// better classified as an identity with padding operation.
  bool isConcat() const;

  /// Return true if this shuffle mask chooses elements from its source vectors
  /// without lane crossings. A shuffle using this mask would be
  /// equivalent to a vector select with a constant condition operand.
  /// Example: <4,1,6,undef>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// This assumes that vector operands are the same length as the mask
  /// (a length-changing shuffle can never be equivalent to a vector select).
  static bool isSelectMask(ArrayRef<int> Mask);
  static bool isSelectMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSelectMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from its source vectors
  /// without lane crossings and all operands have the same number of elements.
  /// In other words, this shuffle is equivalent to a vector select with a
  /// constant condition operand.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// TODO: Optionally allow length-changing shuffles.
  bool isSelect() const {
    return !changesLength() && isSelectMask(ShuffleMask);
  }

  /// Return true if this shuffle mask swaps the order of elements from exactly
  /// one source vector.
  /// Example: <7,6,undef,4>
  /// This assumes that vector operands are the same length as the mask.
  static bool isReverseMask(ArrayRef<int> Mask);
  static bool isReverseMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReverseMask(MaskAsInts);
  }

  /// Return true if this shuffle swaps the order of elements from exactly
  /// one source vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
  /// TODO: Optionally allow length-changing shuffles.
  bool isReverse() const {
    return !changesLength() && isReverseMask(ShuffleMask);
  }

  /// Return true if this shuffle mask chooses all elements with the same value
  /// as the first element of exactly one source vector.
  /// Example: <4,undef,undef,4>
  /// This assumes that vector operands are the same length as the mask.
  static bool isZeroEltSplatMask(ArrayRef<int> Mask);
  static bool isZeroEltSplatMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isZeroEltSplatMask(MaskAsInts);
  }

  /// Return true if all elements of this shuffle are the same value as the
  /// first element of exactly one source vector without changing the length
  /// of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
  /// TODO: Optionally allow length-changing shuffles.
  /// TODO: Optionally allow splats from other elements.
  bool isZeroEltSplat() const {
    return !changesLength() && isZeroEltSplatMask(ShuffleMask);
  }

  /// Return true if this shuffle mask is a transpose mask.
  /// Transpose vector masks transpose a 2xn matrix. They read corresponding
  /// even- or odd-numbered vector elements from two n-dimensional source
  /// vectors and write each result into consecutive elements of an
  /// n-dimensional destination vector. Two shuffles are necessary to complete
  /// the transpose, one for the even elements and another for the odd elements.
  /// This description closely follows how the TRN1 and TRN2 AArch64
  /// instructions operate.
  ///
  /// For example, a simple 2x2 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b >
  ///   m1 = < c, d >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
  ///   t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
  ///
  /// For matrices having greater than n columns, the resulting nx2 transposed
  /// matrix is stored in two result vectors such that one vector contains
  /// interleaved elements from all the even-numbered rows and the other vector
  /// contains interleaved elements from all the odd-numbered rows. For example,
  /// a 2x4 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b, c, d >
  ///   m1 = < e, f, g, h >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
  ///   t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
  static bool isTransposeMask(ArrayRef<int> Mask);
  static bool isTransposeMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isTransposeMask(MaskAsInts);
  }

  /// Return true if this shuffle transposes the elements of its inputs without
  /// changing the length of the vectors. This operation may also be known as a
  /// merge or interleave. See the description for isTransposeMask() for the
  /// exact specification.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
  bool isTranspose() const {
    return !changesLength() && isTransposeMask(ShuffleMask);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  /// A valid extract subvector mask returns a smaller vector from a single
  /// source operand. The base extraction index is returned as well.
  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index);
  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
                                     int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  bool isExtractSubvectorMask(int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an insert subvector mask.
  /// A valid insert subvector mask inserts the lowest elements of a second
  /// source operand into an in-place first source operand.
  /// Both the sub vector width and the insertion index is returned.
  static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index);
  static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
  }

  /// Return true if this shuffle mask is an insert subvector mask.
  bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
  }

  /// Return true if this shuffle mask replicates each of the \p VF elements
  /// in a vector \p ReplicationFactor times.
  /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
  ///   <0,0,0,1,1,1,2,2,2,3,3,3>
  static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
                                int &VF);
  static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
                                int &VF) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
  }

  /// Return true if this shuffle mask is a replication mask.
  bool isReplicationMask(int &ReplicationFactor, int &VF) const;

  /// Change values in a shuffle permute mask assuming the two vector operands
  /// of length InVecNumElts have swapped position.
  static void commuteShuffleMask(MutableArrayRef<int> Mask,
                                 unsigned InVecNumElts) {
    for (int &Idx : Mask) {
      // Undef lanes (-1) are unaffected by swapping the operands.
      if (Idx == -1)
        continue;
      // Indices below InVecNumElts referred to the first vector and now
      // refer to the second, and vice versa.
      Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
             "shufflevector mask index out of range");
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ShuffleVector;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ShuffleVectorInst>
    : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)

//===----------------------------------------------------------------------===//
//                            ExtractValueInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
  // The constant index path into the aggregate, stored out-of-line (the
  // sole operand is the aggregate itself).
  SmallVector<unsigned, 4> Indices;

  ExtractValueInst(const ExtractValueInst &EVI);

  /// Constructors - Create an extractvalue instruction with a base aggregate
  /// value and a list of indices. The first ctor can optionally insert before
  /// an existing instruction, the second appends the new instruction to the
  /// specified BasicBlock.
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr,
                          Instruction *InsertBefore);
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractValueInst *cloneImpl() const;

public:
  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr = "",
                                  Instruction *InsertBefore = nullptr) {
    return new
      ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
  }

  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr,
                                  BasicBlock *InsertAtEnd) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
  }

  /// Returns the type of the element that would be extracted
  /// with an extractvalue instruction with the specified parameters.
  ///
  /// Null is returned if the indices are invalid for the specified type.
  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  // Unconditionally true for this instruction class.
  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

// The result type is computed from the aggregate type and the index list;
// getIndexedType returns null for invalid indices (see its doc above), which
// checkGEPType guards against.
ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   Instruction *InsertBefore)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}

ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertAtEnd) {
  init(Idxs, NameStr);
}

//===----------------------------------------------------------------------===//
//                             InsertValueInst Class
//===----------------------------------------------------------------------===//

///
/// This instruction inserts a struct field or array element value into an
/// aggregate value.
///
class InsertValueInst : public Instruction {
  /// The sequence of indices identifying the position written to.
  SmallVector<unsigned, 4> Indices;

  InsertValueInst(const InsertValueInst &IVI);

  /// Constructors - Create an insertvalue instruction with a base aggregate
  /// value, a value to insert, and a list of indices.  The first ctor can
  /// optionally insert before an existing instruction, the second appends
  /// the new instruction to the specified BasicBlock.
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr,
                         Instruction *InsertBefore);
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Constructors - These two constructors are convenience methods because one
  /// and two index insertvalue instructions are so common.
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
                  const Twine &NameStr = "",
                  Instruction *InsertBefore = nullptr);
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  /// Shared constructor tail: records the operands and the index list.
  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
            const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertValueInst *cloneImpl() const;

public:
  // allocate space for exactly two operands (the aggregate and the value)
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
  }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U;                      // get index for modifying correct operand
  }

  Value *getInsertedValueOperand() {
    return getOperand(1);
  }
  const Value *getInsertedValueOperand() const {
    return getOperand(1);
  }
  static unsigned getInsertedValueOperandIndex() {
    return 1U;                      // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  /// An insertvalue always carries at least one index, so this is always true.
  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<InsertValueInst> :
  public FixedNumOperandTraits<InsertValueInst, 2> {
};

// The result type of an insertvalue is the aggregate's own type.
InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertBefore) {
  init(Agg, Val, Idxs, NameStr);
}

InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertAtEnd) {
  init(Agg, Val, Idxs, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)

//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

// PHINode - The PHINode class is used to represent the magical mystical PHI
// node, that can not exist in nature, but can be synthesized in a computer
// scientist's overactive imagination.
//
class PHINode : public Instruction {
  /// The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  PHINode(const PHINode &PN);

  explicit PHINode(Type *Ty, unsigned NumReservedValues,
                   const Twine &NameStr = "",
                   Instruction *InsertBefore = nullptr)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
      ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
          BasicBlock *InsertAtEnd)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
      ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  PHINode *cloneImpl() const;

  // allocHungoffUses - this is more complicated than the generic
  // User::allocHungoffUses, because we have to allocate Uses for the incoming
  // values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

public:
  /// Constructors - NumReservedValues is a hint for the number of incoming
  /// edges that this phi node will have (use 0 if you really have no idea).
  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr = "",
                         Instruction *InsertBefore = nullptr) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
  }

  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.

  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock * const *;

  // The incoming-block array lives immediately after the ReservedSpace use
  // slots allocated by allocHungoffUses, hence the offset from op_begin().
  block_iterator block_begin() {
    return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
  }

  const_block_iterator block_begin() const {
    return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
  }

  block_iterator block_end() {
    return block_begin() + getNumOperands();
  }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  ///
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number x
  ///
  Value *getIncomingValue(unsigned i) const {
    return getOperand(i);
  }
  void setIncomingValue(unsigned i, Value *V) {
    assert(V && "PHI node got a null value!");
    assert(getType() == V->getType() &&
           "All operands to PHI node must be the same type as the PHI node!");
    setOperand(i, V);
  }

  // Incoming values occupy operand slots 0..N-1, so both mappings are the
  // identity.
  static unsigned getOperandNumForIncomingValue(unsigned i) {
    return i;
  }

  static unsigned getIncomingValueNumForOperand(unsigned i) {
    return i;
  }

  /// Return incoming basic block number @p i.
  ///
  BasicBlock *getIncomingBlock(unsigned i) const {
    return block_begin()[i];
  }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  ///
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  ///
  BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned i, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[i] = BB;
  }

  /// Replace every incoming basic block \p Old to basic block \p New.
  void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
    assert(New && Old && "PHI node got a null basic block!");
    for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == Old)
        setIncomingBlock(Op, New);
  }

  /// Add an incoming value to the end of the PHI list
  ///
  void addIncoming(Value *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands();  // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Remove an incoming value.  This is useful if a
  /// predecessor basic block is deleted.  The value removed is returned.
  ///
  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
  /// is true), the PHI node is destroyed and any uses of it are replaced with
  /// dummy values.  The only time there should be zero incoming values to a
  /// PHI node is when the block is dead, so this strategy is sound.
  ///
  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);

  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument to remove!");
    return removeIncomingValue(Idx, DeletePHIIfEmpty);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI.  Returns -1 if no instance.
  ///
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (block_begin()[i] == BB)
        return i;
    return -1;
  }

  Value *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  /// Set every incoming value(s) for block \p BB to \p V.
  void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
    assert(BB && "PHI node got a null basic block!");
    bool Found = false;
    // A block may appear more than once (e.g. a switch with several cases
    // branching to the same successor); update every matching slot.
    for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == BB) {
        Found = true;
        setIncomingValue(Op, V);
      }
    (void)Found;
    assert(Found && "Invalid basic block argument to set!");
  }

  /// If the specified PHI node always merges together the
  /// same value, return the value, otherwise return null.
  Value *hasConstantValue() const;

  /// Whether the specified PHI node always merges
  /// together the same value, assuming undefs are equal to a unique
  /// non-undef value.
  bool hasConstantOrUndefValue() const;

  /// If the PHI node is complete which means all of its parent's predecessors
  /// have incoming value in this PHI, return true, otherwise return false.
  bool isComplete() const {
    return llvm::all_of(predecessors(getParent()),
                        [this](const BasicBlock *Pred) {
                          return getBasicBlockIndex(Pred) >= 0;
                        });
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::PHI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void growOperands();
};

template <>
struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)

//===----------------------------------------------------------------------===//
//                           LandingPadInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// The landingpad instruction holds all of the information
/// necessary to generate correct exception handling. The landingpad instruction
/// cannot be moved from the top of a landing pad block, which itself is
/// accessible only from the 'unwind' edge of an invoke. This uses the
/// SubclassData field in Value to store whether or not the landingpad is a
/// cleanup.
///
class LandingPadInst : public Instruction {
  using CleanupField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  LandingPadInst(const LandingPadInst &LP);

public:
  enum ClauseType { Catch, Filter };

private:
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, Instruction *InsertBefore);
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Allocate space for exactly zero operands.
  void *operator new(size_t S) { return User::operator new(S); }

  void growOperands(unsigned Size);
  void init(unsigned NumReservedValues, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LandingPadInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Constructors - NumReservedClauses is a hint for the number of incoming
  /// clauses that this landingpad will have (use 0 if you really have no idea).
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr = "",
                                Instruction *InsertBefore = nullptr);
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return 'true' if this landingpad instruction is a
  /// cleanup. I.e., it should be run when unwinding even if its landing pad
  /// doesn't catch the exception.
  bool isCleanup() const { return getSubclassData<CleanupField>(); }

  /// Indicate that this landingpad instruction is a cleanup.
  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }

  /// Add a catch or filter clause to the landing pad.
  void addClause(Constant *ClauseVal);

  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
  /// determine what type of clause this is.
  Constant *getClause(unsigned Idx) const {
    return cast<Constant>(getOperandList()[Idx]);
  }

  /// Return 'true' if the clause and index Idx is a catch clause.
  // Filter clauses have array type, catch clauses do not; the two predicates
  // below distinguish them on that basis.
  bool isCatch(unsigned Idx) const {
    return !isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Return 'true' if the clause and index Idx is a filter clause.
  bool isFilter(unsigned Idx) const {
    return isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Get the number of clauses for this landing pad.
  unsigned getNumClauses() const { return getNumOperands(); }

  /// Grow the size of the operand list to accommodate the new
  /// number of clauses.
  void reserveClauses(unsigned Size) { growOperands(Size); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::LandingPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)

//===----------------------------------------------------------------------===//
//                               ReturnInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Return a value (possibly void), from a function.  Execution
/// does not continue in this function any longer.
///
class ReturnInst : public Instruction {
  ReturnInst(const ReturnInst &RI);

private:
  // ReturnInst constructors:
  // ReturnInst()                  - 'ret void' instruction
  // ReturnInst(    null)          - 'ret void' instruction
  // ReturnInst(Value* X)          - 'ret X'    instruction
  // ReturnInst(    null, Inst *I) - 'ret void' instruction, insert before I
  // ReturnInst(Value* X, Inst *I) - 'ret X'    instruction, insert before I
  // ReturnInst(    null, BB *B)   - 'ret void' instruction, insert @ end of B
  // ReturnInst(Value* X, BB *B)   - 'ret X'    instruction, insert @ end of B
  //
  // NOTE: If the Value* passed is of type void then the constructor behaves as
  // if it was passed NULL.
  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
                      Instruction *InsertBefore = nullptr);
  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ReturnInst *cloneImpl() const;

public:
  // '!!retVal' is 1 for a non-null return value and 0 otherwise, so the
  // placement-new allocates operand space only when there is a return value.
  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
                            Instruction *InsertBefore = nullptr) {
    return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
  }

  static ReturnInst* Create(LLVMContext &C, Value *retVal,
                            BasicBlock *InsertAtEnd) {
    return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
  }

  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
    return new(0) ReturnInst(C, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor. Returns null if there is no return value.
  Value *getReturnValue() const {
    return getNumOperands() != 0 ? getOperand(0) : nullptr;
  }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Ret);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // ReturnInst is a terminator with zero successors; these private overrides
  // exist only to satisfy the successor interface and must never be reached.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ReturnInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("ReturnInst has no successors!");
  }
};

template <>
struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)

//===----------------------------------------------------------------------===//
//                               BranchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Conditional or Unconditional Branch instruction.
///
class BranchInst : public Instruction {
  /// Ops list - Branches are strange.  The operands are ordered:
  ///  [Cond, FalseDest,] TrueDest.  This makes some accessors faster because
  ///  they don't have to check for cond/uncond branchness. These are mostly
  ///  accessed relative from op_end().
  BranchInst(const BranchInst &BI);

  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
  // BranchInst(BB *B)                           - 'br B'
  // BranchInst(BB* T, BB *F, Value *C)          - 'br C, T, F'
  // BranchInst(BB* B, Inst *I)                  - 'br B'        insert before I
  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
  // BranchInst(BB* B, BB *I)                    - 'br B'        insert at end
  // BranchInst(BB* T, BB *F, Value *C, BB *I)   - 'br C, T, F', insert at end
  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             BasicBlock *InsertAtEnd);

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  BranchInst *cloneImpl() const;

public:
  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for branch instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  // The placement-new argument (1 or 3) is the operand count: an
  // unconditional branch has one operand, a conditional branch three.
  static BranchInst *Create(BasicBlock *IfTrue,
                            Instruction *InsertBefore = nullptr) {
    return new(1) BranchInst(IfTrue, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, Instruction *InsertBefore = nullptr) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
    return new(1) BranchInst(IfTrue, InsertAtEnd);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, BasicBlock *InsertAtEnd) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool isUnconditional() const { return getNumOperands() == 1; }
  bool isConditional()   const { return getNumOperands() == 3; }

  Value *getCondition() const {
    assert(isConditional() && "Cannot get condition of an uncond branch!");
    return Op<-3>();
  }

  void setCondition(Value *V) {
    assert(isConditional() && "Cannot set condition of unconditional branch!");
    Op<-3>() = V;
  }

  unsigned getNumSuccessors() const { return 1+isConditional(); }

  // Successors are addressed from op_end(): successor 0 is the last operand,
  // successor 1 (if any) the one before it.
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
    return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
    *(&Op<-1>() - idx) = NewSucc;
  }

  /// Swap the successors of this branch instruction.
  ///
  /// Swaps the successors of the branch instruction. This also swaps any
  /// branch weight metadata associated with the instruction so that it
  /// continues to map correctly to each operand.
  void swapSuccessors();

  // Skip the condition operand (present only on conditional branches) so the
  // range covers exactly the successor operands.
  iterator_range<succ_op_iterator> successors() {
    return make_range(
        succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
        succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(
                          std::next(value_op_begin(), isConditional() ? 1 : 0)),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Br);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)

//===----------------------------------------------------------------------===//
//                               SwitchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Multiway switch
///
class SwitchInst : public Instruction {
  unsigned ReservedSpace;

  // Operand[0]    = Value to switch on
  // Operand[1]    = Default basic block destination
  // Operand[2n  ] = Value to match
  // Operand[2n+1] = BasicBlock to go to on match
  SwitchInst(const SwitchInst &SI);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor can also
  /// auto-insert before another instruction.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor also
  /// auto-inserts at the end of the specified BasicBlock.
3263 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3264 BasicBlock *InsertAtEnd); 3265 3266 // allocate space for exactly zero operands 3267 void *operator new(size_t S) { return User::operator new(S); } 3268 3269 void init(Value *Value, BasicBlock *Default, unsigned NumReserved); 3270 void growOperands(); 3271 3272 protected: 3273 // Note: Instruction needs to be a friend here to call cloneImpl. 3274 friend class Instruction; 3275 3276 SwitchInst *cloneImpl() const; 3277 3278 public: 3279 void operator delete(void *Ptr) { User::operator delete(Ptr); } 3280 3281 // -2 3282 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); 3283 3284 template <typename CaseHandleT> class CaseIteratorImpl; 3285 3286 /// A handle to a particular switch case. It exposes a convenient interface 3287 /// to both the case value and the successor block. 3288 /// 3289 /// We define this as a template and instantiate it to form both a const and 3290 /// non-const handle. 3291 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> 3292 class CaseHandleImpl { 3293 // Directly befriend both const and non-const iterators. 3294 friend class SwitchInst::CaseIteratorImpl< 3295 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; 3296 3297 protected: 3298 // Expose the switch type we're parameterized with to the iterator. 3299 using SwitchInstType = SwitchInstT; 3300 3301 SwitchInstT *SI; 3302 ptrdiff_t Index; 3303 3304 CaseHandleImpl() = default; 3305 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} 3306 3307 public: 3308 /// Resolves case value for current case. 3309 ConstantIntT *getCaseValue() const { 3310 assert((unsigned)Index < SI->getNumCases() && 3311 "Index out the number of cases."); 3312 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); 3313 } 3314 3315 /// Resolves successor for current case. 
    /// Resolves the successor block for the current case (or the default
    /// destination when this handle denotes the default pseudo-case).
    BasicBlockT *getCaseSuccessor() const {
      assert(((unsigned)Index < SI->getNumCases() ||
              (unsigned)Index == DefaultPseudoIndex) &&
             "Index out the number of cases.");
      return SI->getSuccessor(getSuccessorIndex());
    }

    /// Returns number of current case.
    unsigned getCaseIndex() const { return Index; }

    /// Returns successor index for current case successor.
    unsigned getSuccessorIndex() const {
      assert(((unsigned)Index == DefaultPseudoIndex ||
              (unsigned)Index < SI->getNumCases()) &&
             "Index out the number of cases.");
      // Successor 0 is the default destination; case N maps to successor N+1.
      return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
    }

    bool operator==(const CaseHandleImpl &RHS) const {
      assert(SI == RHS.SI && "Incompatible operators.");
      return Index == RHS.Index;
    }
  };

  using ConstCaseHandle =
      CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;

  class CaseHandle
      : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
    friend class SwitchInst::CaseIteratorImpl<CaseHandle>;

  public:
    CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}

    /// Sets the new value for current case.
    void setValue(ConstantInt *V) const {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out the number of cases.");
      // Case values live at even operand slots: operand 2*i + 2 holds the
      // value of case i (operands 0/1 are the condition and default dest).
      SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
    }

    /// Sets the new successor for current case.
    void setSuccessor(BasicBlock *S) const {
      SI->setSuccessor(getSuccessorIndex(), S);
    }
  };

  /// Random-access iterator over the cases of a SwitchInst, parameterized on
  /// the (const or mutable) case-handle type it yields.
  template <typename CaseHandleT>
  class CaseIteratorImpl
      : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
                                    std::random_access_iterator_tag,
                                    const CaseHandleT> {
    using SwitchInstT = typename CaseHandleT::SwitchInstType;

    CaseHandleT Case;

  public:
    /// Default constructed iterator is in an invalid state until assigned to
    /// a case for a particular switch.
    CaseIteratorImpl() = default;

    /// Initializes case iterator for given SwitchInst and for given
    /// case number.
    CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}

    /// Initializes case iterator for given SwitchInst and for given
    /// successor index.
    static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
                                               unsigned SuccessorIndex) {
      assert(SuccessorIndex < SI->getNumSuccessors() &&
             "Successor index # out of range!");
      // Successor 0 is the default destination (the default pseudo-case);
      // successor N maps to case N-1.
      return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
                                 : CaseIteratorImpl(SI, DefaultPseudoIndex);
    }

    /// Support converting to the const variant. This will be a no-op for const
    /// variant.
    operator CaseIteratorImpl<ConstCaseHandle>() const {
      return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
    }

    CaseIteratorImpl &operator+=(ptrdiff_t N) {
      // Check index correctness after addition.
      // Note: Index == getNumCases() means end().
      assert(Case.Index + N >= 0 &&
             (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index += N;
      return *this;
    }
    CaseIteratorImpl &operator-=(ptrdiff_t N) {
      // Check index correctness after subtraction.
      // Note: Case.Index == getNumCases() means end().
      assert(Case.Index - N >= 0 &&
             (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index -= N;
      return *this;
    }
    ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index - RHS.Case.Index;
    }
    bool operator==(const CaseIteratorImpl &RHS) const {
      return Case == RHS.Case;
    }
    bool operator<(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index < RHS.Case.Index;
    }
    const CaseHandleT &operator*() const { return Case; }
  };

  using CaseIt = CaseIteratorImpl<CaseHandle>;
  using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases,
                            Instruction *InsertBefore = nullptr) {
    return new SwitchInst(Value, Default, NumCases, InsertBefore);
  }

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases, BasicBlock *InsertAtEnd) {
    return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for Switch stmt
  Value *getCondition() const { return getOperand(0); }
  void setCondition(Value *V) { setOperand(0, V); }

  BasicBlock *getDefaultDest() const {
    return cast<BasicBlock>(getOperand(1));
  }

  void setDefaultDest(BasicBlock *DefaultCase) {
    setOperand(1, reinterpret_cast<Value*>(DefaultCase));
  }

  /// Return the number of 'cases' in this switch instruction, excluding the
  /// default case.
  unsigned getNumCases() const {
    // Operands are (condition, default dest) followed by one (value,
    // successor) pair per case, hence numOperands/2 - 1 cases.
    return getNumOperands()/2 - 1;
  }

  /// Returns a read/write iterator that points to the first case in the
  /// SwitchInst.
  CaseIt case_begin() {
    return CaseIt(this, 0);
  }

  /// Returns a read-only iterator that points to the first case in the
  /// SwitchInst.
  ConstCaseIt case_begin() const {
    return ConstCaseIt(this, 0);
  }

  /// Returns a read/write iterator that points one past the last in the
  /// SwitchInst.
  CaseIt case_end() {
    return CaseIt(this, getNumCases());
  }

  /// Returns a read-only iterator that points one past the last in the
  /// SwitchInst.
  ConstCaseIt case_end() const {
    return ConstCaseIt(this, getNumCases());
  }

  /// Iteration adapter for range-for loops.
  iterator_range<CaseIt> cases() {
    return make_range(case_begin(), case_end());
  }

  /// Constant iteration adapter for range-for loops.
  iterator_range<ConstCaseIt> cases() const {
    return make_range(case_begin(), case_end());
  }

  /// Returns an iterator that points to the default case.
  /// Note: this iterator allows to resolve successor only. Attempt
  /// to resolve case value causes an assertion.
  /// Also note, that increment and decrement also causes an assertion and
  /// makes iterator invalid.
  CaseIt case_default() {
    return CaseIt(this, DefaultPseudoIndex);
  }
  ConstCaseIt case_default() const {
    return ConstCaseIt(this, DefaultPseudoIndex);
  }

  /// Search all of the case values for the specified constant. If it is
  /// explicitly handled, return the case iterator of it, otherwise return
  /// default case iterator to indicate that it is handled by the default
  /// handler.
  CaseIt findCaseValue(const ConstantInt *C) {
    // Delegate to the const overload, then rebuild a mutable iterator from
    // the case index it found.
    return CaseIt(
        this,
        const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
  }
  ConstCaseIt findCaseValue(const ConstantInt *C) const {
    // Linear scan over the cases; falls back to the default pseudo-case when
    // no case value matches C.
    ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
      return Case.getCaseValue() == C;
    });
    if (I != case_end())
      return I;

    return case_default();
  }

  /// Finds the unique case value for a given successor. Returns null if the
  /// successor is not found, not unique, or is the default case.
  ConstantInt *findCaseDest(BasicBlock *BB) {
    if (BB == getDefaultDest())
      return nullptr;

    ConstantInt *CI = nullptr;
    for (auto Case : cases()) {
      if (Case.getCaseSuccessor() != BB)
        continue;

      if (CI)
        return nullptr; // Multiple cases lead to BB.

      CI = Case.getCaseValue();
    }

    return CI;
  }

  /// Add an entry to the switch instruction.
  /// Note:
  /// This action invalidates case_end(). Old case_end() iterator will
  /// point to the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest);

  /// This method removes the specified case and its successor from the switch
  /// instruction. Note that this operation may reorder the remaining cases at
  /// index idx and above.
  /// Note:
  /// This action invalidates iterators for all cases following the one removed,
  /// including the case_end() iterator. It returns an iterator for the next
  /// case.
  CaseIt removeCase(CaseIt I);

  unsigned getNumSuccessors() const { return getNumOperands()/2; }
  BasicBlock *getSuccessor(unsigned idx) const {
    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
    // Successors live at odd operand slots: operand 2*idx + 1.
    return cast<BasicBlock>(getOperand(idx*2+1));
  }
  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
    setOperand(idx * 2 + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Switch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

/// A wrapper class to simplify modification of SwitchInst cases along with
/// their prof branch_weights metadata.
class SwitchInstProfUpdateWrapper {
  SwitchInst &SI;
  // Cached branch weights, one per successor; None when SI carries no
  // (usable) prof metadata.
  Optional<SmallVector<uint32_t, 8> > Weights = None;
  // Set when the weights were modified; the destructor then writes the
  // metadata back to SI.
  bool Changed = false;

protected:
  static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);

  MDNode *buildProfBranchWeightsMD();

  void init();

public:
  using CaseWeightOpt = Optional<uint32_t>;
  SwitchInst *operator->() { return &SI; }
  SwitchInst &operator*() { return SI; }
  operator SwitchInst *() { return &SI; }

  SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }

  ~SwitchInstProfUpdateWrapper() {
    if (Changed)
      SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
  }

  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
  /// correspondent branch weight.
  SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);

  /// Delegate the call to the underlying SwitchInst::addCase() and set the
  /// specified branch weight for the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);

  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
  /// this object to not touch the underlying SwitchInst in destructor.
  SymbolTableList<Instruction>::iterator eraseFromParent();

  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
  CaseWeightOpt getSuccessorWeight(unsigned idx);

  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
};

template <>
struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)

//===----------------------------------------------------------------------===//
// IndirectBrInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Indirect Branch Instruction.
///
class IndirectBrInst : public Instruction {
  // Hung-off operands: ReservedSpace tracks the allocated capacity, which may
  // exceed the number of operands currently in use.
  unsigned ReservedSpace;

  // Operand[0] = Address to jump to
  // Operand[n+1] = n-th destination
  IndirectBrInst(const IndirectBrInst &IBI);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor can also
  /// autoinsert before another instruction.
  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor also
  /// autoinserts at the end of the specified BasicBlock.
  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *Address, unsigned NumDests);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  IndirectBrInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for indirectbr instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                Instruction *InsertBefore = nullptr) {
    return new IndirectBrInst(Address, NumDests, InsertBefore);
  }

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                BasicBlock *InsertAtEnd) {
    return new IndirectBrInst(Address, NumDests, InsertAtEnd);
  }

  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for IndirectBrInst instruction.
  Value *getAddress() { return getOperand(0); }
  const Value *getAddress() const { return getOperand(0); }
  void setAddress(Value *V) { setOperand(0, V); }

  /// return the number of possible destinations in this
  /// indirectbr instruction.
  unsigned getNumDestinations() const { return getNumOperands()-1; }

  /// Return the specified destination.
  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }

  /// Add a destination.
  ///
  void addDestination(BasicBlock *Dest);

  /// This method removes the specified successor from the
  /// indirectbr instruction.
  void removeDestination(unsigned i);

  unsigned getNumSuccessors() const { return getNumOperands()-1; }
  BasicBlock *getSuccessor(unsigned i) const {
    // Operand 0 is the address; destinations start at operand 1.
    return cast<BasicBlock>(getOperand(i+1));
  }
  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    setOperand(i + 1, NewSucc);
  }

  iterator_range<succ_op_iterator> successors() {
    // Skip operand 0 (the jump address); every remaining operand is a
    // destination block.
    return make_range(succ_op_iterator(std::next(value_op_begin())),
                      succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(std::next(value_op_begin())),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::IndirectBr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)

//===----------------------------------------------------------------------===//
// InvokeInst Class
//===----------------------------------------------------------------------===//

/// Invoke instruction. The SubclassData field is used to hold the
/// calling convention of the call.
///
class InvokeInst : public CallBase {
  /// The number of operands for this call beyond the called function,
  /// arguments, and operand bundles.
  static constexpr int NumExtraOperands = 2;

  /// The index from the end of the operand array to the normal destination.
  static constexpr int NormalDestOpEndIdx = -3;

  /// The index from the end of the operand array to the unwind destination.
  static constexpr int UnwindDestOpEndIdx = -2;

  InvokeInst(const InvokeInst &BI);

  /// Construct an InvokeInst given a range of arguments.
  ///
  /// Construct an InvokeInst from a range of arguments
  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, Instruction *InsertBefore);

  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
            BasicBlock *IfException, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InvokeInst *cloneImpl() const;

public:
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size());
    // Placement new: the operand count sizes the co-allocated operand array.
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
                   NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
                   NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, None, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertAtEnd);
  }

  /// Create a clone of \p II with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned invoke instruction is identical to \p II in every way except
  /// that the operand bundles for the new instruction are set to the operand
  /// bundles in \p Bundles.
  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
                            Instruction *InsertPt = nullptr);

  // get*Dest - Return the destination basic blocks...
  BasicBlock *getNormalDest() const {
    // Destinations are addressed from the end of the operand array; see
    // NormalDestOpEndIdx / UnwindDestOpEndIdx above.
    return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
  }
  BasicBlock *getUnwindDest() const {
    return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
  }
  void setNormalDest(BasicBlock *B) {
    Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }
  void setUnwindDest(BasicBlock *B) {
    Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }

  /// Get the landingpad instruction from the landing pad
  /// block (the unwind destination).
  LandingPadInst *getLandingPadInst() const;

  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < 2 && "Successor # out of range for invoke!");
    return i == 0 ? getNormalDest() : getUnwindDest();
  }

  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    assert(i < 2 && "Successor # out of range for invoke!");
    if (i == 0)
      setNormalDest(NewSucc);
    else
      setUnwindDest(NewSucc);
  }

  unsigned getNumSuccessors() const { return 2; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Invoke);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}

InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}

//===----------------------------------------------------------------------===//
// CallBrInst Class
//===----------------------------------------------------------------------===//

/// CallBr instruction, tracking function calls that may not return control but
/// instead transfer it to a third location. The SubclassData field is used to
/// hold the calling convention of the call.
///
class CallBrInst : public CallBase {

  unsigned NumIndirectDests;

  CallBrInst(const CallBrInst &BI);

  /// Construct a CallBrInst given a range of arguments.
  ///
  /// Construct a CallBrInst from a range of arguments
  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                    ArrayRef<BasicBlock *> IndirectDests,
                    ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, Instruction *InsertBefore);

  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                    ArrayRef<BasicBlock *> IndirectDests,
                    ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
            ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
                                int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallBrInst *cloneImpl() const;

public:
  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
    // Placement new: the operand count sizes the co-allocated operand array.
    return new (NumOperands)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
                   NumOperands, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
                                         CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
                   NumOperands, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
    return new (NumOperands)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
                   NumOperands, NameStr, InsertAtEnd);
  }

  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
                                         CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
                   NumOperands, NameStr, InsertAtEnd);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, Bundles, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, NameStr, InsertAtEnd);
  }

  static CallBrInst *Create(FunctionCallee Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CBI with a different set of operand bundles and
  /// insert it before \p InsertPt.
4116 /// 4117 /// The returned callbr instruction is identical to \p CBI in every way 4118 /// except that the operand bundles for the new instruction are set to the 4119 /// operand bundles in \p Bundles. 4120 static CallBrInst *Create(CallBrInst *CBI, 4121 ArrayRef<OperandBundleDef> Bundles, 4122 Instruction *InsertPt = nullptr); 4123 4124 /// Return the number of callbr indirect dest labels. 4125 /// 4126 unsigned getNumIndirectDests() const { return NumIndirectDests; } 4127 4128 /// getIndirectDestLabel - Return the i-th indirect dest label. 4129 /// 4130 Value *getIndirectDestLabel(unsigned i) const { 4131 assert(i < getNumIndirectDests() && "Out of bounds!"); 4132 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1); 4133 } 4134 4135 Value *getIndirectDestLabelUse(unsigned i) const { 4136 assert(i < getNumIndirectDests() && "Out of bounds!"); 4137 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1); 4138 } 4139 4140 // Return the destination basic blocks... 4141 BasicBlock *getDefaultDest() const { 4142 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); 4143 } 4144 BasicBlock *getIndirectDest(unsigned i) const { 4145 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); 4146 } 4147 SmallVector<BasicBlock *, 16> getIndirectDests() const { 4148 SmallVector<BasicBlock *, 16> IndirectDests; 4149 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) 4150 IndirectDests.push_back(getIndirectDest(i)); 4151 return IndirectDests; 4152 } 4153 void setDefaultDest(BasicBlock *B) { 4154 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); 4155 } 4156 void setIndirectDest(unsigned i, BasicBlock *B) { 4157 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); 4158 } 4159 4160 BasicBlock *getSuccessor(unsigned i) const { 4161 assert(i < getNumSuccessors() + 1 && 4162 "Successor # out of range for callbr!"); 4163 return i == 0 ? 
getDefaultDest() : getIndirectDest(i - 1); 4164 } 4165 4166 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4167 assert(i < getNumIndirectDests() + 1 && 4168 "Successor # out of range for callbr!"); 4169 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); 4170 } 4171 4172 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } 4173 4174 BlockAddress *getBlockAddressForIndirectDest(unsigned DestNo) const; 4175 4176 // Methods for support type inquiry through isa, cast, and dyn_cast: 4177 static bool classof(const Instruction *I) { 4178 return (I->getOpcode() == Instruction::CallBr); 4179 } 4180 static bool classof(const Value *V) { 4181 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4182 } 4183 4184 private: 4185 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4186 // method so that subclasses cannot accidentally use it. 4187 template <typename Bitfield> 4188 void setSubclassData(typename Bitfield::Type Value) { 4189 Instruction::setSubclassData<Bitfield>(Value); 4190 } 4191 }; 4192 4193 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4194 ArrayRef<BasicBlock *> IndirectDests, 4195 ArrayRef<Value *> Args, 4196 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4197 const Twine &NameStr, Instruction *InsertBefore) 4198 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4199 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4200 InsertBefore) { 4201 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4202 } 4203 4204 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4205 ArrayRef<BasicBlock *> IndirectDests, 4206 ArrayRef<Value *> Args, 4207 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4208 const Twine &NameStr, BasicBlock *InsertAtEnd) 4209 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4210 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4211 InsertAtEnd) 
{ 4212 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4213 } 4214 4215 //===----------------------------------------------------------------------===// 4216 // ResumeInst Class 4217 //===----------------------------------------------------------------------===// 4218 4219 //===--------------------------------------------------------------------------- 4220 /// Resume the propagation of an exception. 4221 /// 4222 class ResumeInst : public Instruction { 4223 ResumeInst(const ResumeInst &RI); 4224 4225 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); 4226 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); 4227 4228 protected: 4229 // Note: Instruction needs to be a friend here to call cloneImpl. 4230 friend class Instruction; 4231 4232 ResumeInst *cloneImpl() const; 4233 4234 public: 4235 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { 4236 return new(1) ResumeInst(Exn, InsertBefore); 4237 } 4238 4239 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { 4240 return new(1) ResumeInst(Exn, InsertAtEnd); 4241 } 4242 4243 /// Provide fast operand accessors 4244 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4245 4246 /// Convenience accessor. 
  /// Return the exception value being propagated (operand 0).
  Value *getValue() const { return Op<0>(); }

  /// resume has no successor basic blocks.
  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Resume;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Private successor accessors: a ResumeInst has no successors, so calling
  // either of these is a hard error.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ResumeInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    llvm_unreachable("ResumeInst has no successors!");
  }
};

template <>
struct OperandTraits<ResumeInst> :
    public FixedNumOperandTraits<ResumeInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)

//===----------------------------------------------------------------------===//
// CatchSwitchInst Class
//===----------------------------------------------------------------------===//
class CatchSwitchInst : public Instruction {
  using UnwindDestField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated. NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  // Operand[0] = Outer scope
  // Operand[1] = Unwind block destination
  // Operand[n] = BasicBlock to go to on match
  CatchSwitchInst(const CatchSwitchInst &CSI);

  /// Create a new switch instruction, specifying a
  /// default destination. The number of additional handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor can also autoinsert before another instruction.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a
  /// default destination. The number of additional handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor also autoinserts at the end of the specified BasicBlock.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands (operands are hung off and
  // grown on demand via growOperands; see HungoffOperandTraits below)
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
  void growOperands(unsigned Size);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchSwitchInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { return User::operator delete(Ptr); }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertBefore);
  }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers, const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for CatchSwitch stmt
  Value *getParentPad() const { return getOperand(0); }
  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }

  // Accessor Methods for CatchSwitch stmt
  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }
  // When present, the unwind destination occupies operand 1.
  BasicBlock *getUnwindDest() const {
    if (hasUnwindDest())
      return cast<BasicBlock>(getOperand(1));
    return nullptr;
  }
  void setUnwindDest(BasicBlock *UnwindDest) {
    assert(UnwindDest);
    assert(hasUnwindDest());
    setOperand(1, UnwindDest);
  }

  /// return the number of 'handlers' in this catchswitch
  /// instruction, except the default handler
  unsigned getNumHandlers() const {
    if (hasUnwindDest())
      return getNumOperands() - 2;
    return getNumOperands() - 1;
  }

private:
  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
  static const BasicBlock *handler_helper(const Value *V) {
    return cast<BasicBlock>(V);
  }

public:
  using DerefFnTy = BasicBlock *(*)(Value *);
  using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
  using handler_range = iterator_range<handler_iterator>;
  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
  using const_handler_iterator =
      mapped_iterator<const_op_iterator, ConstDerefFnTy>;
  using const_handler_range = iterator_range<const_handler_iterator>;

  /// Returns an iterator that points to the first handler in CatchSwitchInst.
  /// Skips the parent pad (operand 0) and, if present, the unwind destination.
  handler_iterator handler_begin() {
    op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return handler_iterator(It, DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points to the first handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_begin() const {
    const_op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
  }

  /// Returns a read-only iterator that points one past the last
  /// handler in the CatchSwitchInst.
  handler_iterator handler_end() {
    return handler_iterator(op_end(), DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points one past the last handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_end() const {
    return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
  }

  /// iteration adapter for range-for loops.
  handler_range handlers() {
    return make_range(handler_begin(), handler_end());
  }

  /// iteration adapter for range-for loops.
  const_handler_range handlers() const {
    return make_range(handler_begin(), handler_end());
  }

  /// Add an entry to the switch instruction...
  /// Note:
  /// This action invalidates handler_end(). Old handler_end() iterator will
  /// point to the added handler.
  void addHandler(BasicBlock *Dest);

  void removeHandler(handler_iterator HI);

  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    return cast<BasicBlock>(getOperand(Idx + 1));
  }
  void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    setOperand(Idx + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CatchSwitch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)

//===----------------------------------------------------------------------===//
//
// CleanupPadInst Class
//===----------------------------------------------------------------------===//
class CleanupPadInst : public FuncletPadInst {
private:
  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
                          unsigned Values, const Twine &NameStr,
                          Instruction *InsertBefore)
      : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
                       NameStr, InsertBefore) {}
  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
                          unsigned Values, const Twine &NameStr,
                          BasicBlock *InsertAtEnd)
      : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
                       NameStr, InsertAtEnd) {}

public:
  // Operand count is the parent pad plus one operand per argument.
  static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
                                const Twine &NameStr = "",
                                Instruction *InsertBefore = nullptr) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
  }

  static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
                                const Twine &NameStr, BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CleanupPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// CatchPadInst Class
//===----------------------------------------------------------------------===//
class CatchPadInst : public FuncletPadInst {
private:
  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
                        unsigned Values, const Twine &NameStr,
                        Instruction *InsertBefore)
      : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
                       NameStr, InsertBefore) {}
  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
                        unsigned Values, const Twine &NameStr,
                        BasicBlock *InsertAtEnd)
      : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
                       NameStr, InsertAtEnd) {}

public:
  // Operand count is the owning catchswitch plus one operand per argument.
  static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
                              const Twine &NameStr = "",
                              Instruction *InsertBefore = nullptr) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
  }

  static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
                              const Twine &NameStr, BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
  }

  /// Convenience accessors
  // The owning catchswitch is stored as the last operand (Op<-1>).
  CatchSwitchInst *getCatchSwitch() const {
    return cast<CatchSwitchInst>(Op<-1>());
  }
  void setCatchSwitch(Value *CatchSwitch) {
    assert(CatchSwitch);
    Op<-1>() = CatchSwitch;
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CatchPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// CatchReturnInst Class
//===----------------------------------------------------------------------===//

class CatchReturnInst : public Instruction {
  CatchReturnInst(const CatchReturnInst &RI);
  CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
  CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);

  void init(Value *CatchPad, BasicBlock *BB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchReturnInst *cloneImpl() const;

public:
  // new (2): space for the catchpad operand and the successor-block operand.
  static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore = nullptr) {
    assert(CatchPad);
    assert(BB);
    return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
  }

  static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd) {
    assert(CatchPad);
    assert(BB);
    return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessors.
  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
  void setCatchPad(CatchPadInst *CatchPad) {
    assert(CatchPad);
    Op<0>() = CatchPad;
  }

  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
  void setSuccessor(BasicBlock *NewSucc) {
    assert(NewSucc);
    Op<1>() = NewSucc;
  }
  unsigned getNumSuccessors() const { return 1; }

  /// Get the parentPad of this catchret's catchpad's catchswitch.
  /// The successor block is implicitly a member of this funclet.
  Value *getCatchSwitchParentPad() const {
    return getCatchPad()->getCatchSwitch()->getParentPad();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CatchRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Indexed successor accessors forward to the single-successor forms above.
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
    return getSuccessor();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
    setSuccessor(B);
  }
};

template <>
struct OperandTraits<CatchReturnInst>
    : public FixedNumOperandTraits<CatchReturnInst, 2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)

//===----------------------------------------------------------------------===//
// CleanupReturnInst Class
//===----------------------------------------------------------------------===//

class CleanupReturnInst : public Instruction {
  using UnwindDestField = BoolBitfieldElementT<0>;

private:
  CleanupReturnInst(const CleanupReturnInst &RI);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    Instruction *InsertBefore = nullptr);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    BasicBlock *InsertAtEnd);

  void init(Value *CleanupPad, BasicBlock *UnwindBB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CleanupReturnInst *cloneImpl() const;

public:
  // Operand count: the cleanuppad, plus the unwind destination if present.
  static CleanupReturnInst *Create(Value *CleanupPad,
                                   BasicBlock *UnwindBB = nullptr,
                                   Instruction *InsertBefore = nullptr) {
    assert(CleanupPad);
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
  }

  static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
                                   BasicBlock *InsertAtEnd) {
    assert(CleanupPad);
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }

  /// Convenience accessor.
  CleanupPadInst *getCleanupPad() const {
    return cast<CleanupPadInst>(Op<0>());
  }
  void setCleanupPad(CleanupPadInst *CleanupPad) {
    assert(CleanupPad);
    Op<0>() = CleanupPad;
  }

  unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }

  // Operand 1 exists only when hasUnwindDest() is true.
  BasicBlock *getUnwindDest() const {
    return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
  }
  void setUnwindDest(BasicBlock *NewDest) {
    assert(NewDest);
    assert(hasUnwindDest());
    Op<1>() = NewDest;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CleanupRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx == 0);
    return getUnwindDest();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx == 0);
    setUnwindDest(B);
  }

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

template <>
struct OperandTraits<CleanupReturnInst>
    : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)

//===----------------------------------------------------------------------===//
// UnreachableInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// This function has undefined behavior. In particular, the
/// presence of this instruction indicates some higher level knowledge that the
/// end of the block cannot be reached.
///
class UnreachableInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  UnreachableInst *cloneImpl() const;

public:
  explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
  explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Unreachable;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Private successor accessors: an UnreachableInst has no successors, so
  // calling either of these is a hard error.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("UnreachableInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("UnreachableInst has no successors!");
  }
};

//===----------------------------------------------------------------------===//
// TruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of integer types.
class TruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical TruncInst
  TruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  TruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The (smaller) type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  TruncInst(
    Value *S,                     ///< The value to be truncated
    Type *Ty,                     ///< The (smaller) type to truncate to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Trunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// ZExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents zero extension of integer types.
class ZExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ZExtInst
  ZExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  ZExtInst(
    Value *S,                           ///< The value to be zero extended
    Type *Ty,                           ///< The type to zero extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end semantics.
  ZExtInst(
    Value *S,                     ///< The value to be zero extended
    Type *Ty,                     ///< The type to zero extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == ZExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// SExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SExtInst
  SExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SExtInst(
    Value *S,                           ///< The value to be sign extended
    Type *Ty,                           ///< The type to sign extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SExtInst(
    Value *S,                     ///< The value to be sign extended
    Type *Ty,                     ///< The type to sign extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPTruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPTruncInst
  FPTruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPTruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPTruncInst(
    Value *S,                     ///< The value to be truncated
    Type *Ty,                     ///< The type to truncate to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPTrunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPExtInst
  FPExtInst *cloneImpl() const;

public:
  // Interface mirrors the other CastInst subclasses in this file.
  /// Constructor with insert-before-instruction semantics
  FPExtInst(
    Value *S,                           ///< The value to be extended
    Type *Ty,                           ///< The type to extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPExtInst(
    Value *S,                     ///< The value to be extended
    Type *Ty,                     ///< The type to extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// UIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical UIToFPInst
  UIToFPInst *cloneImpl() const;

public:
  // Interface mirrors the other CastInst subclasses in this file.
  /// Constructor with insert-before-instruction semantics
  UIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  UIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == UIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// SIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SIToFPInst
  SIToFPInst *cloneImpl() const;

public:
  // Interface mirrors the other CastInst subclasses in this file.
  /// Constructor with insert-before-instruction semantics
  SIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPToUIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to unsigned integer
class FPToUIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToUIInst
  FPToUIInst *cloneImpl() const;

public:
  // Interface mirrors the other CastInst subclasses in this file.
  /// Constructor with insert-before-instruction semantics
  FPToUIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToUIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToUI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPToSIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to signed integer.
class FPToSIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToSIInst
  FPToSIInst *cloneImpl() const;

public:
  // Interface mirrors the other CastInst subclasses in this file.
  /// Constructor with insert-before-instruction semantics
  FPToSIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToSIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToSI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// IntToPtrInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
  // Note: unlike its sibling cast classes, this class keeps the friendship
  // and cloneImpl declarations in the public section.
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Constructor with insert-before-instruction semantics
  IntToPtrInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  IntToPtrInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Clone an identical IntToPtrInst.
  IntToPtrInst *cloneImpl() const;

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == IntToPtr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 PtrToIntInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from a pointer to an integer.
class PtrToIntInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical PtrToIntInst.
  PtrToIntInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  PtrToIntInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  PtrToIntInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Gets the pointer operand.
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand.
  const Value *getPointerOperand() const { return getOperand(0); }
  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == PtrToInt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 BitCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  BitCastInst(
    Value *S,                     ///< The value to be cast
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                          AddrSpaceCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  AddrSpaceCastInst(
    Value *S,                     ///< The value to be cast
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand.
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand.
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};

//===----------------------------------------------------------------------===//
//                             Helper functions
//===----------------------------------------------------------------------===//

/// A helper function that returns the pointer operand of a load or store
/// instruction. Returns nullptr if not load or store.
inline const Value *getLoadStorePointerOperand(const Value *V) {
  if (auto *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerOperand();
  if (auto *Store = dyn_cast<StoreInst>(V))
    return Store->getPointerOperand();
  return nullptr;
}
inline Value *getLoadStorePointerOperand(Value *V) {
  // Safe const_cast: V itself is non-const, so the returned operand may be
  // handed back as non-const.
  return const_cast<Value *>(
      getLoadStorePointerOperand(static_cast<const Value *>(V)));
}

/// A helper function that returns the pointer operand of a load, store
/// or GEP instruction. Returns nullptr if not load, store, or GEP.
inline const Value *getPointerOperand(const Value *V) {
  if (auto *Ptr = getLoadStorePointerOperand(V))
    return Ptr;
  if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
    return Gep->getPointerOperand();
  return nullptr;
}
inline Value *getPointerOperand(Value *V) {
  return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
}

/// A helper function that returns the alignment of a load or store
/// instruction.
inline Align getLoadStoreAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlign();
  return cast<StoreInst>(I)->getAlign();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
inline unsigned getLoadStoreAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns the type of a load or store instruction.
inline Type *getLoadStoreType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns an atomic operation's sync scope; returns
/// None if it is not an atomic operation.
inline Optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
  if (!I->isAtomic())
    return None;
  if (auto *AI = dyn_cast<LoadInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<StoreInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<FenceInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicRMWInst>(I))
    return AI->getSyncScopeID();
  // Every atomic instruction kind is handled above; anything else reaching
  // here means a new atomic instruction was added without updating this list.
  llvm_unreachable("unhandled atomic operation");
}

//===----------------------------------------------------------------------===//
//                              FreezeInst Class
//===----------------------------------------------------------------------===//

/// This class represents a freeze function that returns a random concrete
/// value if an operand is either a poison value or an undef value.
class FreezeInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
5408 friend class Instruction; 5409 5410 /// Clone an identical FreezeInst 5411 FreezeInst *cloneImpl() const; 5412 5413 public: 5414 explicit FreezeInst(Value *S, 5415 const Twine &NameStr = "", 5416 Instruction *InsertBefore = nullptr); 5417 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd); 5418 5419 // Methods for support type inquiry through isa, cast, and dyn_cast: 5420 static inline bool classof(const Instruction *I) { 5421 return I->getOpcode() == Freeze; 5422 } 5423 static inline bool classof(const Value *V) { 5424 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5425 } 5426 }; 5427 5428 } // end namespace llvm 5429 5430 #endif // LLVM_IR_INSTRUCTIONS_H 5431