1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file exposes the class definitions of all of the subclasses of the 10 // Instruction class. This is meant to be an easy way to get access to all 11 // instruction subclasses. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #ifndef LLVM_IR_INSTRUCTIONS_H 16 #define LLVM_IR_INSTRUCTIONS_H 17 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/Bitfields.h" 20 #include "llvm/ADT/MapVector.h" 21 #include "llvm/ADT/None.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/ADT/StringRef.h" 25 #include "llvm/ADT/Twine.h" 26 #include "llvm/ADT/iterator.h" 27 #include "llvm/ADT/iterator_range.h" 28 #include "llvm/IR/Attributes.h" 29 #include "llvm/IR/BasicBlock.h" 30 #include "llvm/IR/CFG.h" 31 #include "llvm/IR/Constant.h" 32 #include "llvm/IR/DerivedTypes.h" 33 #include "llvm/IR/InstrTypes.h" 34 #include "llvm/IR/Instruction.h" 35 #include "llvm/IR/OperandTraits.h" 36 #include "llvm/IR/Type.h" 37 #include "llvm/IR/Use.h" 38 #include "llvm/IR/User.h" 39 #include "llvm/IR/Value.h" 40 #include "llvm/Support/AtomicOrdering.h" 41 #include "llvm/Support/Casting.h" 42 #include "llvm/Support/ErrorHandling.h" 43 #include <cassert> 44 #include <cstddef> 45 #include <cstdint> 46 #include <iterator> 47 48 namespace llvm { 49 50 class APInt; 51 class ConstantInt; 52 class DataLayout; 53 54 //===----------------------------------------------------------------------===// 55 // AllocaInst Class 56 //===----------------------------------------------------------------------===// 57 58 /// an instruction to allocate memory on the stack 59 class AllocaInst : public UnaryInstruction { 60 Type *AllocatedType; 61 62 using AlignmentField = AlignmentBitfieldElementT<0>; 63 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>; 64 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>; 65 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField, 66 SwiftErrorField>(), 67 "Bitfields must be contiguous"); 68 69 protected: 70 // Note: Instruction needs to be a friend here to call cloneImpl. 71 friend class Instruction; 72 73 AllocaInst *cloneImpl() const; 74 75 public: 76 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 77 const Twine &Name, Instruction *InsertBefore); 78 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 79 const Twine &Name, BasicBlock *InsertAtEnd); 80 81 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 82 Instruction *InsertBefore); 83 AllocaInst(Type *Ty, unsigned AddrSpace, 84 const Twine &Name, BasicBlock *InsertAtEnd); 85 86 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, 87 const Twine &Name = "", Instruction *InsertBefore = nullptr); 88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, 89 const Twine &Name, BasicBlock *InsertAtEnd); 90 91 /// Return true if there is an allocation size parameter to the allocation 92 /// instruction that is not 1. 93 bool isArrayAllocation() const; 94 95 /// Get the number of elements allocated. For a simple allocation of a single 96 /// element, this will return a constant 1 value. 97 const Value *getArraySize() const { return getOperand(0); } 98 Value *getArraySize() { return getOperand(0); } 99 100 /// Overload to return most specific pointer type. 101 PointerType *getType() const { 102 return cast<PointerType>(Instruction::getType()); 103 } 104 105 /// Return the address space for the allocation. 106 unsigned getAddressSpace() const { 107 return getType()->getAddressSpace(); 108 } 109 110 /// Get allocation size in bits. Returns None if size can't be determined, 111 /// e.g. in case of a VLA. 112 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const; 113 114 /// Return the type that is being allocated by the instruction. 115 Type *getAllocatedType() const { return AllocatedType; } 116 /// for use only in special circumstances that need to generically 117 /// transform a whole instruction (eg: IR linking and vectorization). 118 void setAllocatedType(Type *Ty) { AllocatedType = Ty; } 119 120 /// Return the alignment of the memory that is being allocated by the 121 /// instruction. 122 Align getAlign() const { 123 return Align(1ULL << getSubclassData<AlignmentField>()); 124 } 125 126 void setAlignment(Align Align) { 127 setSubclassData<AlignmentField>(Log2(Align)); 128 } 129 130 // FIXME: Remove this one transition to Align is over. 131 uint64_t getAlignment() const { return getAlign().value(); } 132 133 /// Return true if this alloca is in the entry block of the function and is a 134 /// constant size. If so, the code generator will fold it into the 135 /// prolog/epilog code, so it is basically free. 136 bool isStaticAlloca() const; 137 138 /// Return true if this alloca is used as an inalloca argument to a call. Such 139 /// allocas are never considered static even if they are in the entry block. 140 bool isUsedWithInAlloca() const { 141 return getSubclassData<UsedWithInAllocaField>(); 142 } 143 144 /// Specify whether this alloca is used to represent the arguments to a call. 145 void setUsedWithInAlloca(bool V) { 146 setSubclassData<UsedWithInAllocaField>(V); 147 } 148 149 /// Return true if this alloca is used as a swifterror argument to a call. 150 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); } 151 /// Specify whether this alloca is used to represent a swifterror. 152 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); } 153 154 // Methods for support type inquiry through isa, cast, and dyn_cast: 155 static bool classof(const Instruction *I) { 156 return (I->getOpcode() == Instruction::Alloca); 157 } 158 static bool classof(const Value *V) { 159 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 160 } 161 162 private: 163 // Shadow Instruction::setInstructionSubclassData with a private forwarding 164 // method so that subclasses cannot accidentally use it. 165 template <typename Bitfield> 166 void setSubclassData(typename Bitfield::Type Value) { 167 Instruction::setSubclassData<Bitfield>(Value); 168 } 169 }; 170 171 //===----------------------------------------------------------------------===// 172 // LoadInst Class 173 //===----------------------------------------------------------------------===// 174 175 /// An instruction for reading from memory. This uses the SubclassData field in 176 /// Value to store whether or not the load is volatile. 177 class LoadInst : public UnaryInstruction { 178 using VolatileField = BoolBitfieldElementT<0>; 179 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; 180 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; 181 static_assert( 182 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), 183 "Bitfields must be contiguous"); 184 185 void AssertOK(); 186 187 protected: 188 // Note: Instruction needs to be a friend here to call cloneImpl. 189 friend class Instruction; 190 191 LoadInst *cloneImpl() const; 192 193 public: 194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, 195 Instruction *InsertBefore); 196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); 197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 198 Instruction *InsertBefore); 199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 200 BasicBlock *InsertAtEnd); 201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 202 Align Align, Instruction *InsertBefore = nullptr); 203 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 204 Align Align, BasicBlock *InsertAtEnd); 205 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 206 Align Align, AtomicOrdering Order, 207 SyncScope::ID SSID = SyncScope::System, 208 Instruction *InsertBefore = nullptr); 209 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 210 Align Align, AtomicOrdering Order, SyncScope::ID SSID, 211 BasicBlock *InsertAtEnd); 212 213 /// Return true if this is a load from a volatile memory location. 214 bool isVolatile() const { return getSubclassData<VolatileField>(); } 215 216 /// Specify whether this is a volatile load or not. 217 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 218 219 /// Return the alignment of the access that is being performed. 220 /// FIXME: Remove this function once transition to Align is over. 221 /// Use getAlign() instead. 222 uint64_t getAlignment() const { return getAlign().value(); } 223 224 /// Return the alignment of the access that is being performed. 225 Align getAlign() const { 226 return Align(1ULL << (getSubclassData<AlignmentField>())); 227 } 228 229 void setAlignment(Align Align) { 230 setSubclassData<AlignmentField>(Log2(Align)); 231 } 232 233 /// Returns the ordering constraint of this load instruction. 234 AtomicOrdering getOrdering() const { 235 return getSubclassData<OrderingField>(); 236 } 237 /// Sets the ordering constraint of this load instruction. May not be Release 238 /// or AcquireRelease. 239 void setOrdering(AtomicOrdering Ordering) { 240 setSubclassData<OrderingField>(Ordering); 241 } 242 243 /// Returns the synchronization scope ID of this load instruction. 244 SyncScope::ID getSyncScopeID() const { 245 return SSID; 246 } 247 248 /// Sets the synchronization scope ID of this load instruction. 249 void setSyncScopeID(SyncScope::ID SSID) { 250 this->SSID = SSID; 251 } 252 253 /// Sets the ordering constraint and the synchronization scope ID of this load 254 /// instruction. 255 void setAtomic(AtomicOrdering Ordering, 256 SyncScope::ID SSID = SyncScope::System) { 257 setOrdering(Ordering); 258 setSyncScopeID(SSID); 259 } 260 261 bool isSimple() const { return !isAtomic() && !isVolatile(); } 262 263 bool isUnordered() const { 264 return (getOrdering() == AtomicOrdering::NotAtomic || 265 getOrdering() == AtomicOrdering::Unordered) && 266 !isVolatile(); 267 } 268 269 Value *getPointerOperand() { return getOperand(0); } 270 const Value *getPointerOperand() const { return getOperand(0); } 271 static unsigned getPointerOperandIndex() { return 0U; } 272 Type *getPointerOperandType() const { return getPointerOperand()->getType(); } 273 274 /// Returns the address space of the pointer operand. 275 unsigned getPointerAddressSpace() const { 276 return getPointerOperandType()->getPointerAddressSpace(); 277 } 278 279 // Methods for support type inquiry through isa, cast, and dyn_cast: 280 static bool classof(const Instruction *I) { 281 return I->getOpcode() == Instruction::Load; 282 } 283 static bool classof(const Value *V) { 284 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 285 } 286 287 private: 288 // Shadow Instruction::setInstructionSubclassData with a private forwarding 289 // method so that subclasses cannot accidentally use it. 290 template <typename Bitfield> 291 void setSubclassData(typename Bitfield::Type Value) { 292 Instruction::setSubclassData<Bitfield>(Value); 293 } 294 295 /// The synchronization scope ID of this load instruction. Not quite enough 296 /// room in SubClassData for everything, so synchronization scope ID gets its 297 /// own field. 298 SyncScope::ID SSID; 299 }; 300 301 //===----------------------------------------------------------------------===// 302 // StoreInst Class 303 //===----------------------------------------------------------------------===// 304 305 /// An instruction for storing to memory. 306 class StoreInst : public Instruction { 307 using VolatileField = BoolBitfieldElementT<0>; 308 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; 309 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; 310 static_assert( 311 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), 312 "Bitfields must be contiguous"); 313 314 void AssertOK(); 315 316 protected: 317 // Note: Instruction needs to be a friend here to call cloneImpl. 318 friend class Instruction; 319 320 StoreInst *cloneImpl() const; 321 322 public: 323 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); 324 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); 325 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore); 326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); 327 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 328 Instruction *InsertBefore = nullptr); 329 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 330 BasicBlock *InsertAtEnd); 331 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 332 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, 333 Instruction *InsertBefore = nullptr); 334 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 335 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); 336 337 // allocate space for exactly two operands 338 void *operator new(size_t S) { return User::operator new(S, 2); } 339 void operator delete(void *Ptr) { User::operator delete(Ptr); } 340 341 /// Return true if this is a store to a volatile memory location. 342 bool isVolatile() const { return getSubclassData<VolatileField>(); } 343 344 /// Specify whether this is a volatile store or not. 345 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 346 347 /// Transparently provide more efficient getOperand methods. 348 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 349 350 /// Return the alignment of the access that is being performed 351 /// FIXME: Remove this function once transition to Align is over. 352 /// Use getAlign() instead. 353 uint64_t getAlignment() const { return getAlign().value(); } 354 355 Align getAlign() const { 356 return Align(1ULL << (getSubclassData<AlignmentField>())); 357 } 358 359 void setAlignment(Align Align) { 360 setSubclassData<AlignmentField>(Log2(Align)); 361 } 362 363 /// Returns the ordering constraint of this store instruction. 364 AtomicOrdering getOrdering() const { 365 return getSubclassData<OrderingField>(); 366 } 367 368 /// Sets the ordering constraint of this store instruction. May not be 369 /// Acquire or AcquireRelease. 370 void setOrdering(AtomicOrdering Ordering) { 371 setSubclassData<OrderingField>(Ordering); 372 } 373 374 /// Returns the synchronization scope ID of this store instruction. 375 SyncScope::ID getSyncScopeID() const { 376 return SSID; 377 } 378 379 /// Sets the synchronization scope ID of this store instruction. 380 void setSyncScopeID(SyncScope::ID SSID) { 381 this->SSID = SSID; 382 } 383 384 /// Sets the ordering constraint and the synchronization scope ID of this 385 /// store instruction. 386 void setAtomic(AtomicOrdering Ordering, 387 SyncScope::ID SSID = SyncScope::System) { 388 setOrdering(Ordering); 389 setSyncScopeID(SSID); 390 } 391 392 bool isSimple() const { return !isAtomic() && !isVolatile(); } 393 394 bool isUnordered() const { 395 return (getOrdering() == AtomicOrdering::NotAtomic || 396 getOrdering() == AtomicOrdering::Unordered) && 397 !isVolatile(); 398 } 399 400 Value *getValueOperand() { return getOperand(0); } 401 const Value *getValueOperand() const { return getOperand(0); } 402 403 Value *getPointerOperand() { return getOperand(1); } 404 const Value *getPointerOperand() const { return getOperand(1); } 405 static unsigned getPointerOperandIndex() { return 1U; } 406 Type *getPointerOperandType() const { return getPointerOperand()->getType(); } 407 408 /// Returns the address space of the pointer operand. 409 unsigned getPointerAddressSpace() const { 410 return getPointerOperandType()->getPointerAddressSpace(); 411 } 412 413 // Methods for support type inquiry through isa, cast, and dyn_cast: 414 static bool classof(const Instruction *I) { 415 return I->getOpcode() == Instruction::Store; 416 } 417 static bool classof(const Value *V) { 418 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 419 } 420 421 private: 422 // Shadow Instruction::setInstructionSubclassData with a private forwarding 423 // method so that subclasses cannot accidentally use it. 424 template <typename Bitfield> 425 void setSubclassData(typename Bitfield::Type Value) { 426 Instruction::setSubclassData<Bitfield>(Value); 427 } 428 429 /// The synchronization scope ID of this store instruction. Not quite enough 430 /// room in SubClassData for everything, so synchronization scope ID gets its 431 /// own field. 432 SyncScope::ID SSID; 433 }; 434 435 template <> 436 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { 437 }; 438 439 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) 440 441 //===----------------------------------------------------------------------===// 442 // FenceInst Class 443 //===----------------------------------------------------------------------===// 444 445 /// An instruction for ordering other memory operations. 446 class FenceInst : public Instruction { 447 using OrderingField = AtomicOrderingBitfieldElementT<0>; 448 449 void Init(AtomicOrdering Ordering, SyncScope::ID SSID); 450 451 protected: 452 // Note: Instruction needs to be a friend here to call cloneImpl. 453 friend class Instruction; 454 455 FenceInst *cloneImpl() const; 456 457 public: 458 // Ordering may only be Acquire, Release, AcquireRelease, or 459 // SequentiallyConsistent. 460 FenceInst(LLVMContext &C, AtomicOrdering Ordering, 461 SyncScope::ID SSID = SyncScope::System, 462 Instruction *InsertBefore = nullptr); 463 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, 464 BasicBlock *InsertAtEnd); 465 466 // allocate space for exactly zero operands 467 void *operator new(size_t S) { return User::operator new(S, 0); } 468 void operator delete(void *Ptr) { User::operator delete(Ptr); } 469 470 /// Returns the ordering constraint of this fence instruction. 471 AtomicOrdering getOrdering() const { 472 return getSubclassData<OrderingField>(); 473 } 474 475 /// Sets the ordering constraint of this fence instruction. May only be 476 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. 477 void setOrdering(AtomicOrdering Ordering) { 478 setSubclassData<OrderingField>(Ordering); 479 } 480 481 /// Returns the synchronization scope ID of this fence instruction. 482 SyncScope::ID getSyncScopeID() const { 483 return SSID; 484 } 485 486 /// Sets the synchronization scope ID of this fence instruction. 487 void setSyncScopeID(SyncScope::ID SSID) { 488 this->SSID = SSID; 489 } 490 491 // Methods for support type inquiry through isa, cast, and dyn_cast: 492 static bool classof(const Instruction *I) { 493 return I->getOpcode() == Instruction::Fence; 494 } 495 static bool classof(const Value *V) { 496 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 497 } 498 499 private: 500 // Shadow Instruction::setInstructionSubclassData with a private forwarding 501 // method so that subclasses cannot accidentally use it. 502 template <typename Bitfield> 503 void setSubclassData(typename Bitfield::Type Value) { 504 Instruction::setSubclassData<Bitfield>(Value); 505 } 506 507 /// The synchronization scope ID of this fence instruction. Not quite enough 508 /// room in SubClassData for everything, so synchronization scope ID gets its 509 /// own field. 510 SyncScope::ID SSID; 511 }; 512 513 //===----------------------------------------------------------------------===// 514 // AtomicCmpXchgInst Class 515 //===----------------------------------------------------------------------===// 516 517 /// An instruction that atomically checks whether a 518 /// specified value is in a memory location, and, if it is, stores a new value 519 /// there. The value returned by this instruction is a pair containing the 520 /// original value as first element, and an i1 indicating success (true) or 521 /// failure (false) as second element. 522 /// 523 class AtomicCmpXchgInst : public Instruction { 524 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align, 525 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, 526 SyncScope::ID SSID); 527 528 template <unsigned Offset> 529 using AtomicOrderingBitfieldElement = 530 typename Bitfield::Element<AtomicOrdering, Offset, 3, 531 AtomicOrdering::LAST>; 532 533 protected: 534 // Note: Instruction needs to be a friend here to call cloneImpl. 535 friend class Instruction; 536 537 AtomicCmpXchgInst *cloneImpl() const; 538 539 public: 540 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, 541 AtomicOrdering SuccessOrdering, 542 AtomicOrdering FailureOrdering, SyncScope::ID SSID, 543 Instruction *InsertBefore = nullptr); 544 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, 545 AtomicOrdering SuccessOrdering, 546 AtomicOrdering FailureOrdering, SyncScope::ID SSID, 547 BasicBlock *InsertAtEnd); 548 549 // allocate space for exactly three operands 550 void *operator new(size_t S) { return User::operator new(S, 3); } 551 void operator delete(void *Ptr) { User::operator delete(Ptr); } 552 553 using VolatileField = BoolBitfieldElementT<0>; 554 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>; 555 using SuccessOrderingField = 556 AtomicOrderingBitfieldElementT<WeakField::NextBit>; 557 using FailureOrderingField = 558 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>; 559 using AlignmentField = 560 AlignmentBitfieldElementT<FailureOrderingField::NextBit>; 561 static_assert( 562 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField, 563 FailureOrderingField, AlignmentField>(), 564 "Bitfields must be contiguous"); 565 566 /// Return the alignment of the memory that is being allocated by the 567 /// instruction. 568 Align getAlign() const { 569 return Align(1ULL << getSubclassData<AlignmentField>()); 570 } 571 572 void setAlignment(Align Align) { 573 setSubclassData<AlignmentField>(Log2(Align)); 574 } 575 576 /// Return true if this is a cmpxchg from a volatile memory 577 /// location. 578 /// 579 bool isVolatile() const { return getSubclassData<VolatileField>(); } 580 581 /// Specify whether this is a volatile cmpxchg. 582 /// 583 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 584 585 /// Return true if this cmpxchg may spuriously fail. 586 bool isWeak() const { return getSubclassData<WeakField>(); } 587 588 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); } 589 590 /// Transparently provide more efficient getOperand methods. 591 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 592 593 static bool isValidSuccessOrdering(AtomicOrdering Ordering) { 594 return Ordering != AtomicOrdering::NotAtomic && 595 Ordering != AtomicOrdering::Unordered; 596 } 597 598 static bool isValidFailureOrdering(AtomicOrdering Ordering) { 599 return Ordering != AtomicOrdering::NotAtomic && 600 Ordering != AtomicOrdering::Unordered && 601 Ordering != AtomicOrdering::AcquireRelease && 602 Ordering != AtomicOrdering::Release; 603 } 604 605 /// Returns the success ordering constraint of this cmpxchg instruction. 606 AtomicOrdering getSuccessOrdering() const { 607 return getSubclassData<SuccessOrderingField>(); 608 } 609 610 /// Sets the success ordering constraint of this cmpxchg instruction. 611 void setSuccessOrdering(AtomicOrdering Ordering) { 612 assert(isValidSuccessOrdering(Ordering) && 613 "invalid CmpXchg success ordering"); 614 setSubclassData<SuccessOrderingField>(Ordering); 615 } 616 617 /// Returns the failure ordering constraint of this cmpxchg instruction. 618 AtomicOrdering getFailureOrdering() const { 619 return getSubclassData<FailureOrderingField>(); 620 } 621 622 /// Sets the failure ordering constraint of this cmpxchg instruction. 623 void setFailureOrdering(AtomicOrdering Ordering) { 624 assert(isValidFailureOrdering(Ordering) && 625 "invalid CmpXchg failure ordering"); 626 setSubclassData<FailureOrderingField>(Ordering); 627 } 628 629 /// Returns a single ordering which is at least as strong as both the 630 /// success and failure orderings for this cmpxchg. 631 AtomicOrdering getMergedOrdering() const { 632 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent) 633 return AtomicOrdering::SequentiallyConsistent; 634 if (getFailureOrdering() == AtomicOrdering::Acquire) { 635 if (getSuccessOrdering() == AtomicOrdering::Monotonic) 636 return AtomicOrdering::Acquire; 637 if (getSuccessOrdering() == AtomicOrdering::Release) 638 return AtomicOrdering::AcquireRelease; 639 } 640 return getSuccessOrdering(); 641 } 642 643 /// Returns the synchronization scope ID of this cmpxchg instruction. 644 SyncScope::ID getSyncScopeID() const { 645 return SSID; 646 } 647 648 /// Sets the synchronization scope ID of this cmpxchg instruction. 649 void setSyncScopeID(SyncScope::ID SSID) { 650 this->SSID = SSID; 651 } 652 653 Value *getPointerOperand() { return getOperand(0); } 654 const Value *getPointerOperand() const { return getOperand(0); } 655 static unsigned getPointerOperandIndex() { return 0U; } 656 657 Value *getCompareOperand() { return getOperand(1); } 658 const Value *getCompareOperand() const { return getOperand(1); } 659 660 Value *getNewValOperand() { return getOperand(2); } 661 const Value *getNewValOperand() const { return getOperand(2); } 662 663 /// Returns the address space of the pointer operand. 664 unsigned getPointerAddressSpace() const { 665 return getPointerOperand()->getType()->getPointerAddressSpace(); 666 } 667 668 /// Returns the strongest permitted ordering on failure, given the 669 /// desired ordering on success. 670 /// 671 /// If the comparison in a cmpxchg operation fails, there is no atomic store 672 /// so release semantics cannot be provided. So this function drops explicit 673 /// Release requests from the AtomicOrdering. A SequentiallyConsistent 674 /// operation would remain SequentiallyConsistent. 675 static AtomicOrdering 676 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { 677 switch (SuccessOrdering) { 678 default: 679 llvm_unreachable("invalid cmpxchg success ordering"); 680 case AtomicOrdering::Release: 681 case AtomicOrdering::Monotonic: 682 return AtomicOrdering::Monotonic; 683 case AtomicOrdering::AcquireRelease: 684 case AtomicOrdering::Acquire: 685 return AtomicOrdering::Acquire; 686 case AtomicOrdering::SequentiallyConsistent: 687 return AtomicOrdering::SequentiallyConsistent; 688 } 689 } 690 691 // Methods for support type inquiry through isa, cast, and dyn_cast: 692 static bool classof(const Instruction *I) { 693 return I->getOpcode() == Instruction::AtomicCmpXchg; 694 } 695 static bool classof(const Value *V) { 696 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 697 } 698 699 private: 700 // Shadow Instruction::setInstructionSubclassData with a private forwarding 701 // method so that subclasses cannot accidentally use it. 702 template <typename Bitfield> 703 void setSubclassData(typename Bitfield::Type Value) { 704 Instruction::setSubclassData<Bitfield>(Value); 705 } 706 707 /// The synchronization scope ID of this cmpxchg instruction. Not quite 708 /// enough room in SubClassData for everything, so synchronization scope ID 709 /// gets its own field. 710 SyncScope::ID SSID; 711 }; 712 713 template <> 714 struct OperandTraits<AtomicCmpXchgInst> : 715 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { 716 }; 717 718 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value) 719 720 //===----------------------------------------------------------------------===// 721 // AtomicRMWInst Class 722 //===----------------------------------------------------------------------===// 723 724 /// an instruction that atomically reads a memory location, 725 /// combines it with another value, and then stores the result back. Returns 726 /// the old value. 727 /// 728 class AtomicRMWInst : public Instruction { 729 protected: 730 // Note: Instruction needs to be a friend here to call cloneImpl. 731 friend class Instruction; 732 733 AtomicRMWInst *cloneImpl() const; 734 735 public: 736 /// This enumeration lists the possible modifications atomicrmw can make. In 737 /// the descriptions, 'p' is the pointer to the instruction's memory location, 738 /// 'old' is the initial value of *p, and 'v' is the other value passed to the 739 /// instruction. These instructions always return 'old'. 740 enum BinOp : unsigned { 741 /// *p = v 742 Xchg, 743 /// *p = old + v 744 Add, 745 /// *p = old - v 746 Sub, 747 /// *p = old & v 748 And, 749 /// *p = ~(old & v) 750 Nand, 751 /// *p = old | v 752 Or, 753 /// *p = old ^ v 754 Xor, 755 /// *p = old >signed v ? old : v 756 Max, 757 /// *p = old <signed v ? old : v 758 Min, 759 /// *p = old >unsigned v ? old : v 760 UMax, 761 /// *p = old <unsigned v ? old : v 762 UMin, 763 764 /// *p = old + v 765 FAdd, 766 767 /// *p = old - v 768 FSub, 769 770 FIRST_BINOP = Xchg, 771 LAST_BINOP = FSub, 772 BAD_BINOP 773 }; 774 775 private: 776 template <unsigned Offset> 777 using AtomicOrderingBitfieldElement = 778 typename Bitfield::Element<AtomicOrdering, Offset, 3, 779 AtomicOrdering::LAST>; 780 781 template <unsigned Offset> 782 using BinOpBitfieldElement = 783 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>; 784 785 public: 786 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, 787 AtomicOrdering Ordering, SyncScope::ID SSID, 788 Instruction *InsertBefore = nullptr); 789 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, 790 AtomicOrdering Ordering, SyncScope::ID SSID, 791 BasicBlock *InsertAtEnd); 792 793 // allocate space for exactly two operands 794 void *operator new(size_t S) { return User::operator new(S, 2); } 795 void operator delete(void *Ptr) { User::operator delete(Ptr); } 796 797 using VolatileField = BoolBitfieldElementT<0>; 798 using AtomicOrderingField = 799 AtomicOrderingBitfieldElementT<VolatileField::NextBit>; 800 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>; 801 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>; 802 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField, 803 OperationField, AlignmentField>(), 804 "Bitfields must be contiguous"); 805 806 BinOp getOperation() const { return getSubclassData<OperationField>(); } 807 808 static StringRef getOperationName(BinOp Op); 809 810 static bool isFPOperation(BinOp Op) { 811 switch (Op) { 812 case AtomicRMWInst::FAdd: 813 case AtomicRMWInst::FSub: 814 return true; 815 default: 816 return false; 817 } 818 } 819 820 void setOperation(BinOp Operation) { 821 setSubclassData<OperationField>(Operation); 822 } 823 824 /// Return the alignment of the memory that is being allocated by the 825 /// instruction. 826 Align getAlign() const { 827 return Align(1ULL << getSubclassData<AlignmentField>()); 828 } 829 830 void setAlignment(Align Align) { 831 setSubclassData<AlignmentField>(Log2(Align)); 832 } 833 834 /// Return true if this is a RMW on a volatile memory location. 835 /// 836 bool isVolatile() const { return getSubclassData<VolatileField>(); } 837 838 /// Specify whether this is a volatile RMW or not. 839 /// 840 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 841 842 /// Transparently provide more efficient getOperand methods. 843 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 844 845 /// Returns the ordering constraint of this rmw instruction. 846 AtomicOrdering getOrdering() const { 847 return getSubclassData<AtomicOrderingField>(); 848 } 849 850 /// Sets the ordering constraint of this rmw instruction. 851 void setOrdering(AtomicOrdering Ordering) { 852 assert(Ordering != AtomicOrdering::NotAtomic && 853 "atomicrmw instructions can only be atomic."); 854 setSubclassData<AtomicOrderingField>(Ordering); 855 } 856 857 /// Returns the synchronization scope ID of this rmw instruction. 858 SyncScope::ID getSyncScopeID() const { 859 return SSID; 860 } 861 862 /// Sets the synchronization scope ID of this rmw instruction. 863 void setSyncScopeID(SyncScope::ID SSID) { 864 this->SSID = SSID; 865 } 866 867 Value *getPointerOperand() { return getOperand(0); } 868 const Value *getPointerOperand() const { return getOperand(0); } 869 static unsigned getPointerOperandIndex() { return 0U; } 870 871 Value *getValOperand() { return getOperand(1); } 872 const Value *getValOperand() const { return getOperand(1); } 873 874 /// Returns the address space of the pointer operand. 875 unsigned getPointerAddressSpace() const { 876 return getPointerOperand()->getType()->getPointerAddressSpace(); 877 } 878 879 bool isFloatingPointOperation() const { 880 return isFPOperation(getOperation()); 881 } 882 883 // Methods for support type inquiry through isa, cast, and dyn_cast: 884 static bool classof(const Instruction *I) { 885 return I->getOpcode() == Instruction::AtomicRMW; 886 } 887 static bool classof(const Value *V) { 888 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 889 } 890 891 private: 892 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align, 893 AtomicOrdering Ordering, SyncScope::ID SSID); 894 895 // Shadow Instruction::setInstructionSubclassData with a private forwarding 896 // method so that subclasses cannot accidentally use it. 897 template <typename Bitfield> 898 void setSubclassData(typename Bitfield::Type Value) { 899 Instruction::setSubclassData<Bitfield>(Value); 900 } 901 902 /// The synchronization scope ID of this rmw instruction. Not quite enough 903 /// room in SubClassData for everything, so synchronization scope ID gets its 904 /// own field. 905 SyncScope::ID SSID; 906 }; 907 908 template <> 909 struct OperandTraits<AtomicRMWInst> 910 : public FixedNumOperandTraits<AtomicRMWInst,2> { 911 }; 912 913 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value) 914 915 //===----------------------------------------------------------------------===// 916 // GetElementPtrInst Class 917 //===----------------------------------------------------------------------===// 918 919 // checkGEPType - Simple wrapper function to give a better assertion failure 920 // message on bad indexes for a gep instruction. 921 // 922 inline Type *checkGEPType(Type *Ty) { 923 assert(Ty && "Invalid GetElementPtrInst indices for type!"); 924 return Ty; 925 } 926 927 /// an instruction for type-safe pointer arithmetic to 928 /// access elements of arrays and structs 929 /// 930 class GetElementPtrInst : public Instruction { 931 Type *SourceElementType; 932 Type *ResultElementType; 933 934 GetElementPtrInst(const GetElementPtrInst &GEPI); 935 936 /// Constructors - Create a getelementptr instruction with a base pointer an 937 /// list of indices. The first ctor can optionally insert before an existing 938 /// instruction, the second appends the new instruction to the specified 939 /// BasicBlock. 940 inline GetElementPtrInst(Type *PointeeType, Value *Ptr, 941 ArrayRef<Value *> IdxList, unsigned Values, 942 const Twine &NameStr, Instruction *InsertBefore); 943 inline GetElementPtrInst(Type *PointeeType, Value *Ptr, 944 ArrayRef<Value *> IdxList, unsigned Values, 945 const Twine &NameStr, BasicBlock *InsertAtEnd); 946 947 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); 948 949 protected: 950 // Note: Instruction needs to be a friend here to call cloneImpl. 951 friend class Instruction; 952 953 GetElementPtrInst *cloneImpl() const; 954 955 public: 956 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, 957 ArrayRef<Value *> IdxList, 958 const Twine &NameStr = "", 959 Instruction *InsertBefore = nullptr) { 960 unsigned Values = 1 + unsigned(IdxList.size()); 961 assert(PointeeType && "Must specify element type"); 962 assert(cast<PointerType>(Ptr->getType()->getScalarType()) 963 ->isOpaqueOrPointeeTypeMatches(PointeeType)); 964 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, 965 NameStr, InsertBefore); 966 } 967 968 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, 969 ArrayRef<Value *> IdxList, 970 const Twine &NameStr, 971 BasicBlock *InsertAtEnd) { 972 unsigned Values = 1 + unsigned(IdxList.size()); 973 assert(PointeeType && "Must specify element type"); 974 assert(cast<PointerType>(Ptr->getType()->getScalarType()) 975 ->isOpaqueOrPointeeTypeMatches(PointeeType)); 976 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, 977 NameStr, InsertAtEnd); 978 } 979 980 /// Create an "inbounds" getelementptr. See the documentation for the 981 /// "inbounds" flag in LangRef.html for details. 982 static GetElementPtrInst * 983 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, 984 const Twine &NameStr = "", 985 Instruction *InsertBefore = nullptr) { 986 GetElementPtrInst *GEP = 987 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); 988 GEP->setIsInBounds(true); 989 return GEP; 990 } 991 992 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, 993 ArrayRef<Value *> IdxList, 994 const Twine &NameStr, 995 BasicBlock *InsertAtEnd) { 996 GetElementPtrInst *GEP = 997 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); 998 GEP->setIsInBounds(true); 999 return GEP; 1000 } 1001 1002 /// Transparently provide more efficient getOperand methods. 1003 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1004 1005 Type *getSourceElementType() const { return SourceElementType; } 1006 1007 void setSourceElementType(Type *Ty) { SourceElementType = Ty; } 1008 void setResultElementType(Type *Ty) { ResultElementType = Ty; } 1009 1010 Type *getResultElementType() const { 1011 assert(cast<PointerType>(getType()->getScalarType()) 1012 ->isOpaqueOrPointeeTypeMatches(ResultElementType)); 1013 return ResultElementType; 1014 } 1015 1016 /// Returns the address space of this instruction's pointer type. 1017 unsigned getAddressSpace() const { 1018 // Note that this is always the same as the pointer operand's address space 1019 // and that is cheaper to compute, so cheat here. 1020 return getPointerAddressSpace(); 1021 } 1022 1023 /// Returns the result type of a getelementptr with the given source 1024 /// element type and indexes. 1025 /// 1026 /// Null is returned if the indices are invalid for the specified 1027 /// source element type. 1028 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList); 1029 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList); 1030 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList); 1031 1032 /// Return the type of the element at the given index of an indexable 1033 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})". 1034 /// 1035 /// Returns null if the type can't be indexed, or the given index is not 1036 /// legal for the given type. 1037 static Type *getTypeAtIndex(Type *Ty, Value *Idx); 1038 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx); 1039 1040 inline op_iterator idx_begin() { return op_begin()+1; } 1041 inline const_op_iterator idx_begin() const { return op_begin()+1; } 1042 inline op_iterator idx_end() { return op_end(); } 1043 inline const_op_iterator idx_end() const { return op_end(); } 1044 1045 inline iterator_range<op_iterator> indices() { 1046 return make_range(idx_begin(), idx_end()); 1047 } 1048 1049 inline iterator_range<const_op_iterator> indices() const { 1050 return make_range(idx_begin(), idx_end()); 1051 } 1052 1053 Value *getPointerOperand() { 1054 return getOperand(0); 1055 } 1056 const Value *getPointerOperand() const { 1057 return getOperand(0); 1058 } 1059 static unsigned getPointerOperandIndex() { 1060 return 0U; // get index for modifying correct operand. 1061 } 1062 1063 /// Method to return the pointer operand as a 1064 /// PointerType. 1065 Type *getPointerOperandType() const { 1066 return getPointerOperand()->getType(); 1067 } 1068 1069 /// Returns the address space of the pointer operand. 1070 unsigned getPointerAddressSpace() const { 1071 return getPointerOperandType()->getPointerAddressSpace(); 1072 } 1073 1074 /// Returns the pointer type returned by the GEP 1075 /// instruction, which may be a vector of pointers. 1076 static Type *getGEPReturnType(Type *ElTy, Value *Ptr, 1077 ArrayRef<Value *> IdxList) { 1078 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType()); 1079 unsigned AddrSpace = OrigPtrTy->getAddressSpace(); 1080 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList)); 1081 Type *PtrTy = OrigPtrTy->isOpaque() 1082 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace) 1083 : PointerType::get(ResultElemTy, AddrSpace); 1084 // Vector GEP 1085 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) { 1086 ElementCount EltCount = PtrVTy->getElementCount(); 1087 return VectorType::get(PtrTy, EltCount); 1088 } 1089 for (Value *Index : IdxList) 1090 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) { 1091 ElementCount EltCount = IndexVTy->getElementCount(); 1092 return VectorType::get(PtrTy, EltCount); 1093 } 1094 // Scalar GEP 1095 return PtrTy; 1096 } 1097 1098 unsigned getNumIndices() const { // Note: always non-negative 1099 return getNumOperands() - 1; 1100 } 1101 1102 bool hasIndices() const { 1103 return getNumOperands() > 1; 1104 } 1105 1106 /// Return true if all of the indices of this GEP are 1107 /// zeros. If so, the result pointer and the first operand have the same 1108 /// value, just potentially different types. 1109 bool hasAllZeroIndices() const; 1110 1111 /// Return true if all of the indices of this GEP are 1112 /// constant integers. If so, the result pointer and the first operand have 1113 /// a constant offset between them. 1114 bool hasAllConstantIndices() const; 1115 1116 /// Set or clear the inbounds flag on this GEP instruction. 1117 /// See LangRef.html for the meaning of inbounds on a getelementptr. 1118 void setIsInBounds(bool b = true); 1119 1120 /// Determine whether the GEP has the inbounds flag. 1121 bool isInBounds() const; 1122 1123 /// Accumulate the constant address offset of this GEP if possible. 1124 /// 1125 /// This routine accepts an APInt into which it will accumulate the constant 1126 /// offset of this GEP if the GEP is in fact constant. If the GEP is not 1127 /// all-constant, it returns false and the value of the offset APInt is 1128 /// undefined (it is *not* preserved!). The APInt passed into this routine 1129 /// must be at least as wide as the IntPtr type for the address space of 1130 /// the base GEP pointer. 1131 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; 1132 bool collectOffset(const DataLayout &DL, unsigned BitWidth, 1133 MapVector<Value *, APInt> &VariableOffsets, 1134 APInt &ConstantOffset) const; 1135 // Methods for support type inquiry through isa, cast, and dyn_cast: 1136 static bool classof(const Instruction *I) { 1137 return (I->getOpcode() == Instruction::GetElementPtr); 1138 } 1139 static bool classof(const Value *V) { 1140 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1141 } 1142 }; 1143 1144 template <> 1145 struct OperandTraits<GetElementPtrInst> : 1146 public VariadicOperandTraits<GetElementPtrInst, 1> { 1147 }; 1148 1149 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, 1150 ArrayRef<Value *> IdxList, unsigned Values, 1151 const Twine &NameStr, 1152 Instruction *InsertBefore) 1153 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, 1154 OperandTraits<GetElementPtrInst>::op_end(this) - Values, 1155 Values, InsertBefore), 1156 SourceElementType(PointeeType), 1157 ResultElementType(getIndexedType(PointeeType, IdxList)) { 1158 assert(cast<PointerType>(getType()->getScalarType()) 1159 ->isOpaqueOrPointeeTypeMatches(ResultElementType)); 1160 init(Ptr, IdxList, NameStr); 1161 } 1162 1163 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, 1164 ArrayRef<Value *> IdxList, unsigned Values, 1165 const Twine &NameStr, 1166 BasicBlock *InsertAtEnd) 1167 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, 1168 OperandTraits<GetElementPtrInst>::op_end(this) - Values, 1169 Values, InsertAtEnd), 1170 SourceElementType(PointeeType), 1171 ResultElementType(getIndexedType(PointeeType, IdxList)) { 1172 assert(cast<PointerType>(getType()->getScalarType()) 1173 ->isOpaqueOrPointeeTypeMatches(ResultElementType)); 1174 init(Ptr, IdxList, NameStr); 1175 } 1176 1177 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) 1178 1179 //===----------------------------------------------------------------------===// 1180 // ICmpInst Class 1181 //===----------------------------------------------------------------------===// 1182 1183 /// This instruction compares its operands according to the predicate given 1184 /// to the constructor. It only operates on integers or pointers. The operands 1185 /// must be identical types. 1186 /// Represent an integer comparison operator. 1187 class ICmpInst: public CmpInst { 1188 void AssertOK() { 1189 assert(isIntPredicate() && 1190 "Invalid ICmp predicate value"); 1191 assert(getOperand(0)->getType() == getOperand(1)->getType() && 1192 "Both operands to ICmp instruction are not of the same type!"); 1193 // Check that the operands are the right type 1194 assert((getOperand(0)->getType()->isIntOrIntVectorTy() || 1195 getOperand(0)->getType()->isPtrOrPtrVectorTy()) && 1196 "Invalid operand types for ICmp instruction"); 1197 } 1198 1199 protected: 1200 // Note: Instruction needs to be a friend here to call cloneImpl. 1201 friend class Instruction; 1202 1203 /// Clone an identical ICmpInst 1204 ICmpInst *cloneImpl() const; 1205 1206 public: 1207 /// Constructor with insert-before-instruction semantics. 1208 ICmpInst( 1209 Instruction *InsertBefore, ///< Where to insert 1210 Predicate pred, ///< The predicate to use for the comparison 1211 Value *LHS, ///< The left-hand-side of the expression 1212 Value *RHS, ///< The right-hand-side of the expression 1213 const Twine &NameStr = "" ///< Name of the instruction 1214 ) : CmpInst(makeCmpResultType(LHS->getType()), 1215 Instruction::ICmp, pred, LHS, RHS, NameStr, 1216 InsertBefore) { 1217 #ifndef NDEBUG 1218 AssertOK(); 1219 #endif 1220 } 1221 1222 /// Constructor with insert-at-end semantics. 1223 ICmpInst( 1224 BasicBlock &InsertAtEnd, ///< Block to insert into. 1225 Predicate pred, ///< The predicate to use for the comparison 1226 Value *LHS, ///< The left-hand-side of the expression 1227 Value *RHS, ///< The right-hand-side of the expression 1228 const Twine &NameStr = "" ///< Name of the instruction 1229 ) : CmpInst(makeCmpResultType(LHS->getType()), 1230 Instruction::ICmp, pred, LHS, RHS, NameStr, 1231 &InsertAtEnd) { 1232 #ifndef NDEBUG 1233 AssertOK(); 1234 #endif 1235 } 1236 1237 /// Constructor with no-insertion semantics 1238 ICmpInst( 1239 Predicate pred, ///< The predicate to use for the comparison 1240 Value *LHS, ///< The left-hand-side of the expression 1241 Value *RHS, ///< The right-hand-side of the expression 1242 const Twine &NameStr = "" ///< Name of the instruction 1243 ) : CmpInst(makeCmpResultType(LHS->getType()), 1244 Instruction::ICmp, pred, LHS, RHS, NameStr) { 1245 #ifndef NDEBUG 1246 AssertOK(); 1247 #endif 1248 } 1249 1250 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. 1251 /// @returns the predicate that would be the result if the operand were 1252 /// regarded as signed. 1253 /// Return the signed version of the predicate 1254 Predicate getSignedPredicate() const { 1255 return getSignedPredicate(getPredicate()); 1256 } 1257 1258 /// This is a static version that you can use without an instruction. 1259 /// Return the signed version of the predicate. 1260 static Predicate getSignedPredicate(Predicate pred); 1261 1262 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. 1263 /// @returns the predicate that would be the result if the operand were 1264 /// regarded as unsigned. 1265 /// Return the unsigned version of the predicate 1266 Predicate getUnsignedPredicate() const { 1267 return getUnsignedPredicate(getPredicate()); 1268 } 1269 1270 /// This is a static version that you can use without an instruction. 1271 /// Return the unsigned version of the predicate. 1272 static Predicate getUnsignedPredicate(Predicate pred); 1273 1274 /// Return true if this predicate is either EQ or NE. This also 1275 /// tests for commutativity. 1276 static bool isEquality(Predicate P) { 1277 return P == ICMP_EQ || P == ICMP_NE; 1278 } 1279 1280 /// Return true if this predicate is either EQ or NE. This also 1281 /// tests for commutativity. 1282 bool isEquality() const { 1283 return isEquality(getPredicate()); 1284 } 1285 1286 /// @returns true if the predicate of this ICmpInst is commutative 1287 /// Determine if this relation is commutative. 1288 bool isCommutative() const { return isEquality(); } 1289 1290 /// Return true if the predicate is relational (not EQ or NE). 1291 /// 1292 bool isRelational() const { 1293 return !isEquality(); 1294 } 1295 1296 /// Return true if the predicate is relational (not EQ or NE). 1297 /// 1298 static bool isRelational(Predicate P) { 1299 return !isEquality(P); 1300 } 1301 1302 /// Return true if the predicate is SGT or UGT. 1303 /// 1304 static bool isGT(Predicate P) { 1305 return P == ICMP_SGT || P == ICMP_UGT; 1306 } 1307 1308 /// Return true if the predicate is SLT or ULT. 1309 /// 1310 static bool isLT(Predicate P) { 1311 return P == ICMP_SLT || P == ICMP_ULT; 1312 } 1313 1314 /// Return true if the predicate is SGE or UGE. 1315 /// 1316 static bool isGE(Predicate P) { 1317 return P == ICMP_SGE || P == ICMP_UGE; 1318 } 1319 1320 /// Return true if the predicate is SLE or ULE. 1321 /// 1322 static bool isLE(Predicate P) { 1323 return P == ICMP_SLE || P == ICMP_ULE; 1324 } 1325 1326 /// Returns the sequence of all ICmp predicates. 1327 /// 1328 static auto predicates() { return ICmpPredicates(); } 1329 1330 /// Exchange the two operands to this instruction in such a way that it does 1331 /// not modify the semantics of the instruction. The predicate value may be 1332 /// changed to retain the same result if the predicate is order dependent 1333 /// (e.g. ult). 1334 /// Swap operands and adjust predicate. 1335 void swapOperands() { 1336 setPredicate(getSwappedPredicate()); 1337 Op<0>().swap(Op<1>()); 1338 } 1339 1340 /// Return result of `LHS Pred RHS` comparison. 1341 static bool compare(const APInt &LHS, const APInt &RHS, 1342 ICmpInst::Predicate Pred); 1343 1344 // Methods for support type inquiry through isa, cast, and dyn_cast: 1345 static bool classof(const Instruction *I) { 1346 return I->getOpcode() == Instruction::ICmp; 1347 } 1348 static bool classof(const Value *V) { 1349 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1350 } 1351 }; 1352 1353 //===----------------------------------------------------------------------===// 1354 // FCmpInst Class 1355 //===----------------------------------------------------------------------===// 1356 1357 /// This instruction compares its operands according to the predicate given 1358 /// to the constructor. It only operates on floating point values or packed 1359 /// vectors of floating point values. The operands must be identical types. 1360 /// Represents a floating point comparison operator. 1361 class FCmpInst: public CmpInst { 1362 void AssertOK() { 1363 assert(isFPPredicate() && "Invalid FCmp predicate value"); 1364 assert(getOperand(0)->getType() == getOperand(1)->getType() && 1365 "Both operands to FCmp instruction are not of the same type!"); 1366 // Check that the operands are the right type 1367 assert(getOperand(0)->getType()->isFPOrFPVectorTy() && 1368 "Invalid operand types for FCmp instruction"); 1369 } 1370 1371 protected: 1372 // Note: Instruction needs to be a friend here to call cloneImpl. 1373 friend class Instruction; 1374 1375 /// Clone an identical FCmpInst 1376 FCmpInst *cloneImpl() const; 1377 1378 public: 1379 /// Constructor with insert-before-instruction semantics. 1380 FCmpInst( 1381 Instruction *InsertBefore, ///< Where to insert 1382 Predicate pred, ///< The predicate to use for the comparison 1383 Value *LHS, ///< The left-hand-side of the expression 1384 Value *RHS, ///< The right-hand-side of the expression 1385 const Twine &NameStr = "" ///< Name of the instruction 1386 ) : CmpInst(makeCmpResultType(LHS->getType()), 1387 Instruction::FCmp, pred, LHS, RHS, NameStr, 1388 InsertBefore) { 1389 AssertOK(); 1390 } 1391 1392 /// Constructor with insert-at-end semantics. 1393 FCmpInst( 1394 BasicBlock &InsertAtEnd, ///< Block to insert into. 1395 Predicate pred, ///< The predicate to use for the comparison 1396 Value *LHS, ///< The left-hand-side of the expression 1397 Value *RHS, ///< The right-hand-side of the expression 1398 const Twine &NameStr = "" ///< Name of the instruction 1399 ) : CmpInst(makeCmpResultType(LHS->getType()), 1400 Instruction::FCmp, pred, LHS, RHS, NameStr, 1401 &InsertAtEnd) { 1402 AssertOK(); 1403 } 1404 1405 /// Constructor with no-insertion semantics 1406 FCmpInst( 1407 Predicate Pred, ///< The predicate to use for the comparison 1408 Value *LHS, ///< The left-hand-side of the expression 1409 Value *RHS, ///< The right-hand-side of the expression 1410 const Twine &NameStr = "", ///< Name of the instruction 1411 Instruction *FlagsSource = nullptr 1412 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS, 1413 RHS, NameStr, nullptr, FlagsSource) { 1414 AssertOK(); 1415 } 1416 1417 /// @returns true if the predicate of this instruction is EQ or NE. 1418 /// Determine if this is an equality predicate. 1419 static bool isEquality(Predicate Pred) { 1420 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || 1421 Pred == FCMP_UNE; 1422 } 1423 1424 /// @returns true if the predicate of this instruction is EQ or NE. 1425 /// Determine if this is an equality predicate. 1426 bool isEquality() const { return isEquality(getPredicate()); } 1427 1428 /// @returns true if the predicate of this instruction is commutative. 1429 /// Determine if this is a commutative predicate. 1430 bool isCommutative() const { 1431 return isEquality() || 1432 getPredicate() == FCMP_FALSE || 1433 getPredicate() == FCMP_TRUE || 1434 getPredicate() == FCMP_ORD || 1435 getPredicate() == FCMP_UNO; 1436 } 1437 1438 /// @returns true if the predicate is relational (not EQ or NE). 1439 /// Determine if this a relational predicate. 1440 bool isRelational() const { return !isEquality(); } 1441 1442 /// Exchange the two operands to this instruction in such a way that it does 1443 /// not modify the semantics of the instruction. The predicate value may be 1444 /// changed to retain the same result if the predicate is order dependent 1445 /// (e.g. ult). 1446 /// Swap operands and adjust predicate. 1447 void swapOperands() { 1448 setPredicate(getSwappedPredicate()); 1449 Op<0>().swap(Op<1>()); 1450 } 1451 1452 /// Returns the sequence of all FCmp predicates. 1453 /// 1454 static auto predicates() { return FCmpPredicates(); } 1455 1456 /// Return result of `LHS Pred RHS` comparison. 1457 static bool compare(const APFloat &LHS, const APFloat &RHS, 1458 FCmpInst::Predicate Pred); 1459 1460 /// Methods for support type inquiry through isa, cast, and dyn_cast: 1461 static bool classof(const Instruction *I) { 1462 return I->getOpcode() == Instruction::FCmp; 1463 } 1464 static bool classof(const Value *V) { 1465 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1466 } 1467 }; 1468 1469 //===----------------------------------------------------------------------===// 1470 /// This class represents a function call, abstracting a target 1471 /// machine's calling convention. This class uses low bit of the SubClassData 1472 /// field to indicate whether or not this is a tail call. The rest of the bits 1473 /// hold the calling convention of the call. 1474 /// 1475 class CallInst : public CallBase { 1476 CallInst(const CallInst &CI); 1477 1478 /// Construct a CallInst given a range of arguments. 1479 /// Construct a CallInst from a range of arguments 1480 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1481 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1482 Instruction *InsertBefore); 1483 1484 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1485 const Twine &NameStr, Instruction *InsertBefore) 1486 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {} 1487 1488 /// Construct a CallInst given a range of arguments. 1489 /// Construct a CallInst from a range of arguments 1490 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1491 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1492 BasicBlock *InsertAtEnd); 1493 1494 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, 1495 Instruction *InsertBefore); 1496 1497 CallInst(FunctionType *ty, Value *F, const Twine &NameStr, 1498 BasicBlock *InsertAtEnd); 1499 1500 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, 1501 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 1502 void init(FunctionType *FTy, Value *Func, const Twine &NameStr); 1503 1504 /// Compute the number of operands to allocate. 1505 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { 1506 // We need one operand for the called function, plus the input operand 1507 // counts provided. 1508 return 1 + NumArgs + NumBundleInputs; 1509 } 1510 1511 protected: 1512 // Note: Instruction needs to be a friend here to call cloneImpl. 1513 friend class Instruction; 1514 1515 CallInst *cloneImpl() const; 1516 1517 public: 1518 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", 1519 Instruction *InsertBefore = nullptr) { 1520 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); 1521 } 1522 1523 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1524 const Twine &NameStr, 1525 Instruction *InsertBefore = nullptr) { 1526 return new (ComputeNumOperands(Args.size())) 1527 CallInst(Ty, Func, Args, None, NameStr, InsertBefore); 1528 } 1529 1530 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1531 ArrayRef<OperandBundleDef> Bundles = None, 1532 const Twine &NameStr = "", 1533 Instruction *InsertBefore = nullptr) { 1534 const int NumOperands = 1535 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 1536 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 1537 1538 return new (NumOperands, DescriptorBytes) 1539 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); 1540 } 1541 1542 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, 1543 BasicBlock *InsertAtEnd) { 1544 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd); 1545 } 1546 1547 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1548 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1549 return new (ComputeNumOperands(Args.size())) 1550 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd); 1551 } 1552 1553 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1554 ArrayRef<OperandBundleDef> Bundles, 1555 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1556 const int NumOperands = 1557 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 1558 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 1559 1560 return new (NumOperands, DescriptorBytes) 1561 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd); 1562 } 1563 1564 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "", 1565 Instruction *InsertBefore = nullptr) { 1566 return Create(Func.getFunctionType(), Func.getCallee(), NameStr, 1567 InsertBefore); 1568 } 1569 1570 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1571 ArrayRef<OperandBundleDef> Bundles = None, 1572 const Twine &NameStr = "", 1573 Instruction *InsertBefore = nullptr) { 1574 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, 1575 NameStr, InsertBefore); 1576 } 1577 1578 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1579 const Twine &NameStr, 1580 Instruction *InsertBefore = nullptr) { 1581 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, 1582 InsertBefore); 1583 } 1584 1585 static CallInst *Create(FunctionCallee Func, const Twine &NameStr, 1586 BasicBlock *InsertAtEnd) { 1587 return Create(Func.getFunctionType(), Func.getCallee(), NameStr, 1588 InsertAtEnd); 1589 } 1590 1591 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1592 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1593 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, 1594 InsertAtEnd); 1595 } 1596 1597 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1598 ArrayRef<OperandBundleDef> Bundles, 1599 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1600 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, 1601 NameStr, InsertAtEnd); 1602 } 1603 1604 /// Create a clone of \p CI with a different set of operand bundles and 1605 /// insert it before \p InsertPt. 1606 /// 1607 /// The returned call instruction is identical \p CI in every way except that 1608 /// the operand bundles for the new instruction are set to the operand bundles 1609 /// in \p Bundles. 1610 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, 1611 Instruction *InsertPt = nullptr); 1612 1613 /// Generate the IR for a call to malloc: 1614 /// 1. Compute the malloc call's argument as the specified type's size, 1615 /// possibly multiplied by the array size if the array size is not 1616 /// constant 1. 1617 /// 2. Call malloc with that argument. 1618 /// 3. Bitcast the result of the malloc call to the specified type. 1619 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, 1620 Type *AllocTy, Value *AllocSize, 1621 Value *ArraySize = nullptr, 1622 Function *MallocF = nullptr, 1623 const Twine &Name = ""); 1624 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, 1625 Type *AllocTy, Value *AllocSize, 1626 Value *ArraySize = nullptr, 1627 Function *MallocF = nullptr, 1628 const Twine &Name = ""); 1629 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, 1630 Type *AllocTy, Value *AllocSize, 1631 Value *ArraySize = nullptr, 1632 ArrayRef<OperandBundleDef> Bundles = None, 1633 Function *MallocF = nullptr, 1634 const Twine &Name = ""); 1635 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, 1636 Type *AllocTy, Value *AllocSize, 1637 Value *ArraySize = nullptr, 1638 ArrayRef<OperandBundleDef> Bundles = None, 1639 Function *MallocF = nullptr, 1640 const Twine &Name = ""); 1641 /// Generate the IR for a call to the builtin free function. 1642 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore); 1643 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd); 1644 static Instruction *CreateFree(Value *Source, 1645 ArrayRef<OperandBundleDef> Bundles, 1646 Instruction *InsertBefore); 1647 static Instruction *CreateFree(Value *Source, 1648 ArrayRef<OperandBundleDef> Bundles, 1649 BasicBlock *InsertAtEnd); 1650 1651 // Note that 'musttail' implies 'tail'. 1652 enum TailCallKind : unsigned { 1653 TCK_None = 0, 1654 TCK_Tail = 1, 1655 TCK_MustTail = 2, 1656 TCK_NoTail = 3, 1657 TCK_LAST = TCK_NoTail 1658 }; 1659 1660 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>; 1661 static_assert( 1662 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(), 1663 "Bitfields must be contiguous"); 1664 1665 TailCallKind getTailCallKind() const { 1666 return getSubclassData<TailCallKindField>(); 1667 } 1668 1669 bool isTailCall() const { 1670 TailCallKind Kind = getTailCallKind(); 1671 return Kind == TCK_Tail || Kind == TCK_MustTail; 1672 } 1673 1674 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; } 1675 1676 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; } 1677 1678 void setTailCallKind(TailCallKind TCK) { 1679 setSubclassData<TailCallKindField>(TCK); 1680 } 1681 1682 void setTailCall(bool IsTc = true) { 1683 setTailCallKind(IsTc ? TCK_Tail : TCK_None); 1684 } 1685 1686 /// Return true if the call can return twice 1687 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } 1688 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); } 1689 1690 // Methods for support type inquiry through isa, cast, and dyn_cast: 1691 static bool classof(const Instruction *I) { 1692 return I->getOpcode() == Instruction::Call; 1693 } 1694 static bool classof(const Value *V) { 1695 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1696 } 1697 1698 /// Updates profile metadata by scaling it by \p S / \p T. 1699 void updateProfWeight(uint64_t S, uint64_t T); 1700 1701 private: 1702 // Shadow Instruction::setInstructionSubclassData with a private forwarding 1703 // method so that subclasses cannot accidentally use it. 1704 template <typename Bitfield> 1705 void setSubclassData(typename Bitfield::Type Value) { 1706 Instruction::setSubclassData<Bitfield>(Value); 1707 } 1708 }; 1709 1710 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1711 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1712 BasicBlock *InsertAtEnd) 1713 : CallBase(Ty->getReturnType(), Instruction::Call, 1714 OperandTraits<CallBase>::op_end(this) - 1715 (Args.size() + CountBundleInputs(Bundles) + 1), 1716 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1717 InsertAtEnd) { 1718 init(Ty, Func, Args, Bundles, NameStr); 1719 } 1720 1721 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1722 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1723 Instruction *InsertBefore) 1724 : CallBase(Ty->getReturnType(), Instruction::Call, 1725 OperandTraits<CallBase>::op_end(this) - 1726 (Args.size() + CountBundleInputs(Bundles) + 1), 1727 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1728 InsertBefore) { 1729 init(Ty, Func, Args, Bundles, NameStr); 1730 } 1731 1732 //===----------------------------------------------------------------------===// 1733 // SelectInst Class 1734 //===----------------------------------------------------------------------===// 1735 1736 /// This class represents the LLVM 'select' instruction. 1737 /// 1738 class SelectInst : public Instruction { 1739 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1740 Instruction *InsertBefore) 1741 : Instruction(S1->getType(), Instruction::Select, 1742 &Op<0>(), 3, InsertBefore) { 1743 init(C, S1, S2); 1744 setName(NameStr); 1745 } 1746 1747 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1748 BasicBlock *InsertAtEnd) 1749 : Instruction(S1->getType(), Instruction::Select, 1750 &Op<0>(), 3, InsertAtEnd) { 1751 init(C, S1, S2); 1752 setName(NameStr); 1753 } 1754 1755 void init(Value *C, Value *S1, Value *S2) { 1756 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select"); 1757 Op<0>() = C; 1758 Op<1>() = S1; 1759 Op<2>() = S2; 1760 } 1761 1762 protected: 1763 // Note: Instruction needs to be a friend here to call cloneImpl. 1764 friend class Instruction; 1765 1766 SelectInst *cloneImpl() const; 1767 1768 public: 1769 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1770 const Twine &NameStr = "", 1771 Instruction *InsertBefore = nullptr, 1772 Instruction *MDFrom = nullptr) { 1773 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); 1774 if (MDFrom) 1775 Sel->copyMetadata(*MDFrom); 1776 return Sel; 1777 } 1778 1779 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1780 const Twine &NameStr, 1781 BasicBlock *InsertAtEnd) { 1782 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); 1783 } 1784 1785 const Value *getCondition() const { return Op<0>(); } 1786 const Value *getTrueValue() const { return Op<1>(); } 1787 const Value *getFalseValue() const { return Op<2>(); } 1788 Value *getCondition() { return Op<0>(); } 1789 Value *getTrueValue() { return Op<1>(); } 1790 Value *getFalseValue() { return Op<2>(); } 1791 1792 void setCondition(Value *V) { Op<0>() = V; } 1793 void setTrueValue(Value *V) { Op<1>() = V; } 1794 void setFalseValue(Value *V) { Op<2>() = V; } 1795 1796 /// Swap the true and false values of the select instruction. 1797 /// This doesn't swap prof metadata. 1798 void swapValues() { Op<1>().swap(Op<2>()); } 1799 1800 /// Return a string if the specified operands are invalid 1801 /// for a select operation, otherwise return null. 1802 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); 1803 1804 /// Transparently provide more efficient getOperand methods. 1805 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1806 1807 OtherOps getOpcode() const { 1808 return static_cast<OtherOps>(Instruction::getOpcode()); 1809 } 1810 1811 // Methods for support type inquiry through isa, cast, and dyn_cast: 1812 static bool classof(const Instruction *I) { 1813 return I->getOpcode() == Instruction::Select; 1814 } 1815 static bool classof(const Value *V) { 1816 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1817 } 1818 }; 1819 1820 template <> 1821 struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { 1822 }; 1823 1824 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value) 1825 1826 //===----------------------------------------------------------------------===// 1827 // VAArgInst Class 1828 //===----------------------------------------------------------------------===// 1829 1830 /// This class represents the va_arg llvm instruction, which returns 1831 /// an argument of the specified type given a va_list and increments that list 1832 /// 1833 class VAArgInst : public UnaryInstruction { 1834 protected: 1835 // Note: Instruction needs to be a friend here to call cloneImpl. 1836 friend class Instruction; 1837 1838 VAArgInst *cloneImpl() const; 1839 1840 public: 1841 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", 1842 Instruction *InsertBefore = nullptr) 1843 : UnaryInstruction(Ty, VAArg, List, InsertBefore) { 1844 setName(NameStr); 1845 } 1846 1847 VAArgInst(Value *List, Type *Ty, const Twine &NameStr, 1848 BasicBlock *InsertAtEnd) 1849 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { 1850 setName(NameStr); 1851 } 1852 1853 Value *getPointerOperand() { return getOperand(0); } 1854 const Value *getPointerOperand() const { return getOperand(0); } 1855 static unsigned getPointerOperandIndex() { return 0U; } 1856 1857 // Methods for support type inquiry through isa, cast, and dyn_cast: 1858 static bool classof(const Instruction *I) { 1859 return I->getOpcode() == VAArg; 1860 } 1861 static bool classof(const Value *V) { 1862 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1863 } 1864 }; 1865 1866 //===----------------------------------------------------------------------===// 1867 // ExtractElementInst Class 1868 //===----------------------------------------------------------------------===// 1869 1870 /// This instruction extracts a single (scalar) 1871 /// element from a VectorType value 1872 /// 1873 class ExtractElementInst : public Instruction { 1874 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", 1875 Instruction *InsertBefore = nullptr); 1876 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, 1877 BasicBlock *InsertAtEnd); 1878 1879 protected: 1880 // Note: Instruction needs to be a friend here to call cloneImpl. 1881 friend class Instruction; 1882 1883 ExtractElementInst *cloneImpl() const; 1884 1885 public: 1886 static ExtractElementInst *Create(Value *Vec, Value *Idx, 1887 const Twine &NameStr = "", 1888 Instruction *InsertBefore = nullptr) { 1889 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); 1890 } 1891 1892 static ExtractElementInst *Create(Value *Vec, Value *Idx, 1893 const Twine &NameStr, 1894 BasicBlock *InsertAtEnd) { 1895 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); 1896 } 1897 1898 /// Return true if an extractelement instruction can be 1899 /// formed with the specified operands. 1900 static bool isValidOperands(const Value *Vec, const Value *Idx); 1901 1902 Value *getVectorOperand() { return Op<0>(); } 1903 Value *getIndexOperand() { return Op<1>(); } 1904 const Value *getVectorOperand() const { return Op<0>(); } 1905 const Value *getIndexOperand() const { return Op<1>(); } 1906 1907 VectorType *getVectorOperandType() const { 1908 return cast<VectorType>(getVectorOperand()->getType()); 1909 } 1910 1911 /// Transparently provide more efficient getOperand methods. 1912 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1913 1914 // Methods for support type inquiry through isa, cast, and dyn_cast: 1915 static bool classof(const Instruction *I) { 1916 return I->getOpcode() == Instruction::ExtractElement; 1917 } 1918 static bool classof(const Value *V) { 1919 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1920 } 1921 }; 1922 1923 template <> 1924 struct OperandTraits<ExtractElementInst> : 1925 public FixedNumOperandTraits<ExtractElementInst, 2> { 1926 }; 1927 1928 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value) 1929 1930 //===----------------------------------------------------------------------===// 1931 // InsertElementInst Class 1932 //===----------------------------------------------------------------------===// 1933 1934 /// This instruction inserts a single (scalar) 1935 /// element into a VectorType value 1936 /// 1937 class InsertElementInst : public Instruction { 1938 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, 1939 const Twine &NameStr = "", 1940 Instruction *InsertBefore = nullptr); 1941 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, 1942 BasicBlock *InsertAtEnd); 1943 1944 protected: 1945 // Note: Instruction needs to be a friend here to call cloneImpl. 1946 friend class Instruction; 1947 1948 InsertElementInst *cloneImpl() const; 1949 1950 public: 1951 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 1952 const Twine &NameStr = "", 1953 Instruction *InsertBefore = nullptr) { 1954 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); 1955 } 1956 1957 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 1958 const Twine &NameStr, 1959 BasicBlock *InsertAtEnd) { 1960 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); 1961 } 1962 1963 /// Return true if an insertelement instruction can be 1964 /// formed with the specified operands. 1965 static bool isValidOperands(const Value *Vec, const Value *NewElt, 1966 const Value *Idx); 1967 1968 /// Overload to return most specific vector type. 1969 /// 1970 VectorType *getType() const { 1971 return cast<VectorType>(Instruction::getType()); 1972 } 1973 1974 /// Transparently provide more efficient getOperand methods. 1975 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1976 1977 // Methods for support type inquiry through isa, cast, and dyn_cast: 1978 static bool classof(const Instruction *I) { 1979 return I->getOpcode() == Instruction::InsertElement; 1980 } 1981 static bool classof(const Value *V) { 1982 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1983 } 1984 }; 1985 1986 template <> 1987 struct OperandTraits<InsertElementInst> : 1988 public FixedNumOperandTraits<InsertElementInst, 3> { 1989 }; 1990 1991 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value) 1992 1993 //===----------------------------------------------------------------------===// 1994 // ShuffleVectorInst Class 1995 //===----------------------------------------------------------------------===// 1996 1997 constexpr int UndefMaskElem = -1; 1998 1999 /// This instruction constructs a fixed permutation of two 2000 /// input vectors. 2001 /// 2002 /// For each element of the result vector, the shuffle mask selects an element 2003 /// from one of the input vectors to copy to the result. Non-negative elements 2004 /// in the mask represent an index into the concatenated pair of input vectors. 2005 /// UndefMaskElem (-1) specifies that the result element is undefined. 2006 /// 2007 /// For scalable vectors, all the elements of the mask must be 0 or -1. This 2008 /// requirement may be relaxed in the future. 2009 class ShuffleVectorInst : public Instruction { 2010 SmallVector<int, 4> ShuffleMask; 2011 Constant *ShuffleMaskForBitcode; 2012 2013 protected: 2014 // Note: Instruction needs to be a friend here to call cloneImpl. 2015 friend class Instruction; 2016 2017 ShuffleVectorInst *cloneImpl() const; 2018 2019 public: 2020 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "", 2021 Instruction *InsertBefore = nullptr); 2022 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, 2023 BasicBlock *InsertAtEnd); 2024 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "", 2025 Instruction *InsertBefore = nullptr); 2026 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr, 2027 BasicBlock *InsertAtEnd); 2028 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2029 const Twine &NameStr = "", 2030 Instruction *InsertBefor = nullptr); 2031 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2032 const Twine &NameStr, BasicBlock *InsertAtEnd); 2033 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2034 const Twine &NameStr = "", 2035 Instruction *InsertBefor = nullptr); 2036 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2037 const Twine &NameStr, BasicBlock *InsertAtEnd); 2038 2039 void *operator new(size_t S) { return User::operator new(S, 2); } 2040 void operator delete(void *Ptr) { return User::operator delete(Ptr); } 2041 2042 /// Swap the operands and adjust the mask to preserve the semantics 2043 /// of the instruction. 2044 void commute(); 2045 2046 /// Return true if a shufflevector instruction can be 2047 /// formed with the specified operands. 2048 static bool isValidOperands(const Value *V1, const Value *V2, 2049 const Value *Mask); 2050 static bool isValidOperands(const Value *V1, const Value *V2, 2051 ArrayRef<int> Mask); 2052 2053 /// Overload to return most specific vector type. 2054 /// 2055 VectorType *getType() const { 2056 return cast<VectorType>(Instruction::getType()); 2057 } 2058 2059 /// Transparently provide more efficient getOperand methods. 2060 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2061 2062 /// Return the shuffle mask value of this instruction for the given element 2063 /// index. Return UndefMaskElem if the element is undef. 2064 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; } 2065 2066 /// Convert the input shuffle mask operand to a vector of integers. Undefined 2067 /// elements of the mask are returned as UndefMaskElem. 2068 static void getShuffleMask(const Constant *Mask, 2069 SmallVectorImpl<int> &Result); 2070 2071 /// Return the mask for this instruction as a vector of integers. Undefined 2072 /// elements of the mask are returned as UndefMaskElem. 2073 void getShuffleMask(SmallVectorImpl<int> &Result) const { 2074 Result.assign(ShuffleMask.begin(), ShuffleMask.end()); 2075 } 2076 2077 /// Return the mask for this instruction, for use in bitcode. 2078 /// 2079 /// TODO: This is temporary until we decide a new bitcode encoding for 2080 /// shufflevector. 2081 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; } 2082 2083 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask, 2084 Type *ResultTy); 2085 2086 void setShuffleMask(ArrayRef<int> Mask); 2087 2088 ArrayRef<int> getShuffleMask() const { return ShuffleMask; } 2089 2090 /// Return true if this shuffle returns a vector with a different number of 2091 /// elements than its source vectors. 2092 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> 2093 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> 2094 bool changesLength() const { 2095 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) 2096 ->getElementCount() 2097 .getKnownMinValue(); 2098 unsigned NumMaskElts = ShuffleMask.size(); 2099 return NumSourceElts != NumMaskElts; 2100 } 2101 2102 /// Return true if this shuffle returns a vector with a greater number of 2103 /// elements than its source vectors. 2104 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> 2105 bool increasesLength() const { 2106 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) 2107 ->getElementCount() 2108 .getKnownMinValue(); 2109 unsigned NumMaskElts = ShuffleMask.size(); 2110 return NumSourceElts < NumMaskElts; 2111 } 2112 2113 /// Return true if this shuffle mask chooses elements from exactly one source 2114 /// vector. 2115 /// Example: <7,5,undef,7> 2116 /// This assumes that vector operands are the same length as the mask. 2117 static bool isSingleSourceMask(ArrayRef<int> Mask); 2118 static bool isSingleSourceMask(const Constant *Mask) { 2119 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2120 SmallVector<int, 16> MaskAsInts; 2121 getShuffleMask(Mask, MaskAsInts); 2122 return isSingleSourceMask(MaskAsInts); 2123 } 2124 2125 /// Return true if this shuffle chooses elements from exactly one source 2126 /// vector without changing the length of that vector. 2127 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> 2128 /// TODO: Optionally allow length-changing shuffles. 2129 bool isSingleSource() const { 2130 return !changesLength() && isSingleSourceMask(ShuffleMask); 2131 } 2132 2133 /// Return true if this shuffle mask chooses elements from exactly one source 2134 /// vector without lane crossings. A shuffle using this mask is not 2135 /// necessarily a no-op because it may change the number of elements from its 2136 /// input vectors or it may provide demanded bits knowledge via undef lanes. 2137 /// Example: <undef,undef,2,3> 2138 static bool isIdentityMask(ArrayRef<int> Mask); 2139 static bool isIdentityMask(const Constant *Mask) { 2140 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2141 SmallVector<int, 16> MaskAsInts; 2142 getShuffleMask(Mask, MaskAsInts); 2143 return isIdentityMask(MaskAsInts); 2144 } 2145 2146 /// Return true if this shuffle chooses elements from exactly one source 2147 /// vector without lane crossings and does not change the number of elements 2148 /// from its input vectors. 2149 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> 2150 bool isIdentity() const { 2151 return !changesLength() && isIdentityMask(ShuffleMask); 2152 } 2153 2154 /// Return true if this shuffle lengthens exactly one source vector with 2155 /// undefs in the high elements. 2156 bool isIdentityWithPadding() const; 2157 2158 /// Return true if this shuffle extracts the first N elements of exactly one 2159 /// source vector. 2160 bool isIdentityWithExtract() const; 2161 2162 /// Return true if this shuffle concatenates its 2 source vectors. This 2163 /// returns false if either input is undefined. In that case, the shuffle is 2164 /// is better classified as an identity with padding operation. 2165 bool isConcat() const; 2166 2167 /// Return true if this shuffle mask chooses elements from its source vectors 2168 /// without lane crossings. A shuffle using this mask would be 2169 /// equivalent to a vector select with a constant condition operand. 2170 /// Example: <4,1,6,undef> 2171 /// This returns false if the mask does not choose from both input vectors. 2172 /// In that case, the shuffle is better classified as an identity shuffle. 2173 /// This assumes that vector operands are the same length as the mask 2174 /// (a length-changing shuffle can never be equivalent to a vector select). 2175 static bool isSelectMask(ArrayRef<int> Mask); 2176 static bool isSelectMask(const Constant *Mask) { 2177 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2178 SmallVector<int, 16> MaskAsInts; 2179 getShuffleMask(Mask, MaskAsInts); 2180 return isSelectMask(MaskAsInts); 2181 } 2182 2183 /// Return true if this shuffle chooses elements from its source vectors 2184 /// without lane crossings and all operands have the same number of elements. 2185 /// In other words, this shuffle is equivalent to a vector select with a 2186 /// constant condition operand. 2187 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3> 2188 /// This returns false if the mask does not choose from both input vectors. 2189 /// In that case, the shuffle is better classified as an identity shuffle. 2190 /// TODO: Optionally allow length-changing shuffles. 2191 bool isSelect() const { 2192 return !changesLength() && isSelectMask(ShuffleMask); 2193 } 2194 2195 /// Return true if this shuffle mask swaps the order of elements from exactly 2196 /// one source vector. 2197 /// Example: <7,6,undef,4> 2198 /// This assumes that vector operands are the same length as the mask. 2199 static bool isReverseMask(ArrayRef<int> Mask); 2200 static bool isReverseMask(const Constant *Mask) { 2201 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2202 SmallVector<int, 16> MaskAsInts; 2203 getShuffleMask(Mask, MaskAsInts); 2204 return isReverseMask(MaskAsInts); 2205 } 2206 2207 /// Return true if this shuffle swaps the order of elements from exactly 2208 /// one source vector. 2209 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef> 2210 /// TODO: Optionally allow length-changing shuffles. 2211 bool isReverse() const { 2212 return !changesLength() && isReverseMask(ShuffleMask); 2213 } 2214 2215 /// Return true if this shuffle mask chooses all elements with the same value 2216 /// as the first element of exactly one source vector. 2217 /// Example: <4,undef,undef,4> 2218 /// This assumes that vector operands are the same length as the mask. 2219 static bool isZeroEltSplatMask(ArrayRef<int> Mask); 2220 static bool isZeroEltSplatMask(const Constant *Mask) { 2221 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2222 SmallVector<int, 16> MaskAsInts; 2223 getShuffleMask(Mask, MaskAsInts); 2224 return isZeroEltSplatMask(MaskAsInts); 2225 } 2226 2227 /// Return true if all elements of this shuffle are the same value as the 2228 /// first element of exactly one source vector without changing the length 2229 /// of that vector. 2230 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> 2231 /// TODO: Optionally allow length-changing shuffles. 2232 /// TODO: Optionally allow splats from other elements. 2233 bool isZeroEltSplat() const { 2234 return !changesLength() && isZeroEltSplatMask(ShuffleMask); 2235 } 2236 2237 /// Return true if this shuffle mask is a transpose mask. 2238 /// Transpose vector masks transpose a 2xn matrix. They read corresponding 2239 /// even- or odd-numbered vector elements from two n-dimensional source 2240 /// vectors and write each result into consecutive elements of an 2241 /// n-dimensional destination vector. Two shuffles are necessary to complete 2242 /// the transpose, one for the even elements and another for the odd elements. 2243 /// This description closely follows how the TRN1 and TRN2 AArch64 2244 /// instructions operate. 2245 /// 2246 /// For example, a simple 2x2 matrix can be transposed with: 2247 /// 2248 /// ; Original matrix 2249 /// m0 = < a, b > 2250 /// m1 = < c, d > 2251 /// 2252 /// ; Transposed matrix 2253 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > 2254 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > 2255 /// 2256 /// For matrices having greater than n columns, the resulting nx2 transposed 2257 /// matrix is stored in two result vectors such that one vector contains 2258 /// interleaved elements from all the even-numbered rows and the other vector 2259 /// contains interleaved elements from all the odd-numbered rows. For example, 2260 /// a 2x4 matrix can be transposed with: 2261 /// 2262 /// ; Original matrix 2263 /// m0 = < a, b, c, d > 2264 /// m1 = < e, f, g, h > 2265 /// 2266 /// ; Transposed matrix 2267 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > 2268 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > 2269 static bool isTransposeMask(ArrayRef<int> Mask); 2270 static bool isTransposeMask(const Constant *Mask) { 2271 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2272 SmallVector<int, 16> MaskAsInts; 2273 getShuffleMask(Mask, MaskAsInts); 2274 return isTransposeMask(MaskAsInts); 2275 } 2276 2277 /// Return true if this shuffle transposes the elements of its inputs without 2278 /// changing the length of the vectors. This operation may also be known as a 2279 /// merge or interleave. See the description for isTransposeMask() for the 2280 /// exact specification. 2281 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> 2282 bool isTranspose() const { 2283 return !changesLength() && isTransposeMask(ShuffleMask); 2284 } 2285 2286 /// Return true if this shuffle mask is an extract subvector mask. 2287 /// A valid extract subvector mask returns a smaller vector from a single 2288 /// source operand. The base extraction index is returned as well. 2289 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, 2290 int &Index); 2291 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, 2292 int &Index) { 2293 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2294 // Not possible to express a shuffle mask for a scalable vector for this 2295 // case. 2296 if (isa<ScalableVectorType>(Mask->getType())) 2297 return false; 2298 SmallVector<int, 16> MaskAsInts; 2299 getShuffleMask(Mask, MaskAsInts); 2300 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); 2301 } 2302 2303 /// Return true if this shuffle mask is an extract subvector mask. 2304 bool isExtractSubvectorMask(int &Index) const { 2305 // Not possible to express a shuffle mask for a scalable vector for this 2306 // case. 2307 if (isa<ScalableVectorType>(getType())) 2308 return false; 2309 2310 int NumSrcElts = 2311 cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); 2312 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index); 2313 } 2314 2315 /// Return true if this shuffle mask is an insert subvector mask. 2316 /// A valid insert subvector mask inserts the lowest elements of a second 2317 /// source operand into an in-place first source operand operand. 2318 /// Both the sub vector width and the insertion index is returned. 2319 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, 2320 int &NumSubElts, int &Index); 2321 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts, 2322 int &NumSubElts, int &Index) { 2323 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2324 // Not possible to express a shuffle mask for a scalable vector for this 2325 // case. 2326 if (isa<ScalableVectorType>(Mask->getType())) 2327 return false; 2328 SmallVector<int, 16> MaskAsInts; 2329 getShuffleMask(Mask, MaskAsInts); 2330 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index); 2331 } 2332 2333 /// Return true if this shuffle mask is an insert subvector mask. 2334 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const { 2335 // Not possible to express a shuffle mask for a scalable vector for this 2336 // case. 2337 if (isa<ScalableVectorType>(getType())) 2338 return false; 2339 2340 int NumSrcElts = 2341 cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); 2342 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index); 2343 } 2344 2345 /// Return true if this shuffle mask replicates each of the \p VF elements 2346 /// in a vector \p ReplicationFactor times. 2347 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is: 2348 /// <0,0,0,1,1,1,2,2,2,3,3,3> 2349 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor, 2350 int &VF); 2351 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor, 2352 int &VF) { 2353 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2354 // Not possible to express a shuffle mask for a scalable vector for this 2355 // case. 2356 if (isa<ScalableVectorType>(Mask->getType())) 2357 return false; 2358 SmallVector<int, 16> MaskAsInts; 2359 getShuffleMask(Mask, MaskAsInts); 2360 return isReplicationMask(MaskAsInts, ReplicationFactor, VF); 2361 } 2362 2363 /// Return true if this shuffle mask is a replication mask. 2364 bool isReplicationMask(int &ReplicationFactor, int &VF) const; 2365 2366 /// Change values in a shuffle permute mask assuming the two vector operands 2367 /// of length InVecNumElts have swapped position. 2368 static void commuteShuffleMask(MutableArrayRef<int> Mask, 2369 unsigned InVecNumElts) { 2370 for (int &Idx : Mask) { 2371 if (Idx == -1) 2372 continue; 2373 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; 2374 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 && 2375 "shufflevector mask index out of range"); 2376 } 2377 } 2378 2379 // Methods for support type inquiry through isa, cast, and dyn_cast: 2380 static bool classof(const Instruction *I) { 2381 return I->getOpcode() == Instruction::ShuffleVector; 2382 } 2383 static bool classof(const Value *V) { 2384 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2385 } 2386 }; 2387 2388 template <> 2389 struct OperandTraits<ShuffleVectorInst> 2390 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {}; 2391 2392 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value) 2393 2394 //===----------------------------------------------------------------------===// 2395 // ExtractValueInst Class 2396 //===----------------------------------------------------------------------===// 2397 2398 /// This instruction extracts a struct member or array 2399 /// element value from an aggregate value. 2400 /// 2401 class ExtractValueInst : public UnaryInstruction { 2402 SmallVector<unsigned, 4> Indices; 2403 2404 ExtractValueInst(const ExtractValueInst &EVI); 2405 2406 /// Constructors - Create a extractvalue instruction with a base aggregate 2407 /// value and a list of indices. The first ctor can optionally insert before 2408 /// an existing instruction, the second appends the new instruction to the 2409 /// specified BasicBlock. 2410 inline ExtractValueInst(Value *Agg, 2411 ArrayRef<unsigned> Idxs, 2412 const Twine &NameStr, 2413 Instruction *InsertBefore); 2414 inline ExtractValueInst(Value *Agg, 2415 ArrayRef<unsigned> Idxs, 2416 const Twine &NameStr, BasicBlock *InsertAtEnd); 2417 2418 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); 2419 2420 protected: 2421 // Note: Instruction needs to be a friend here to call cloneImpl. 2422 friend class Instruction; 2423 2424 ExtractValueInst *cloneImpl() const; 2425 2426 public: 2427 static ExtractValueInst *Create(Value *Agg, 2428 ArrayRef<unsigned> Idxs, 2429 const Twine &NameStr = "", 2430 Instruction *InsertBefore = nullptr) { 2431 return new 2432 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); 2433 } 2434 2435 static ExtractValueInst *Create(Value *Agg, 2436 ArrayRef<unsigned> Idxs, 2437 const Twine &NameStr, 2438 BasicBlock *InsertAtEnd) { 2439 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); 2440 } 2441 2442 /// Returns the type of the element that would be extracted 2443 /// with an extractvalue instruction with the specified parameters. 2444 /// 2445 /// Null is returned if the indices are invalid for the specified type. 2446 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); 2447 2448 using idx_iterator = const unsigned*; 2449 2450 inline idx_iterator idx_begin() const { return Indices.begin(); } 2451 inline idx_iterator idx_end() const { return Indices.end(); } 2452 inline iterator_range<idx_iterator> indices() const { 2453 return make_range(idx_begin(), idx_end()); 2454 } 2455 2456 Value *getAggregateOperand() { 2457 return getOperand(0); 2458 } 2459 const Value *getAggregateOperand() const { 2460 return getOperand(0); 2461 } 2462 static unsigned getAggregateOperandIndex() { 2463 return 0U; // get index for modifying correct operand 2464 } 2465 2466 ArrayRef<unsigned> getIndices() const { 2467 return Indices; 2468 } 2469 2470 unsigned getNumIndices() const { 2471 return (unsigned)Indices.size(); 2472 } 2473 2474 bool hasIndices() const { 2475 return true; 2476 } 2477 2478 // Methods for support type inquiry through isa, cast, and dyn_cast: 2479 static bool classof(const Instruction *I) { 2480 return I->getOpcode() == Instruction::ExtractValue; 2481 } 2482 static bool classof(const Value *V) { 2483 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2484 } 2485 }; 2486 2487 ExtractValueInst::ExtractValueInst(Value *Agg, 2488 ArrayRef<unsigned> Idxs, 2489 const Twine &NameStr, 2490 Instruction *InsertBefore) 2491 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), 2492 ExtractValue, Agg, InsertBefore) { 2493 init(Idxs, NameStr); 2494 } 2495 2496 ExtractValueInst::ExtractValueInst(Value *Agg, 2497 ArrayRef<unsigned> Idxs, 2498 const Twine &NameStr, 2499 BasicBlock *InsertAtEnd) 2500 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), 2501 ExtractValue, Agg, InsertAtEnd) { 2502 init(Idxs, NameStr); 2503 } 2504 2505 //===----------------------------------------------------------------------===// 2506 // InsertValueInst Class 2507 //===----------------------------------------------------------------------===// 2508 2509 /// This instruction inserts a struct field of array element 2510 /// value into an aggregate value. 2511 /// 2512 class InsertValueInst : public Instruction { 2513 SmallVector<unsigned, 4> Indices; 2514 2515 InsertValueInst(const InsertValueInst &IVI); 2516 2517 /// Constructors - Create a insertvalue instruction with a base aggregate 2518 /// value, a value to insert, and a list of indices. The first ctor can 2519 /// optionally insert before an existing instruction, the second appends 2520 /// the new instruction to the specified BasicBlock. 2521 inline InsertValueInst(Value *Agg, Value *Val, 2522 ArrayRef<unsigned> Idxs, 2523 const Twine &NameStr, 2524 Instruction *InsertBefore); 2525 inline InsertValueInst(Value *Agg, Value *Val, 2526 ArrayRef<unsigned> Idxs, 2527 const Twine &NameStr, BasicBlock *InsertAtEnd); 2528 2529 /// Constructors - These two constructors are convenience methods because one 2530 /// and two index insertvalue instructions are so common. 2531 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, 2532 const Twine &NameStr = "", 2533 Instruction *InsertBefore = nullptr); 2534 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, 2535 BasicBlock *InsertAtEnd); 2536 2537 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 2538 const Twine &NameStr); 2539 2540 protected: 2541 // Note: Instruction needs to be a friend here to call cloneImpl. 2542 friend class Instruction; 2543 2544 InsertValueInst *cloneImpl() const; 2545 2546 public: 2547 // allocate space for exactly two operands 2548 void *operator new(size_t S) { return User::operator new(S, 2); } 2549 void operator delete(void *Ptr) { User::operator delete(Ptr); } 2550 2551 static InsertValueInst *Create(Value *Agg, Value *Val, 2552 ArrayRef<unsigned> Idxs, 2553 const Twine &NameStr = "", 2554 Instruction *InsertBefore = nullptr) { 2555 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); 2556 } 2557 2558 static InsertValueInst *Create(Value *Agg, Value *Val, 2559 ArrayRef<unsigned> Idxs, 2560 const Twine &NameStr, 2561 BasicBlock *InsertAtEnd) { 2562 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); 2563 } 2564 2565 /// Transparently provide more efficient getOperand methods. 2566 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2567 2568 using idx_iterator = const unsigned*; 2569 2570 inline idx_iterator idx_begin() const { return Indices.begin(); } 2571 inline idx_iterator idx_end() const { return Indices.end(); } 2572 inline iterator_range<idx_iterator> indices() const { 2573 return make_range(idx_begin(), idx_end()); 2574 } 2575 2576 Value *getAggregateOperand() { 2577 return getOperand(0); 2578 } 2579 const Value *getAggregateOperand() const { 2580 return getOperand(0); 2581 } 2582 static unsigned getAggregateOperandIndex() { 2583 return 0U; // get index for modifying correct operand 2584 } 2585 2586 Value *getInsertedValueOperand() { 2587 return getOperand(1); 2588 } 2589 const Value *getInsertedValueOperand() const { 2590 return getOperand(1); 2591 } 2592 static unsigned getInsertedValueOperandIndex() { 2593 return 1U; // get index for modifying correct operand 2594 } 2595 2596 ArrayRef<unsigned> getIndices() const { 2597 return Indices; 2598 } 2599 2600 unsigned getNumIndices() const { 2601 return (unsigned)Indices.size(); 2602 } 2603 2604 bool hasIndices() const { 2605 return true; 2606 } 2607 2608 // Methods for support type inquiry through isa, cast, and dyn_cast: 2609 static bool classof(const Instruction *I) { 2610 return I->getOpcode() == Instruction::InsertValue; 2611 } 2612 static bool classof(const Value *V) { 2613 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2614 } 2615 }; 2616 2617 template <> 2618 struct OperandTraits<InsertValueInst> : 2619 public FixedNumOperandTraits<InsertValueInst, 2> { 2620 }; 2621 2622 InsertValueInst::InsertValueInst(Value *Agg, 2623 Value *Val, 2624 ArrayRef<unsigned> Idxs, 2625 const Twine &NameStr, 2626 Instruction *InsertBefore) 2627 : Instruction(Agg->getType(), InsertValue, 2628 OperandTraits<InsertValueInst>::op_begin(this), 2629 2, InsertBefore) { 2630 init(Agg, Val, Idxs, NameStr); 2631 } 2632 2633 InsertValueInst::InsertValueInst(Value *Agg, 2634 Value *Val, 2635 ArrayRef<unsigned> Idxs, 2636 const Twine &NameStr, 2637 BasicBlock *InsertAtEnd) 2638 : Instruction(Agg->getType(), InsertValue, 2639 OperandTraits<InsertValueInst>::op_begin(this), 2640 2, InsertAtEnd) { 2641 init(Agg, Val, Idxs, NameStr); 2642 } 2643 2644 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value) 2645 2646 //===----------------------------------------------------------------------===// 2647 // PHINode Class 2648 //===----------------------------------------------------------------------===// 2649 2650 // PHINode - The PHINode class is used to represent the magical mystical PHI 2651 // node, that can not exist in nature, but can be synthesized in a computer 2652 // scientist's overactive imagination. 2653 // 2654 class PHINode : public Instruction { 2655 /// The number of operands actually allocated. NumOperands is 2656 /// the number actually in use. 2657 unsigned ReservedSpace; 2658 2659 PHINode(const PHINode &PN); 2660 2661 explicit PHINode(Type *Ty, unsigned NumReservedValues, 2662 const Twine &NameStr = "", 2663 Instruction *InsertBefore = nullptr) 2664 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), 2665 ReservedSpace(NumReservedValues) { 2666 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); 2667 setName(NameStr); 2668 allocHungoffUses(ReservedSpace); 2669 } 2670 2671 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, 2672 BasicBlock *InsertAtEnd) 2673 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), 2674 ReservedSpace(NumReservedValues) { 2675 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); 2676 setName(NameStr); 2677 allocHungoffUses(ReservedSpace); 2678 } 2679 2680 protected: 2681 // Note: Instruction needs to be a friend here to call cloneImpl. 2682 friend class Instruction; 2683 2684 PHINode *cloneImpl() const; 2685 2686 // allocHungoffUses - this is more complicated than the generic 2687 // User::allocHungoffUses, because we have to allocate Uses for the incoming 2688 // values and pointers to the incoming blocks, all in one allocation. 2689 void allocHungoffUses(unsigned N) { 2690 User::allocHungoffUses(N, /* IsPhi */ true); 2691 } 2692 2693 public: 2694 /// Constructors - NumReservedValues is a hint for the number of incoming 2695 /// edges that this phi node will have (use 0 if you really have no idea). 2696 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 2697 const Twine &NameStr = "", 2698 Instruction *InsertBefore = nullptr) { 2699 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); 2700 } 2701 2702 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 2703 const Twine &NameStr, BasicBlock *InsertAtEnd) { 2704 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); 2705 } 2706 2707 /// Provide fast operand accessors 2708 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2709 2710 // Block iterator interface. This provides access to the list of incoming 2711 // basic blocks, which parallels the list of incoming values. 2712 2713 using block_iterator = BasicBlock **; 2714 using const_block_iterator = BasicBlock * const *; 2715 2716 block_iterator block_begin() { 2717 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace); 2718 } 2719 2720 const_block_iterator block_begin() const { 2721 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); 2722 } 2723 2724 block_iterator block_end() { 2725 return block_begin() + getNumOperands(); 2726 } 2727 2728 const_block_iterator block_end() const { 2729 return block_begin() + getNumOperands(); 2730 } 2731 2732 iterator_range<block_iterator> blocks() { 2733 return make_range(block_begin(), block_end()); 2734 } 2735 2736 iterator_range<const_block_iterator> blocks() const { 2737 return make_range(block_begin(), block_end()); 2738 } 2739 2740 op_range incoming_values() { return operands(); } 2741 2742 const_op_range incoming_values() const { return operands(); } 2743 2744 /// Return the number of incoming edges 2745 /// 2746 unsigned getNumIncomingValues() const { return getNumOperands(); } 2747 2748 /// Return incoming value number x 2749 /// 2750 Value *getIncomingValue(unsigned i) const { 2751 return getOperand(i); 2752 } 2753 void setIncomingValue(unsigned i, Value *V) { 2754 assert(V && "PHI node got a null value!"); 2755 assert(getType() == V->getType() && 2756 "All operands to PHI node must be the same type as the PHI node!"); 2757 setOperand(i, V); 2758 } 2759 2760 static unsigned getOperandNumForIncomingValue(unsigned i) { 2761 return i; 2762 } 2763 2764 static unsigned getIncomingValueNumForOperand(unsigned i) { 2765 return i; 2766 } 2767 2768 /// Return incoming basic block number @p i. 2769 /// 2770 BasicBlock *getIncomingBlock(unsigned i) const { 2771 return block_begin()[i]; 2772 } 2773 2774 /// Return incoming basic block corresponding 2775 /// to an operand of the PHI. 2776 /// 2777 BasicBlock *getIncomingBlock(const Use &U) const { 2778 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); 2779 return getIncomingBlock(unsigned(&U - op_begin())); 2780 } 2781 2782 /// Return incoming basic block corresponding 2783 /// to value use iterator. 2784 /// 2785 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { 2786 return getIncomingBlock(I.getUse()); 2787 } 2788 2789 void setIncomingBlock(unsigned i, BasicBlock *BB) { 2790 assert(BB && "PHI node got a null basic block!"); 2791 block_begin()[i] = BB; 2792 } 2793 2794 /// Replace every incoming basic block \p Old to basic block \p New. 2795 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { 2796 assert(New && Old && "PHI node got a null basic block!"); 2797 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 2798 if (getIncomingBlock(Op) == Old) 2799 setIncomingBlock(Op, New); 2800 } 2801 2802 /// Add an incoming value to the end of the PHI list 2803 /// 2804 void addIncoming(Value *V, BasicBlock *BB) { 2805 if (getNumOperands() == ReservedSpace) 2806 growOperands(); // Get more space! 2807 // Initialize some new operands. 2808 setNumHungOffUseOperands(getNumOperands() + 1); 2809 setIncomingValue(getNumOperands() - 1, V); 2810 setIncomingBlock(getNumOperands() - 1, BB); 2811 } 2812 2813 /// Remove an incoming value. This is useful if a 2814 /// predecessor basic block is deleted. The value removed is returned. 2815 /// 2816 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty 2817 /// is true), the PHI node is destroyed and any uses of it are replaced with 2818 /// dummy values. The only time there should be zero incoming values to a PHI 2819 /// node is when the block is dead, so this strategy is sound. 2820 /// 2821 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); 2822 2823 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { 2824 int Idx = getBasicBlockIndex(BB); 2825 assert(Idx >= 0 && "Invalid basic block argument to remove!"); 2826 return removeIncomingValue(Idx, DeletePHIIfEmpty); 2827 } 2828 2829 /// Return the first index of the specified basic 2830 /// block in the value list for this PHI. Returns -1 if no instance. 2831 /// 2832 int getBasicBlockIndex(const BasicBlock *BB) const { 2833 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) 2834 if (block_begin()[i] == BB) 2835 return i; 2836 return -1; 2837 } 2838 2839 Value *getIncomingValueForBlock(const BasicBlock *BB) const { 2840 int Idx = getBasicBlockIndex(BB); 2841 assert(Idx >= 0 && "Invalid basic block argument!"); 2842 return getIncomingValue(Idx); 2843 } 2844 2845 /// Set every incoming value(s) for block \p BB to \p V. 2846 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { 2847 assert(BB && "PHI node got a null basic block!"); 2848 bool Found = false; 2849 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 2850 if (getIncomingBlock(Op) == BB) { 2851 Found = true; 2852 setIncomingValue(Op, V); 2853 } 2854 (void)Found; 2855 assert(Found && "Invalid basic block argument to set!"); 2856 } 2857 2858 /// If the specified PHI node always merges together the 2859 /// same value, return the value, otherwise return null. 2860 Value *hasConstantValue() const; 2861 2862 /// Whether the specified PHI node always merges 2863 /// together the same value, assuming undefs are equal to a unique 2864 /// non-undef value. 2865 bool hasConstantOrUndefValue() const; 2866 2867 /// If the PHI node is complete which means all of its parent's predecessors 2868 /// have incoming value in this PHI, return true, otherwise return false. 2869 bool isComplete() const { 2870 return llvm::all_of(predecessors(getParent()), 2871 [this](const BasicBlock *Pred) { 2872 return getBasicBlockIndex(Pred) >= 0; 2873 }); 2874 } 2875 2876 /// Methods for support type inquiry through isa, cast, and dyn_cast: 2877 static bool classof(const Instruction *I) { 2878 return I->getOpcode() == Instruction::PHI; 2879 } 2880 static bool classof(const Value *V) { 2881 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2882 } 2883 2884 private: 2885 void growOperands(); 2886 }; 2887 2888 template <> 2889 struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { 2890 }; 2891 2892 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value) 2893 2894 //===----------------------------------------------------------------------===// 2895 // LandingPadInst Class 2896 //===----------------------------------------------------------------------===// 2897 2898 //===--------------------------------------------------------------------------- 2899 /// The landingpad instruction holds all of the information 2900 /// necessary to generate correct exception handling. The landingpad instruction 2901 /// cannot be moved from the top of a landing pad block, which itself is 2902 /// accessible only from the 'unwind' edge of an invoke. This uses the 2903 /// SubclassData field in Value to store whether or not the landingpad is a 2904 /// cleanup. 2905 /// 2906 class LandingPadInst : public Instruction { 2907 using CleanupField = BoolBitfieldElementT<0>; 2908 2909 /// The number of operands actually allocated. NumOperands is 2910 /// the number actually in use. 2911 unsigned ReservedSpace; 2912 2913 LandingPadInst(const LandingPadInst &LP); 2914 2915 public: 2916 enum ClauseType { Catch, Filter }; 2917 2918 private: 2919 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, 2920 const Twine &NameStr, Instruction *InsertBefore); 2921 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, 2922 const Twine &NameStr, BasicBlock *InsertAtEnd); 2923 2924 // Allocate space for exactly zero operands. 2925 void *operator new(size_t S) { return User::operator new(S); } 2926 2927 void growOperands(unsigned Size); 2928 void init(unsigned NumReservedValues, const Twine &NameStr); 2929 2930 protected: 2931 // Note: Instruction needs to be a friend here to call cloneImpl. 2932 friend class Instruction; 2933 2934 LandingPadInst *cloneImpl() const; 2935 2936 public: 2937 void operator delete(void *Ptr) { User::operator delete(Ptr); } 2938 2939 /// Constructors - NumReservedClauses is a hint for the number of incoming 2940 /// clauses that this landingpad will have (use 0 if you really have no idea). 2941 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, 2942 const Twine &NameStr = "", 2943 Instruction *InsertBefore = nullptr); 2944 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, 2945 const Twine &NameStr, BasicBlock *InsertAtEnd); 2946 2947 /// Provide fast operand accessors 2948 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2949 2950 /// Return 'true' if this landingpad instruction is a 2951 /// cleanup. I.e., it should be run when unwinding even if its landing pad 2952 /// doesn't catch the exception. 2953 bool isCleanup() const { return getSubclassData<CleanupField>(); } 2954 2955 /// Indicate that this landingpad instruction is a cleanup. 2956 void setCleanup(bool V) { setSubclassData<CleanupField>(V); } 2957 2958 /// Add a catch or filter clause to the landing pad. 2959 void addClause(Constant *ClauseVal); 2960 2961 /// Get the value of the clause at index Idx. Use isCatch/isFilter to 2962 /// determine what type of clause this is. 2963 Constant *getClause(unsigned Idx) const { 2964 return cast<Constant>(getOperandList()[Idx]); 2965 } 2966 2967 /// Return 'true' if the clause and index Idx is a catch clause. 2968 bool isCatch(unsigned Idx) const { 2969 return !isa<ArrayType>(getOperandList()[Idx]->getType()); 2970 } 2971 2972 /// Return 'true' if the clause and index Idx is a filter clause. 2973 bool isFilter(unsigned Idx) const { 2974 return isa<ArrayType>(getOperandList()[Idx]->getType()); 2975 } 2976 2977 /// Get the number of clauses for this landing pad. 2978 unsigned getNumClauses() const { return getNumOperands(); } 2979 2980 /// Grow the size of the operand list to accommodate the new 2981 /// number of clauses. 2982 void reserveClauses(unsigned Size) { growOperands(Size); } 2983 2984 // Methods for support type inquiry through isa, cast, and dyn_cast: 2985 static bool classof(const Instruction *I) { 2986 return I->getOpcode() == Instruction::LandingPad; 2987 } 2988 static bool classof(const Value *V) { 2989 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2990 } 2991 }; 2992 2993 template <> 2994 struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> { 2995 }; 2996 2997 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value) 2998 2999 //===----------------------------------------------------------------------===// 3000 // ReturnInst Class 3001 //===----------------------------------------------------------------------===// 3002 3003 //===--------------------------------------------------------------------------- 3004 /// Return a value (possibly void), from a function. Execution 3005 /// does not continue in this function any longer. 3006 /// 3007 class ReturnInst : public Instruction { 3008 ReturnInst(const ReturnInst &RI); 3009 3010 private: 3011 // ReturnInst constructors: 3012 // ReturnInst() - 'ret void' instruction 3013 // ReturnInst( null) - 'ret void' instruction 3014 // ReturnInst(Value* X) - 'ret X' instruction 3015 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I 3016 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I 3017 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B 3018 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B 3019 // 3020 // NOTE: If the Value* passed is of type void then the constructor behaves as 3021 // if it was passed NULL. 3022 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr, 3023 Instruction *InsertBefore = nullptr); 3024 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd); 3025 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd); 3026 3027 protected: 3028 // Note: Instruction needs to be a friend here to call cloneImpl. 3029 friend class Instruction; 3030 3031 ReturnInst *cloneImpl() const; 3032 3033 public: 3034 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr, 3035 Instruction *InsertBefore = nullptr) { 3036 return new(!!retVal) ReturnInst(C, retVal, InsertBefore); 3037 } 3038 3039 static ReturnInst* Create(LLVMContext &C, Value *retVal, 3040 BasicBlock *InsertAtEnd) { 3041 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd); 3042 } 3043 3044 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) { 3045 return new(0) ReturnInst(C, InsertAtEnd); 3046 } 3047 3048 /// Provide fast operand accessors 3049 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3050 3051 /// Convenience accessor. Returns null if there is no return value. 3052 Value *getReturnValue() const { 3053 return getNumOperands() != 0 ? getOperand(0) : nullptr; 3054 } 3055 3056 unsigned getNumSuccessors() const { return 0; } 3057 3058 // Methods for support type inquiry through isa, cast, and dyn_cast: 3059 static bool classof(const Instruction *I) { 3060 return (I->getOpcode() == Instruction::Ret); 3061 } 3062 static bool classof(const Value *V) { 3063 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3064 } 3065 3066 private: 3067 BasicBlock *getSuccessor(unsigned idx) const { 3068 llvm_unreachable("ReturnInst has no successors!"); 3069 } 3070 3071 void setSuccessor(unsigned idx, BasicBlock *B) { 3072 llvm_unreachable("ReturnInst has no successors!"); 3073 } 3074 }; 3075 3076 template <> 3077 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> { 3078 }; 3079 3080 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value) 3081 3082 //===----------------------------------------------------------------------===// 3083 // BranchInst Class 3084 //===----------------------------------------------------------------------===// 3085 3086 //===--------------------------------------------------------------------------- 3087 /// Conditional or Unconditional Branch instruction. 3088 /// 3089 class BranchInst : public Instruction { 3090 /// Ops list - Branches are strange. The operands are ordered: 3091 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because 3092 /// they don't have to check for cond/uncond branchness. These are mostly 3093 /// accessed relative from op_end(). 3094 BranchInst(const BranchInst &BI); 3095 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition): 3096 // BranchInst(BB *B) - 'br B' 3097 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F' 3098 // BranchInst(BB* B, Inst *I) - 'br B' insert before I 3099 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I 3100 // BranchInst(BB* B, BB *I) - 'br B' insert at end 3101 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end 3102 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr); 3103 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 3104 Instruction *InsertBefore = nullptr); 3105 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd); 3106 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 3107 BasicBlock *InsertAtEnd); 3108 3109 void AssertOK(); 3110 3111 protected: 3112 // Note: Instruction needs to be a friend here to call cloneImpl. 3113 friend class Instruction; 3114 3115 BranchInst *cloneImpl() const; 3116 3117 public: 3118 /// Iterator type that casts an operand to a basic block. 3119 /// 3120 /// This only makes sense because the successors are stored as adjacent 3121 /// operands for branch instructions. 3122 struct succ_op_iterator 3123 : iterator_adaptor_base<succ_op_iterator, value_op_iterator, 3124 std::random_access_iterator_tag, BasicBlock *, 3125 ptrdiff_t, BasicBlock *, BasicBlock *> { 3126 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} 3127 3128 BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3129 BasicBlock *operator->() const { return operator*(); } 3130 }; 3131 3132 /// The const version of `succ_op_iterator`. 3133 struct const_succ_op_iterator 3134 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, 3135 std::random_access_iterator_tag, 3136 const BasicBlock *, ptrdiff_t, const BasicBlock *, 3137 const BasicBlock *> { 3138 explicit const_succ_op_iterator(const_value_op_iterator I) 3139 : iterator_adaptor_base(I) {} 3140 3141 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3142 const BasicBlock *operator->() const { return operator*(); } 3143 }; 3144 3145 static BranchInst *Create(BasicBlock *IfTrue, 3146 Instruction *InsertBefore = nullptr) { 3147 return new(1) BranchInst(IfTrue, InsertBefore); 3148 } 3149 3150 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3151 Value *Cond, Instruction *InsertBefore = nullptr) { 3152 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); 3153 } 3154 3155 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) { 3156 return new(1) BranchInst(IfTrue, InsertAtEnd); 3157 } 3158 3159 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3160 Value *Cond, BasicBlock *InsertAtEnd) { 3161 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd); 3162 } 3163 3164 /// Transparently provide more efficient getOperand methods. 3165 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3166 3167 bool isUnconditional() const { return getNumOperands() == 1; } 3168 bool isConditional() const { return getNumOperands() == 3; } 3169 3170 Value *getCondition() const { 3171 assert(isConditional() && "Cannot get condition of an uncond branch!"); 3172 return Op<-3>(); 3173 } 3174 3175 void setCondition(Value *V) { 3176 assert(isConditional() && "Cannot set condition of unconditional branch!"); 3177 Op<-3>() = V; 3178 } 3179 3180 unsigned getNumSuccessors() const { return 1+isConditional(); } 3181 3182 BasicBlock *getSuccessor(unsigned i) const { 3183 assert(i < getNumSuccessors() && "Successor # out of range for Branch!"); 3184 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get()); 3185 } 3186 3187 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 3188 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!"); 3189 *(&Op<-1>() - idx) = NewSucc; 3190 } 3191 3192 /// Swap the successors of this branch instruction. 3193 /// 3194 /// Swaps the successors of the branch instruction. This also swaps any 3195 /// branch weight metadata associated with the instruction so that it 3196 /// continues to map correctly to each operand. 3197 void swapSuccessors(); 3198 3199 iterator_range<succ_op_iterator> successors() { 3200 return make_range( 3201 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)), 3202 succ_op_iterator(value_op_end())); 3203 } 3204 3205 iterator_range<const_succ_op_iterator> successors() const { 3206 return make_range(const_succ_op_iterator( 3207 std::next(value_op_begin(), isConditional() ? 1 : 0)), 3208 const_succ_op_iterator(value_op_end())); 3209 } 3210 3211 // Methods for support type inquiry through isa, cast, and dyn_cast: 3212 static bool classof(const Instruction *I) { 3213 return (I->getOpcode() == Instruction::Br); 3214 } 3215 static bool classof(const Value *V) { 3216 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3217 } 3218 }; 3219 3220 template <> 3221 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> { 3222 }; 3223 3224 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value) 3225 3226 //===----------------------------------------------------------------------===// 3227 // SwitchInst Class 3228 //===----------------------------------------------------------------------===// 3229 3230 //===--------------------------------------------------------------------------- 3231 /// Multiway switch 3232 /// 3233 class SwitchInst : public Instruction { 3234 unsigned ReservedSpace; 3235 3236 // Operand[0] = Value to switch on 3237 // Operand[1] = Default basic block destination 3238 // Operand[2n ] = Value to match 3239 // Operand[2n+1] = BasicBlock to go to on match 3240 SwitchInst(const SwitchInst &SI); 3241 3242 /// Create a new switch instruction, specifying a value to switch on and a 3243 /// default destination. The number of additional cases can be specified here 3244 /// to make memory allocation more efficient. This constructor can also 3245 /// auto-insert before another instruction. 3246 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3247 Instruction *InsertBefore); 3248 3249 /// Create a new switch instruction, specifying a value to switch on and a 3250 /// default destination. The number of additional cases can be specified here 3251 /// to make memory allocation more efficient. This constructor also 3252 /// auto-inserts at the end of the specified BasicBlock. 3253 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3254 BasicBlock *InsertAtEnd); 3255 3256 // allocate space for exactly zero operands 3257 void *operator new(size_t S) { return User::operator new(S); } 3258 3259 void init(Value *Value, BasicBlock *Default, unsigned NumReserved); 3260 void growOperands(); 3261 3262 protected: 3263 // Note: Instruction needs to be a friend here to call cloneImpl. 3264 friend class Instruction; 3265 3266 SwitchInst *cloneImpl() const; 3267 3268 public: 3269 void operator delete(void *Ptr) { User::operator delete(Ptr); } 3270 3271 // -2 3272 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); 3273 3274 template <typename CaseHandleT> class CaseIteratorImpl; 3275 3276 /// A handle to a particular switch case. It exposes a convenient interface 3277 /// to both the case value and the successor block. 3278 /// 3279 /// We define this as a template and instantiate it to form both a const and 3280 /// non-const handle. 3281 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> 3282 class CaseHandleImpl { 3283 // Directly befriend both const and non-const iterators. 3284 friend class SwitchInst::CaseIteratorImpl< 3285 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; 3286 3287 protected: 3288 // Expose the switch type we're parameterized with to the iterator. 3289 using SwitchInstType = SwitchInstT; 3290 3291 SwitchInstT *SI; 3292 ptrdiff_t Index; 3293 3294 CaseHandleImpl() = default; 3295 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} 3296 3297 public: 3298 /// Resolves case value for current case. 3299 ConstantIntT *getCaseValue() const { 3300 assert((unsigned)Index < SI->getNumCases() && 3301 "Index out the number of cases."); 3302 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); 3303 } 3304 3305 /// Resolves successor for current case. 3306 BasicBlockT *getCaseSuccessor() const { 3307 assert(((unsigned)Index < SI->getNumCases() || 3308 (unsigned)Index == DefaultPseudoIndex) && 3309 "Index out the number of cases."); 3310 return SI->getSuccessor(getSuccessorIndex()); 3311 } 3312 3313 /// Returns number of current case. 3314 unsigned getCaseIndex() const { return Index; } 3315 3316 /// Returns successor index for current case successor. 3317 unsigned getSuccessorIndex() const { 3318 assert(((unsigned)Index == DefaultPseudoIndex || 3319 (unsigned)Index < SI->getNumCases()) && 3320 "Index out the number of cases."); 3321 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0; 3322 } 3323 3324 bool operator==(const CaseHandleImpl &RHS) const { 3325 assert(SI == RHS.SI && "Incompatible operators."); 3326 return Index == RHS.Index; 3327 } 3328 }; 3329 3330 using ConstCaseHandle = 3331 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; 3332 3333 class CaseHandle 3334 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> { 3335 friend class SwitchInst::CaseIteratorImpl<CaseHandle>; 3336 3337 public: 3338 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {} 3339 3340 /// Sets the new value for current case. 3341 void setValue(ConstantInt *V) const { 3342 assert((unsigned)Index < SI->getNumCases() && 3343 "Index out the number of cases."); 3344 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V)); 3345 } 3346 3347 /// Sets the new successor for current case. 3348 void setSuccessor(BasicBlock *S) const { 3349 SI->setSuccessor(getSuccessorIndex(), S); 3350 } 3351 }; 3352 3353 template <typename CaseHandleT> 3354 class CaseIteratorImpl 3355 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, 3356 std::random_access_iterator_tag, 3357 const CaseHandleT> { 3358 using SwitchInstT = typename CaseHandleT::SwitchInstType; 3359 3360 CaseHandleT Case; 3361 3362 public: 3363 /// Default constructed iterator is in an invalid state until assigned to 3364 /// a case for a particular switch. 3365 CaseIteratorImpl() = default; 3366 3367 /// Initializes case iterator for given SwitchInst and for given 3368 /// case number. 3369 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {} 3370 3371 /// Initializes case iterator for given SwitchInst and for given 3372 /// successor index. 3373 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, 3374 unsigned SuccessorIndex) { 3375 assert(SuccessorIndex < SI->getNumSuccessors() && 3376 "Successor index # out of range!"); 3377 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1) 3378 : CaseIteratorImpl(SI, DefaultPseudoIndex); 3379 } 3380 3381 /// Support converting to the const variant. This will be a no-op for const 3382 /// variant. 3383 operator CaseIteratorImpl<ConstCaseHandle>() const { 3384 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index); 3385 } 3386 3387 CaseIteratorImpl &operator+=(ptrdiff_t N) { 3388 // Check index correctness after addition. 3389 // Note: Index == getNumCases() means end(). 3390 assert(Case.Index + N >= 0 && 3391 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && 3392 "Case.Index out the number of cases."); 3393 Case.Index += N; 3394 return *this; 3395 } 3396 CaseIteratorImpl &operator-=(ptrdiff_t N) { 3397 // Check index correctness after subtraction. 3398 // Note: Case.Index == getNumCases() means end(). 3399 assert(Case.Index - N >= 0 && 3400 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && 3401 "Case.Index out the number of cases."); 3402 Case.Index -= N; 3403 return *this; 3404 } 3405 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { 3406 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3407 return Case.Index - RHS.Case.Index; 3408 } 3409 bool operator==(const CaseIteratorImpl &RHS) const { 3410 return Case == RHS.Case; 3411 } 3412 bool operator<(const CaseIteratorImpl &RHS) const { 3413 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3414 return Case.Index < RHS.Case.Index; 3415 } 3416 const CaseHandleT &operator*() const { return Case; } 3417 }; 3418 3419 using CaseIt = CaseIteratorImpl<CaseHandle>; 3420 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; 3421 3422 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3423 unsigned NumCases, 3424 Instruction *InsertBefore = nullptr) { 3425 return new SwitchInst(Value, Default, NumCases, InsertBefore); 3426 } 3427 3428 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3429 unsigned NumCases, BasicBlock *InsertAtEnd) { 3430 return new SwitchInst(Value, Default, NumCases, InsertAtEnd); 3431 } 3432 3433 /// Provide fast operand accessors 3434 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3435 3436 // Accessor Methods for Switch stmt 3437 Value *getCondition() const { return getOperand(0); } 3438 void setCondition(Value *V) { setOperand(0, V); } 3439 3440 BasicBlock *getDefaultDest() const { 3441 return cast<BasicBlock>(getOperand(1)); 3442 } 3443 3444 void setDefaultDest(BasicBlock *DefaultCase) { 3445 setOperand(1, reinterpret_cast<Value*>(DefaultCase)); 3446 } 3447 3448 /// Return the number of 'cases' in this switch instruction, excluding the 3449 /// default case. 3450 unsigned getNumCases() const { 3451 return getNumOperands()/2 - 1; 3452 } 3453 3454 /// Returns a read/write iterator that points to the first case in the 3455 /// SwitchInst. 3456 CaseIt case_begin() { 3457 return CaseIt(this, 0); 3458 } 3459 3460 /// Returns a read-only iterator that points to the first case in the 3461 /// SwitchInst. 3462 ConstCaseIt case_begin() const { 3463 return ConstCaseIt(this, 0); 3464 } 3465 3466 /// Returns a read/write iterator that points one past the last in the 3467 /// SwitchInst. 3468 CaseIt case_end() { 3469 return CaseIt(this, getNumCases()); 3470 } 3471 3472 /// Returns a read-only iterator that points one past the last in the 3473 /// SwitchInst. 3474 ConstCaseIt case_end() const { 3475 return ConstCaseIt(this, getNumCases()); 3476 } 3477 3478 /// Iteration adapter for range-for loops. 3479 iterator_range<CaseIt> cases() { 3480 return make_range(case_begin(), case_end()); 3481 } 3482 3483 /// Constant iteration adapter for range-for loops. 3484 iterator_range<ConstCaseIt> cases() const { 3485 return make_range(case_begin(), case_end()); 3486 } 3487 3488 /// Returns an iterator that points to the default case. 3489 /// Note: this iterator allows to resolve successor only. Attempt 3490 /// to resolve case value causes an assertion. 3491 /// Also note, that increment and decrement also causes an assertion and 3492 /// makes iterator invalid. 3493 CaseIt case_default() { 3494 return CaseIt(this, DefaultPseudoIndex); 3495 } 3496 ConstCaseIt case_default() const { 3497 return ConstCaseIt(this, DefaultPseudoIndex); 3498 } 3499 3500 /// Search all of the case values for the specified constant. If it is 3501 /// explicitly handled, return the case iterator of it, otherwise return 3502 /// default case iterator to indicate that it is handled by the default 3503 /// handler. 3504 CaseIt findCaseValue(const ConstantInt *C) { 3505 return CaseIt( 3506 this, 3507 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex()); 3508 } 3509 ConstCaseIt findCaseValue(const ConstantInt *C) const { 3510 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) { 3511 return Case.getCaseValue() == C; 3512 }); 3513 if (I != case_end()) 3514 return I; 3515 3516 return case_default(); 3517 } 3518 3519 /// Finds the unique case value for a given successor. Returns null if the 3520 /// successor is not found, not unique, or is the default case. 3521 ConstantInt *findCaseDest(BasicBlock *BB) { 3522 if (BB == getDefaultDest()) 3523 return nullptr; 3524 3525 ConstantInt *CI = nullptr; 3526 for (auto Case : cases()) { 3527 if (Case.getCaseSuccessor() != BB) 3528 continue; 3529 3530 if (CI) 3531 return nullptr; // Multiple cases lead to BB. 3532 3533 CI = Case.getCaseValue(); 3534 } 3535 3536 return CI; 3537 } 3538 3539 /// Add an entry to the switch instruction. 3540 /// Note: 3541 /// This action invalidates case_end(). Old case_end() iterator will 3542 /// point to the added case. 3543 void addCase(ConstantInt *OnVal, BasicBlock *Dest); 3544 3545 /// This method removes the specified case and its successor from the switch 3546 /// instruction. Note that this operation may reorder the remaining cases at 3547 /// index idx and above. 3548 /// Note: 3549 /// This action invalidates iterators for all cases following the one removed, 3550 /// including the case_end() iterator. It returns an iterator for the next 3551 /// case. 3552 CaseIt removeCase(CaseIt I); 3553 3554 unsigned getNumSuccessors() const { return getNumOperands()/2; } 3555 BasicBlock *getSuccessor(unsigned idx) const { 3556 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!"); 3557 return cast<BasicBlock>(getOperand(idx*2+1)); 3558 } 3559 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 3560 assert(idx < getNumSuccessors() && "Successor # out of range for switch!"); 3561 setOperand(idx * 2 + 1, NewSucc); 3562 } 3563 3564 // Methods for support type inquiry through isa, cast, and dyn_cast: 3565 static bool classof(const Instruction *I) { 3566 return I->getOpcode() == Instruction::Switch; 3567 } 3568 static bool classof(const Value *V) { 3569 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3570 } 3571 }; 3572 3573 /// A wrapper class to simplify modification of SwitchInst cases along with 3574 /// their prof branch_weights metadata. 3575 class SwitchInstProfUpdateWrapper { 3576 SwitchInst &SI; 3577 Optional<SmallVector<uint32_t, 8> > Weights = None; 3578 bool Changed = false; 3579 3580 protected: 3581 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI); 3582 3583 MDNode *buildProfBranchWeightsMD(); 3584 3585 void init(); 3586 3587 public: 3588 using CaseWeightOpt = Optional<uint32_t>; 3589 SwitchInst *operator->() { return &SI; } 3590 SwitchInst &operator*() { return SI; } 3591 operator SwitchInst *() { return &SI; } 3592 3593 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); } 3594 3595 ~SwitchInstProfUpdateWrapper() { 3596 if (Changed) 3597 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); 3598 } 3599 3600 /// Delegate the call to the underlying SwitchInst::removeCase() and remove 3601 /// correspondent branch weight. 3602 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I); 3603 3604 /// Delegate the call to the underlying SwitchInst::addCase() and set the 3605 /// specified branch weight for the added case. 3606 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W); 3607 3608 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark 3609 /// this object to not touch the underlying SwitchInst in destructor. 3610 SymbolTableList<Instruction>::iterator eraseFromParent(); 3611 3612 void setSuccessorWeight(unsigned idx, CaseWeightOpt W); 3613 CaseWeightOpt getSuccessorWeight(unsigned idx); 3614 3615 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx); 3616 }; 3617 3618 template <> 3619 struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> { 3620 }; 3621 3622 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value) 3623 3624 //===----------------------------------------------------------------------===// 3625 // IndirectBrInst Class 3626 //===----------------------------------------------------------------------===// 3627 3628 //===--------------------------------------------------------------------------- 3629 /// Indirect Branch Instruction. 3630 /// 3631 class IndirectBrInst : public Instruction { 3632 unsigned ReservedSpace; 3633 3634 // Operand[0] = Address to jump to 3635 // Operand[n+1] = n-th destination 3636 IndirectBrInst(const IndirectBrInst &IBI); 3637 3638 /// Create a new indirectbr instruction, specifying an 3639 /// Address to jump to. The number of expected destinations can be specified 3640 /// here to make memory allocation more efficient. This constructor can also 3641 /// autoinsert before another instruction. 3642 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore); 3643 3644 /// Create a new indirectbr instruction, specifying an 3645 /// Address to jump to. The number of expected destinations can be specified 3646 /// here to make memory allocation more efficient. This constructor also 3647 /// autoinserts at the end of the specified BasicBlock. 3648 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd); 3649 3650 // allocate space for exactly zero operands 3651 void *operator new(size_t S) { return User::operator new(S); } 3652 3653 void init(Value *Address, unsigned NumDests); 3654 void growOperands(); 3655 3656 protected: 3657 // Note: Instruction needs to be a friend here to call cloneImpl. 3658 friend class Instruction; 3659 3660 IndirectBrInst *cloneImpl() const; 3661 3662 public: 3663 void operator delete(void *Ptr) { User::operator delete(Ptr); } 3664 3665 /// Iterator type that casts an operand to a basic block. 3666 /// 3667 /// This only makes sense because the successors are stored as adjacent 3668 /// operands for indirectbr instructions. 3669 struct succ_op_iterator 3670 : iterator_adaptor_base<succ_op_iterator, value_op_iterator, 3671 std::random_access_iterator_tag, BasicBlock *, 3672 ptrdiff_t, BasicBlock *, BasicBlock *> { 3673 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} 3674 3675 BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3676 BasicBlock *operator->() const { return operator*(); } 3677 }; 3678 3679 /// The const version of `succ_op_iterator`. 3680 struct const_succ_op_iterator 3681 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, 3682 std::random_access_iterator_tag, 3683 const BasicBlock *, ptrdiff_t, const BasicBlock *, 3684 const BasicBlock *> { 3685 explicit const_succ_op_iterator(const_value_op_iterator I) 3686 : iterator_adaptor_base(I) {} 3687 3688 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3689 const BasicBlock *operator->() const { return operator*(); } 3690 }; 3691 3692 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 3693 Instruction *InsertBefore = nullptr) { 3694 return new IndirectBrInst(Address, NumDests, InsertBefore); 3695 } 3696 3697 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 3698 BasicBlock *InsertAtEnd) { 3699 return new IndirectBrInst(Address, NumDests, InsertAtEnd); 3700 } 3701 3702 /// Provide fast operand accessors. 3703 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3704 3705 // Accessor Methods for IndirectBrInst instruction. 3706 Value *getAddress() { return getOperand(0); } 3707 const Value *getAddress() const { return getOperand(0); } 3708 void setAddress(Value *V) { setOperand(0, V); } 3709 3710 /// return the number of possible destinations in this 3711 /// indirectbr instruction. 3712 unsigned getNumDestinations() const { return getNumOperands()-1; } 3713 3714 /// Return the specified destination. 3715 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); } 3716 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); } 3717 3718 /// Add a destination. 3719 /// 3720 void addDestination(BasicBlock *Dest); 3721 3722 /// This method removes the specified successor from the 3723 /// indirectbr instruction. 3724 void removeDestination(unsigned i); 3725 3726 unsigned getNumSuccessors() const { return getNumOperands()-1; } 3727 BasicBlock *getSuccessor(unsigned i) const { 3728 return cast<BasicBlock>(getOperand(i+1)); 3729 } 3730 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 3731 setOperand(i + 1, NewSucc); 3732 } 3733 3734 iterator_range<succ_op_iterator> successors() { 3735 return make_range(succ_op_iterator(std::next(value_op_begin())), 3736 succ_op_iterator(value_op_end())); 3737 } 3738 3739 iterator_range<const_succ_op_iterator> successors() const { 3740 return make_range(const_succ_op_iterator(std::next(value_op_begin())), 3741 const_succ_op_iterator(value_op_end())); 3742 } 3743 3744 // Methods for support type inquiry through isa, cast, and dyn_cast: 3745 static bool classof(const Instruction *I) { 3746 return I->getOpcode() == Instruction::IndirectBr; 3747 } 3748 static bool classof(const Value *V) { 3749 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3750 } 3751 }; 3752 3753 template <> 3754 struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> { 3755 }; 3756 3757 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value) 3758 3759 //===----------------------------------------------------------------------===// 3760 // InvokeInst Class 3761 //===----------------------------------------------------------------------===// 3762 3763 /// Invoke instruction. The SubclassData field is used to hold the 3764 /// calling convention of the call. 3765 /// 3766 class InvokeInst : public CallBase { 3767 /// The number of operands for this call beyond the called function, 3768 /// arguments, and operand bundles. 3769 static constexpr int NumExtraOperands = 2; 3770 3771 /// The index from the end of the operand array to the normal destination. 3772 static constexpr int NormalDestOpEndIdx = -3; 3773 3774 /// The index from the end of the operand array to the unwind destination. 3775 static constexpr int UnwindDestOpEndIdx = -2; 3776 3777 InvokeInst(const InvokeInst &BI); 3778 3779 /// Construct an InvokeInst given a range of arguments. 3780 /// 3781 /// Construct an InvokeInst from a range of arguments 3782 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3783 BasicBlock *IfException, ArrayRef<Value *> Args, 3784 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3785 const Twine &NameStr, Instruction *InsertBefore); 3786 3787 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3788 BasicBlock *IfException, ArrayRef<Value *> Args, 3789 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3790 const Twine &NameStr, BasicBlock *InsertAtEnd); 3791 3792 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3793 BasicBlock *IfException, ArrayRef<Value *> Args, 3794 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 3795 3796 /// Compute the number of operands to allocate. 3797 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { 3798 // We need one operand for the called function, plus our extra operands and 3799 // the input operand counts provided. 3800 return 1 + NumExtraOperands + NumArgs + NumBundleInputs; 3801 } 3802 3803 protected: 3804 // Note: Instruction needs to be a friend here to call cloneImpl. 3805 friend class Instruction; 3806 3807 InvokeInst *cloneImpl() const; 3808 3809 public: 3810 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3811 BasicBlock *IfException, ArrayRef<Value *> Args, 3812 const Twine &NameStr, 3813 Instruction *InsertBefore = nullptr) { 3814 int NumOperands = ComputeNumOperands(Args.size()); 3815 return new (NumOperands) 3816 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, 3817 NameStr, InsertBefore); 3818 } 3819 3820 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3821 BasicBlock *IfException, ArrayRef<Value *> Args, 3822 ArrayRef<OperandBundleDef> Bundles = None, 3823 const Twine &NameStr = "", 3824 Instruction *InsertBefore = nullptr) { 3825 int NumOperands = 3826 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 3827 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 3828 3829 return new (NumOperands, DescriptorBytes) 3830 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 3831 NameStr, InsertBefore); 3832 } 3833 3834 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3835 BasicBlock *IfException, ArrayRef<Value *> Args, 3836 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3837 int NumOperands = ComputeNumOperands(Args.size()); 3838 return new (NumOperands) 3839 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, 3840 NameStr, InsertAtEnd); 3841 } 3842 3843 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3844 BasicBlock *IfException, ArrayRef<Value *> Args, 3845 ArrayRef<OperandBundleDef> Bundles, 3846 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3847 int NumOperands = 3848 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 3849 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 3850 3851 return new (NumOperands, DescriptorBytes) 3852 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 3853 NameStr, InsertAtEnd); 3854 } 3855 3856 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3857 BasicBlock *IfException, ArrayRef<Value *> Args, 3858 const Twine &NameStr, 3859 Instruction *InsertBefore = nullptr) { 3860 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3861 IfException, Args, None, NameStr, InsertBefore); 3862 } 3863 3864 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3865 BasicBlock *IfException, ArrayRef<Value *> Args, 3866 ArrayRef<OperandBundleDef> Bundles = None, 3867 const Twine &NameStr = "", 3868 Instruction *InsertBefore = nullptr) { 3869 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3870 IfException, Args, Bundles, NameStr, InsertBefore); 3871 } 3872 3873 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3874 BasicBlock *IfException, ArrayRef<Value *> Args, 3875 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3876 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3877 IfException, Args, NameStr, InsertAtEnd); 3878 } 3879 3880 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 3881 BasicBlock *IfException, ArrayRef<Value *> Args, 3882 ArrayRef<OperandBundleDef> Bundles, 3883 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3884 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 3885 IfException, Args, Bundles, NameStr, InsertAtEnd); 3886 } 3887 3888 /// Create a clone of \p II with a different set of operand bundles and 3889 /// insert it before \p InsertPt. 3890 /// 3891 /// The returned invoke instruction is identical to \p II in every way except 3892 /// that the operand bundles for the new instruction are set to the operand 3893 /// bundles in \p Bundles. 3894 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, 3895 Instruction *InsertPt = nullptr); 3896 3897 // get*Dest - Return the destination basic blocks... 3898 BasicBlock *getNormalDest() const { 3899 return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); 3900 } 3901 BasicBlock *getUnwindDest() const { 3902 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); 3903 } 3904 void setNormalDest(BasicBlock *B) { 3905 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); 3906 } 3907 void setUnwindDest(BasicBlock *B) { 3908 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); 3909 } 3910 3911 /// Get the landingpad instruction from the landing pad 3912 /// block (the unwind destination). 3913 LandingPadInst *getLandingPadInst() const; 3914 3915 BasicBlock *getSuccessor(unsigned i) const { 3916 assert(i < 2 && "Successor # out of range for invoke!"); 3917 return i == 0 ? getNormalDest() : getUnwindDest(); 3918 } 3919 3920 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 3921 assert(i < 2 && "Successor # out of range for invoke!"); 3922 if (i == 0) 3923 setNormalDest(NewSucc); 3924 else 3925 setUnwindDest(NewSucc); 3926 } 3927 3928 unsigned getNumSuccessors() const { return 2; } 3929 3930 // Methods for support type inquiry through isa, cast, and dyn_cast: 3931 static bool classof(const Instruction *I) { 3932 return (I->getOpcode() == Instruction::Invoke); 3933 } 3934 static bool classof(const Value *V) { 3935 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3936 } 3937 3938 private: 3939 // Shadow Instruction::setInstructionSubclassData with a private forwarding 3940 // method so that subclasses cannot accidentally use it. 3941 template <typename Bitfield> 3942 void setSubclassData(typename Bitfield::Type Value) { 3943 Instruction::setSubclassData<Bitfield>(Value); 3944 } 3945 }; 3946 3947 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3948 BasicBlock *IfException, ArrayRef<Value *> Args, 3949 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3950 const Twine &NameStr, Instruction *InsertBefore) 3951 : CallBase(Ty->getReturnType(), Instruction::Invoke, 3952 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 3953 InsertBefore) { 3954 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 3955 } 3956 3957 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 3958 BasicBlock *IfException, ArrayRef<Value *> Args, 3959 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3960 const Twine &NameStr, BasicBlock *InsertAtEnd) 3961 : CallBase(Ty->getReturnType(), Instruction::Invoke, 3962 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 3963 InsertAtEnd) { 3964 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 3965 } 3966 3967 //===----------------------------------------------------------------------===// 3968 // CallBrInst Class 3969 //===----------------------------------------------------------------------===// 3970 3971 /// CallBr instruction, tracking function calls that may not return control but 3972 /// instead transfer it to a third location. The SubclassData field is used to 3973 /// hold the calling convention of the call. 3974 /// 3975 class CallBrInst : public CallBase { 3976 3977 unsigned NumIndirectDests; 3978 3979 CallBrInst(const CallBrInst &BI); 3980 3981 /// Construct a CallBrInst given a range of arguments. 3982 /// 3983 /// Construct a CallBrInst from a range of arguments 3984 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 3985 ArrayRef<BasicBlock *> IndirectDests, 3986 ArrayRef<Value *> Args, 3987 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3988 const Twine &NameStr, Instruction *InsertBefore); 3989 3990 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 3991 ArrayRef<BasicBlock *> IndirectDests, 3992 ArrayRef<Value *> Args, 3993 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3994 const Twine &NameStr, BasicBlock *InsertAtEnd); 3995 3996 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, 3997 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, 3998 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 3999 4000 /// Should the Indirect Destinations change, scan + update the Arg list. 4001 void updateArgBlockAddresses(unsigned i, BasicBlock *B); 4002 4003 /// Compute the number of operands to allocate. 4004 static int ComputeNumOperands(int NumArgs, int NumIndirectDests, 4005 int NumBundleInputs = 0) { 4006 // We need one operand for the called function, plus our extra operands and 4007 // the input operand counts provided. 4008 return 2 + NumIndirectDests + NumArgs + NumBundleInputs; 4009 } 4010 4011 protected: 4012 // Note: Instruction needs to be a friend here to call cloneImpl. 4013 friend class Instruction; 4014 4015 CallBrInst *cloneImpl() const; 4016 4017 public: 4018 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4019 BasicBlock *DefaultDest, 4020 ArrayRef<BasicBlock *> IndirectDests, 4021 ArrayRef<Value *> Args, const Twine &NameStr, 4022 Instruction *InsertBefore = nullptr) { 4023 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 4024 return new (NumOperands) 4025 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, 4026 NumOperands, NameStr, InsertBefore); 4027 } 4028 4029 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4030 BasicBlock *DefaultDest, 4031 ArrayRef<BasicBlock *> IndirectDests, 4032 ArrayRef<Value *> Args, 4033 ArrayRef<OperandBundleDef> Bundles = None, 4034 const Twine &NameStr = "", 4035 Instruction *InsertBefore = nullptr) { 4036 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), 4037 CountBundleInputs(Bundles)); 4038 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4039 4040 return new (NumOperands, DescriptorBytes) 4041 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 4042 NumOperands, NameStr, InsertBefore); 4043 } 4044 4045 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4046 BasicBlock *DefaultDest, 4047 ArrayRef<BasicBlock *> IndirectDests, 4048 ArrayRef<Value *> Args, const Twine &NameStr, 4049 BasicBlock *InsertAtEnd) { 4050 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 4051 return new (NumOperands) 4052 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, 4053 NumOperands, NameStr, InsertAtEnd); 4054 } 4055 4056 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4057 BasicBlock *DefaultDest, 4058 ArrayRef<BasicBlock *> IndirectDests, 4059 ArrayRef<Value *> Args, 4060 ArrayRef<OperandBundleDef> Bundles, 4061 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4062 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), 4063 CountBundleInputs(Bundles)); 4064 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4065 4066 return new (NumOperands, DescriptorBytes) 4067 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 4068 NumOperands, NameStr, InsertAtEnd); 4069 } 4070 4071 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4072 ArrayRef<BasicBlock *> IndirectDests, 4073 ArrayRef<Value *> Args, const Twine &NameStr, 4074 Instruction *InsertBefore = nullptr) { 4075 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4076 IndirectDests, Args, NameStr, InsertBefore); 4077 } 4078 4079 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4080 ArrayRef<BasicBlock *> IndirectDests, 4081 ArrayRef<Value *> Args, 4082 ArrayRef<OperandBundleDef> Bundles = None, 4083 const Twine &NameStr = "", 4084 Instruction *InsertBefore = nullptr) { 4085 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4086 IndirectDests, Args, Bundles, NameStr, InsertBefore); 4087 } 4088 4089 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4090 ArrayRef<BasicBlock *> IndirectDests, 4091 ArrayRef<Value *> Args, const Twine &NameStr, 4092 BasicBlock *InsertAtEnd) { 4093 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4094 IndirectDests, Args, NameStr, InsertAtEnd); 4095 } 4096 4097 static CallBrInst *Create(FunctionCallee Func, 4098 BasicBlock *DefaultDest, 4099 ArrayRef<BasicBlock *> IndirectDests, 4100 ArrayRef<Value *> Args, 4101 ArrayRef<OperandBundleDef> Bundles, 4102 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4103 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4104 IndirectDests, Args, Bundles, NameStr, InsertAtEnd); 4105 } 4106 4107 /// Create a clone of \p CBI with a different set of operand bundles and 4108 /// insert it before \p InsertPt. 4109 /// 4110 /// The returned callbr instruction is identical to \p CBI in every way 4111 /// except that the operand bundles for the new instruction are set to the 4112 /// operand bundles in \p Bundles. 4113 static CallBrInst *Create(CallBrInst *CBI, 4114 ArrayRef<OperandBundleDef> Bundles, 4115 Instruction *InsertPt = nullptr); 4116 4117 /// Return the number of callbr indirect dest labels. 4118 /// 4119 unsigned getNumIndirectDests() const { return NumIndirectDests; } 4120 4121 /// getIndirectDestLabel - Return the i-th indirect dest label. 4122 /// 4123 Value *getIndirectDestLabel(unsigned i) const { 4124 assert(i < getNumIndirectDests() && "Out of bounds!"); 4125 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1); 4126 } 4127 4128 Value *getIndirectDestLabelUse(unsigned i) const { 4129 assert(i < getNumIndirectDests() && "Out of bounds!"); 4130 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1); 4131 } 4132 4133 // Return the destination basic blocks... 4134 BasicBlock *getDefaultDest() const { 4135 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); 4136 } 4137 BasicBlock *getIndirectDest(unsigned i) const { 4138 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); 4139 } 4140 SmallVector<BasicBlock *, 16> getIndirectDests() const { 4141 SmallVector<BasicBlock *, 16> IndirectDests; 4142 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) 4143 IndirectDests.push_back(getIndirectDest(i)); 4144 return IndirectDests; 4145 } 4146 void setDefaultDest(BasicBlock *B) { 4147 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); 4148 } 4149 void setIndirectDest(unsigned i, BasicBlock *B) { 4150 updateArgBlockAddresses(i, B); 4151 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); 4152 } 4153 4154 BasicBlock *getSuccessor(unsigned i) const { 4155 assert(i < getNumSuccessors() + 1 && 4156 "Successor # out of range for callbr!"); 4157 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); 4158 } 4159 4160 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4161 assert(i < getNumIndirectDests() + 1 && 4162 "Successor # out of range for callbr!"); 4163 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); 4164 } 4165 4166 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } 4167 4168 // Methods for support type inquiry through isa, cast, and dyn_cast: 4169 static bool classof(const Instruction *I) { 4170 return (I->getOpcode() == Instruction::CallBr); 4171 } 4172 static bool classof(const Value *V) { 4173 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4174 } 4175 4176 private: 4177 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4178 // method so that subclasses cannot accidentally use it. 4179 template <typename Bitfield> 4180 void setSubclassData(typename Bitfield::Type Value) { 4181 Instruction::setSubclassData<Bitfield>(Value); 4182 } 4183 }; 4184 4185 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4186 ArrayRef<BasicBlock *> IndirectDests, 4187 ArrayRef<Value *> Args, 4188 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4189 const Twine &NameStr, Instruction *InsertBefore) 4190 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4191 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4192 InsertBefore) { 4193 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4194 } 4195 4196 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4197 ArrayRef<BasicBlock *> IndirectDests, 4198 ArrayRef<Value *> Args, 4199 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4200 const Twine &NameStr, BasicBlock *InsertAtEnd) 4201 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4202 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4203 InsertAtEnd) { 4204 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4205 } 4206 4207 //===----------------------------------------------------------------------===// 4208 // ResumeInst Class 4209 //===----------------------------------------------------------------------===// 4210 4211 //===--------------------------------------------------------------------------- 4212 /// Resume the propagation of an exception. 4213 /// 4214 class ResumeInst : public Instruction { 4215 ResumeInst(const ResumeInst &RI); 4216 4217 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); 4218 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); 4219 4220 protected: 4221 // Note: Instruction needs to be a friend here to call cloneImpl. 4222 friend class Instruction; 4223 4224 ResumeInst *cloneImpl() const; 4225 4226 public: 4227 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { 4228 return new(1) ResumeInst(Exn, InsertBefore); 4229 } 4230 4231 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { 4232 return new(1) ResumeInst(Exn, InsertAtEnd); 4233 } 4234 4235 /// Provide fast operand accessors 4236 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4237 4238 /// Convenience accessor. 4239 Value *getValue() const { return Op<0>(); } 4240 4241 unsigned getNumSuccessors() const { return 0; } 4242 4243 // Methods for support type inquiry through isa, cast, and dyn_cast: 4244 static bool classof(const Instruction *I) { 4245 return I->getOpcode() == Instruction::Resume; 4246 } 4247 static bool classof(const Value *V) { 4248 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4249 } 4250 4251 private: 4252 BasicBlock *getSuccessor(unsigned idx) const { 4253 llvm_unreachable("ResumeInst has no successors!"); 4254 } 4255 4256 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 4257 llvm_unreachable("ResumeInst has no successors!"); 4258 } 4259 }; 4260 4261 template <> 4262 struct OperandTraits<ResumeInst> : 4263 public FixedNumOperandTraits<ResumeInst, 1> { 4264 }; 4265 4266 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value) 4267 4268 //===----------------------------------------------------------------------===// 4269 // CatchSwitchInst Class 4270 //===----------------------------------------------------------------------===// 4271 class CatchSwitchInst : public Instruction { 4272 using UnwindDestField = BoolBitfieldElementT<0>; 4273 4274 /// The number of operands actually allocated. NumOperands is 4275 /// the number actually in use. 4276 unsigned ReservedSpace; 4277 4278 // Operand[0] = Outer scope 4279 // Operand[1] = Unwind block destination 4280 // Operand[n] = BasicBlock to go to on match 4281 CatchSwitchInst(const CatchSwitchInst &CSI); 4282 4283 /// Create a new switch instruction, specifying a 4284 /// default destination. The number of additional handlers can be specified 4285 /// here to make memory allocation more efficient. 4286 /// This constructor can also autoinsert before another instruction. 4287 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4288 unsigned NumHandlers, const Twine &NameStr, 4289 Instruction *InsertBefore); 4290 4291 /// Create a new switch instruction, specifying a 4292 /// default destination. The number of additional handlers can be specified 4293 /// here to make memory allocation more efficient. 4294 /// This constructor also autoinserts at the end of the specified BasicBlock. 4295 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4296 unsigned NumHandlers, const Twine &NameStr, 4297 BasicBlock *InsertAtEnd); 4298 4299 // allocate space for exactly zero operands 4300 void *operator new(size_t S) { return User::operator new(S); } 4301 4302 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); 4303 void growOperands(unsigned Size); 4304 4305 protected: 4306 // Note: Instruction needs to be a friend here to call cloneImpl. 4307 friend class Instruction; 4308 4309 CatchSwitchInst *cloneImpl() const; 4310 4311 public: 4312 void operator delete(void *Ptr) { return User::operator delete(Ptr); } 4313 4314 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4315 unsigned NumHandlers, 4316 const Twine &NameStr = "", 4317 Instruction *InsertBefore = nullptr) { 4318 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4319 InsertBefore); 4320 } 4321 4322 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4323 unsigned NumHandlers, const Twine &NameStr, 4324 BasicBlock *InsertAtEnd) { 4325 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4326 InsertAtEnd); 4327 } 4328 4329 /// Provide fast operand accessors 4330 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4331 4332 // Accessor Methods for CatchSwitch stmt 4333 Value *getParentPad() const { return getOperand(0); } 4334 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } 4335 4336 // Accessor Methods for CatchSwitch stmt 4337 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } 4338 bool unwindsToCaller() const { return !hasUnwindDest(); } 4339 BasicBlock *getUnwindDest() const { 4340 if (hasUnwindDest()) 4341 return cast<BasicBlock>(getOperand(1)); 4342 return nullptr; 4343 } 4344 void setUnwindDest(BasicBlock *UnwindDest) { 4345 assert(UnwindDest); 4346 assert(hasUnwindDest()); 4347 setOperand(1, UnwindDest); 4348 } 4349 4350 /// return the number of 'handlers' in this catchswitch 4351 /// instruction, except the default handler 4352 unsigned getNumHandlers() const { 4353 if (hasUnwindDest()) 4354 return getNumOperands() - 2; 4355 return getNumOperands() - 1; 4356 } 4357 4358 private: 4359 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } 4360 static const BasicBlock *handler_helper(const Value *V) { 4361 return cast<BasicBlock>(V); 4362 } 4363 4364 public: 4365 using DerefFnTy = BasicBlock *(*)(Value *); 4366 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; 4367 using handler_range = iterator_range<handler_iterator>; 4368 using ConstDerefFnTy = const BasicBlock *(*)(const Value *); 4369 using const_handler_iterator = 4370 mapped_iterator<const_op_iterator, ConstDerefFnTy>; 4371 using const_handler_range = iterator_range<const_handler_iterator>; 4372 4373 /// Returns an iterator that points to the first handler in CatchSwitchInst. 4374 handler_iterator handler_begin() { 4375 op_iterator It = op_begin() + 1; 4376 if (hasUnwindDest()) 4377 ++It; 4378 return handler_iterator(It, DerefFnTy(handler_helper)); 4379 } 4380 4381 /// Returns an iterator that points to the first handler in the 4382 /// CatchSwitchInst. 4383 const_handler_iterator handler_begin() const { 4384 const_op_iterator It = op_begin() + 1; 4385 if (hasUnwindDest()) 4386 ++It; 4387 return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); 4388 } 4389 4390 /// Returns a read-only iterator that points one past the last 4391 /// handler in the CatchSwitchInst. 4392 handler_iterator handler_end() { 4393 return handler_iterator(op_end(), DerefFnTy(handler_helper)); 4394 } 4395 4396 /// Returns an iterator that points one past the last handler in the 4397 /// CatchSwitchInst. 4398 const_handler_iterator handler_end() const { 4399 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); 4400 } 4401 4402 /// iteration adapter for range-for loops. 4403 handler_range handlers() { 4404 return make_range(handler_begin(), handler_end()); 4405 } 4406 4407 /// iteration adapter for range-for loops. 4408 const_handler_range handlers() const { 4409 return make_range(handler_begin(), handler_end()); 4410 } 4411 4412 /// Add an entry to the switch instruction... 4413 /// Note: 4414 /// This action invalidates handler_end(). Old handler_end() iterator will 4415 /// point to the added handler. 4416 void addHandler(BasicBlock *Dest); 4417 4418 void removeHandler(handler_iterator HI); 4419 4420 unsigned getNumSuccessors() const { return getNumOperands() - 1; } 4421 BasicBlock *getSuccessor(unsigned Idx) const { 4422 assert(Idx < getNumSuccessors() && 4423 "Successor # out of range for catchswitch!"); 4424 return cast<BasicBlock>(getOperand(Idx + 1)); 4425 } 4426 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { 4427 assert(Idx < getNumSuccessors() && 4428 "Successor # out of range for catchswitch!"); 4429 setOperand(Idx + 1, NewSucc); 4430 } 4431 4432 // Methods for support type inquiry through isa, cast, and dyn_cast: 4433 static bool classof(const Instruction *I) { 4434 return I->getOpcode() == Instruction::CatchSwitch; 4435 } 4436 static bool classof(const Value *V) { 4437 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4438 } 4439 }; 4440 4441 template <> 4442 struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; 4443 4444 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value) 4445 4446 //===----------------------------------------------------------------------===// 4447 // CleanupPadInst Class 4448 //===----------------------------------------------------------------------===// 4449 class CleanupPadInst : public FuncletPadInst { 4450 private: 4451 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4452 unsigned Values, const Twine &NameStr, 4453 Instruction *InsertBefore) 4454 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4455 NameStr, InsertBefore) {} 4456 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4457 unsigned Values, const Twine &NameStr, 4458 BasicBlock *InsertAtEnd) 4459 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4460 NameStr, InsertAtEnd) {} 4461 4462 public: 4463 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None, 4464 const Twine &NameStr = "", 4465 Instruction *InsertBefore = nullptr) { 4466 unsigned Values = 1 + Args.size(); 4467 return new (Values) 4468 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); 4469 } 4470 4471 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, 4472 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4473 unsigned Values = 1 + Args.size(); 4474 return new (Values) 4475 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); 4476 } 4477 4478 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4479 static bool classof(const Instruction *I) { 4480 return I->getOpcode() == Instruction::CleanupPad; 4481 } 4482 static bool classof(const Value *V) { 4483 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4484 } 4485 }; 4486 4487 //===----------------------------------------------------------------------===// 4488 // CatchPadInst Class 4489 //===----------------------------------------------------------------------===// 4490 class CatchPadInst : public FuncletPadInst { 4491 private: 4492 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 4493 unsigned Values, const Twine &NameStr, 4494 Instruction *InsertBefore) 4495 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 4496 NameStr, InsertBefore) {} 4497 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 4498 unsigned Values, const Twine &NameStr, 4499 BasicBlock *InsertAtEnd) 4500 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 4501 NameStr, InsertAtEnd) {} 4502 4503 public: 4504 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 4505 const Twine &NameStr = "", 4506 Instruction *InsertBefore = nullptr) { 4507 unsigned Values = 1 + Args.size(); 4508 return new (Values) 4509 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); 4510 } 4511 4512 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 4513 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4514 unsigned Values = 1 + Args.size(); 4515 return new (Values) 4516 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); 4517 } 4518 4519 /// Convenience accessors 4520 CatchSwitchInst *getCatchSwitch() const { 4521 return cast<CatchSwitchInst>(Op<-1>()); 4522 } 4523 void setCatchSwitch(Value *CatchSwitch) { 4524 assert(CatchSwitch); 4525 Op<-1>() = CatchSwitch; 4526 } 4527 4528 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4529 static bool classof(const Instruction *I) { 4530 return I->getOpcode() == Instruction::CatchPad; 4531 } 4532 static bool classof(const Value *V) { 4533 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4534 } 4535 }; 4536 4537 //===----------------------------------------------------------------------===// 4538 // CatchReturnInst Class 4539 //===----------------------------------------------------------------------===// 4540 4541 class CatchReturnInst : public Instruction { 4542 CatchReturnInst(const CatchReturnInst &RI); 4543 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); 4544 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); 4545 4546 void init(Value *CatchPad, BasicBlock *BB); 4547 4548 protected: 4549 // Note: Instruction needs to be a friend here to call cloneImpl. 4550 friend class Instruction; 4551 4552 CatchReturnInst *cloneImpl() const; 4553 4554 public: 4555 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 4556 Instruction *InsertBefore = nullptr) { 4557 assert(CatchPad); 4558 assert(BB); 4559 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); 4560 } 4561 4562 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 4563 BasicBlock *InsertAtEnd) { 4564 assert(CatchPad); 4565 assert(BB); 4566 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); 4567 } 4568 4569 /// Provide fast operand accessors 4570 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4571 4572 /// Convenience accessors. 4573 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } 4574 void setCatchPad(CatchPadInst *CatchPad) { 4575 assert(CatchPad); 4576 Op<0>() = CatchPad; 4577 } 4578 4579 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } 4580 void setSuccessor(BasicBlock *NewSucc) { 4581 assert(NewSucc); 4582 Op<1>() = NewSucc; 4583 } 4584 unsigned getNumSuccessors() const { return 1; } 4585 4586 /// Get the parentPad of this catchret's catchpad's catchswitch. 4587 /// The successor block is implicitly a member of this funclet. 4588 Value *getCatchSwitchParentPad() const { 4589 return getCatchPad()->getCatchSwitch()->getParentPad(); 4590 } 4591 4592 // Methods for support type inquiry through isa, cast, and dyn_cast: 4593 static bool classof(const Instruction *I) { 4594 return (I->getOpcode() == Instruction::CatchRet); 4595 } 4596 static bool classof(const Value *V) { 4597 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4598 } 4599 4600 private: 4601 BasicBlock *getSuccessor(unsigned Idx) const { 4602 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 4603 return getSuccessor(); 4604 } 4605 4606 void setSuccessor(unsigned Idx, BasicBlock *B) { 4607 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 4608 setSuccessor(B); 4609 } 4610 }; 4611 4612 template <> 4613 struct OperandTraits<CatchReturnInst> 4614 : public FixedNumOperandTraits<CatchReturnInst, 2> {}; 4615 4616 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value) 4617 4618 //===----------------------------------------------------------------------===// 4619 // CleanupReturnInst Class 4620 //===----------------------------------------------------------------------===// 4621 4622 class CleanupReturnInst : public Instruction { 4623 using UnwindDestField = BoolBitfieldElementT<0>; 4624 4625 private: 4626 CleanupReturnInst(const CleanupReturnInst &RI); 4627 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 4628 Instruction *InsertBefore = nullptr); 4629 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 4630 BasicBlock *InsertAtEnd); 4631 4632 void init(Value *CleanupPad, BasicBlock *UnwindBB); 4633 4634 protected: 4635 // Note: Instruction needs to be a friend here to call cloneImpl. 4636 friend class Instruction; 4637 4638 CleanupReturnInst *cloneImpl() const; 4639 4640 public: 4641 static CleanupReturnInst *Create(Value *CleanupPad, 4642 BasicBlock *UnwindBB = nullptr, 4643 Instruction *InsertBefore = nullptr) { 4644 assert(CleanupPad); 4645 unsigned Values = 1; 4646 if (UnwindBB) 4647 ++Values; 4648 return new (Values) 4649 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); 4650 } 4651 4652 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, 4653 BasicBlock *InsertAtEnd) { 4654 assert(CleanupPad); 4655 unsigned Values = 1; 4656 if (UnwindBB) 4657 ++Values; 4658 return new (Values) 4659 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); 4660 } 4661 4662 /// Provide fast operand accessors 4663 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4664 4665 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } 4666 bool unwindsToCaller() const { return !hasUnwindDest(); } 4667 4668 /// Convenience accessor. 4669 CleanupPadInst *getCleanupPad() const { 4670 return cast<CleanupPadInst>(Op<0>()); 4671 } 4672 void setCleanupPad(CleanupPadInst *CleanupPad) { 4673 assert(CleanupPad); 4674 Op<0>() = CleanupPad; 4675 } 4676 4677 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } 4678 4679 BasicBlock *getUnwindDest() const { 4680 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr; 4681 } 4682 void setUnwindDest(BasicBlock *NewDest) { 4683 assert(NewDest); 4684 assert(hasUnwindDest()); 4685 Op<1>() = NewDest; 4686 } 4687 4688 // Methods for support type inquiry through isa, cast, and dyn_cast: 4689 static bool classof(const Instruction *I) { 4690 return (I->getOpcode() == Instruction::CleanupRet); 4691 } 4692 static bool classof(const Value *V) { 4693 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4694 } 4695 4696 private: 4697 BasicBlock *getSuccessor(unsigned Idx) const { 4698 assert(Idx == 0); 4699 return getUnwindDest(); 4700 } 4701 4702 void setSuccessor(unsigned Idx, BasicBlock *B) { 4703 assert(Idx == 0); 4704 setUnwindDest(B); 4705 } 4706 4707 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4708 // method so that subclasses cannot accidentally use it. 4709 template <typename Bitfield> 4710 void setSubclassData(typename Bitfield::Type Value) { 4711 Instruction::setSubclassData<Bitfield>(Value); 4712 } 4713 }; 4714 4715 template <> 4716 struct OperandTraits<CleanupReturnInst> 4717 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {}; 4718 4719 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value) 4720 4721 //===----------------------------------------------------------------------===// 4722 // UnreachableInst Class 4723 //===----------------------------------------------------------------------===// 4724 4725 //===--------------------------------------------------------------------------- 4726 /// This function has undefined behavior. In particular, the 4727 /// presence of this instruction indicates some higher level knowledge that the 4728 /// end of the block cannot be reached. 4729 /// 4730 class UnreachableInst : public Instruction { 4731 protected: 4732 // Note: Instruction needs to be a friend here to call cloneImpl. 4733 friend class Instruction; 4734 4735 UnreachableInst *cloneImpl() const; 4736 4737 public: 4738 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); 4739 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); 4740 4741 // allocate space for exactly zero operands 4742 void *operator new(size_t S) { return User::operator new(S, 0); } 4743 void operator delete(void *Ptr) { User::operator delete(Ptr); } 4744 4745 unsigned getNumSuccessors() const { return 0; } 4746 4747 // Methods for support type inquiry through isa, cast, and dyn_cast: 4748 static bool classof(const Instruction *I) { 4749 return I->getOpcode() == Instruction::Unreachable; 4750 } 4751 static bool classof(const Value *V) { 4752 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4753 } 4754 4755 private: 4756 BasicBlock *getSuccessor(unsigned idx) const { 4757 llvm_unreachable("UnreachableInst has no successors!"); 4758 } 4759 4760 void setSuccessor(unsigned idx, BasicBlock *B) { 4761 llvm_unreachable("UnreachableInst has no successors!"); 4762 } 4763 }; 4764 4765 //===----------------------------------------------------------------------===// 4766 // TruncInst Class 4767 //===----------------------------------------------------------------------===// 4768 4769 /// This class represents a truncation of integer types. 4770 class TruncInst : public CastInst { 4771 protected: 4772 // Note: Instruction needs to be a friend here to call cloneImpl. 4773 friend class Instruction; 4774 4775 /// Clone an identical TruncInst 4776 TruncInst *cloneImpl() const; 4777 4778 public: 4779 /// Constructor with insert-before-instruction semantics 4780 TruncInst( 4781 Value *S, ///< The value to be truncated 4782 Type *Ty, ///< The (smaller) type to truncate to 4783 const Twine &NameStr = "", ///< A name for the new instruction 4784 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4785 ); 4786 4787 /// Constructor with insert-at-end-of-block semantics 4788 TruncInst( 4789 Value *S, ///< The value to be truncated 4790 Type *Ty, ///< The (smaller) type to truncate to 4791 const Twine &NameStr, ///< A name for the new instruction 4792 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4793 ); 4794 4795 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4796 static bool classof(const Instruction *I) { 4797 return I->getOpcode() == Trunc; 4798 } 4799 static bool classof(const Value *V) { 4800 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4801 } 4802 }; 4803 4804 //===----------------------------------------------------------------------===// 4805 // ZExtInst Class 4806 //===----------------------------------------------------------------------===// 4807 4808 /// This class represents zero extension of integer types. 4809 class ZExtInst : public CastInst { 4810 protected: 4811 // Note: Instruction needs to be a friend here to call cloneImpl. 4812 friend class Instruction; 4813 4814 /// Clone an identical ZExtInst 4815 ZExtInst *cloneImpl() const; 4816 4817 public: 4818 /// Constructor with insert-before-instruction semantics 4819 ZExtInst( 4820 Value *S, ///< The value to be zero extended 4821 Type *Ty, ///< The type to zero extend to 4822 const Twine &NameStr = "", ///< A name for the new instruction 4823 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4824 ); 4825 4826 /// Constructor with insert-at-end semantics. 4827 ZExtInst( 4828 Value *S, ///< The value to be zero extended 4829 Type *Ty, ///< The type to zero extend to 4830 const Twine &NameStr, ///< A name for the new instruction 4831 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4832 ); 4833 4834 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4835 static bool classof(const Instruction *I) { 4836 return I->getOpcode() == ZExt; 4837 } 4838 static bool classof(const Value *V) { 4839 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4840 } 4841 }; 4842 4843 //===----------------------------------------------------------------------===// 4844 // SExtInst Class 4845 //===----------------------------------------------------------------------===// 4846 4847 /// This class represents a sign extension of integer types. 4848 class SExtInst : public CastInst { 4849 protected: 4850 // Note: Instruction needs to be a friend here to call cloneImpl. 4851 friend class Instruction; 4852 4853 /// Clone an identical SExtInst 4854 SExtInst *cloneImpl() const; 4855 4856 public: 4857 /// Constructor with insert-before-instruction semantics 4858 SExtInst( 4859 Value *S, ///< The value to be sign extended 4860 Type *Ty, ///< The type to sign extend to 4861 const Twine &NameStr = "", ///< A name for the new instruction 4862 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4863 ); 4864 4865 /// Constructor with insert-at-end-of-block semantics 4866 SExtInst( 4867 Value *S, ///< The value to be sign extended 4868 Type *Ty, ///< The type to sign extend to 4869 const Twine &NameStr, ///< A name for the new instruction 4870 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4871 ); 4872 4873 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4874 static bool classof(const Instruction *I) { 4875 return I->getOpcode() == SExt; 4876 } 4877 static bool classof(const Value *V) { 4878 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4879 } 4880 }; 4881 4882 //===----------------------------------------------------------------------===// 4883 // FPTruncInst Class 4884 //===----------------------------------------------------------------------===// 4885 4886 /// This class represents a truncation of floating point types. 4887 class FPTruncInst : public CastInst { 4888 protected: 4889 // Note: Instruction needs to be a friend here to call cloneImpl. 4890 friend class Instruction; 4891 4892 /// Clone an identical FPTruncInst 4893 FPTruncInst *cloneImpl() const; 4894 4895 public: 4896 /// Constructor with insert-before-instruction semantics 4897 FPTruncInst( 4898 Value *S, ///< The value to be truncated 4899 Type *Ty, ///< The type to truncate to 4900 const Twine &NameStr = "", ///< A name for the new instruction 4901 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4902 ); 4903 4904 /// Constructor with insert-before-instruction semantics 4905 FPTruncInst( 4906 Value *S, ///< The value to be truncated 4907 Type *Ty, ///< The type to truncate to 4908 const Twine &NameStr, ///< A name for the new instruction 4909 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4910 ); 4911 4912 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4913 static bool classof(const Instruction *I) { 4914 return I->getOpcode() == FPTrunc; 4915 } 4916 static bool classof(const Value *V) { 4917 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4918 } 4919 }; 4920 4921 //===----------------------------------------------------------------------===// 4922 // FPExtInst Class 4923 //===----------------------------------------------------------------------===// 4924 4925 /// This class represents an extension of floating point types. 4926 class FPExtInst : public CastInst { 4927 protected: 4928 // Note: Instruction needs to be a friend here to call cloneImpl. 4929 friend class Instruction; 4930 4931 /// Clone an identical FPExtInst 4932 FPExtInst *cloneImpl() const; 4933 4934 public: 4935 /// Constructor with insert-before-instruction semantics 4936 FPExtInst( 4937 Value *S, ///< The value to be extended 4938 Type *Ty, ///< The type to extend to 4939 const Twine &NameStr = "", ///< A name for the new instruction 4940 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4941 ); 4942 4943 /// Constructor with insert-at-end-of-block semantics 4944 FPExtInst( 4945 Value *S, ///< The value to be extended 4946 Type *Ty, ///< The type to extend to 4947 const Twine &NameStr, ///< A name for the new instruction 4948 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4949 ); 4950 4951 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4952 static bool classof(const Instruction *I) { 4953 return I->getOpcode() == FPExt; 4954 } 4955 static bool classof(const Value *V) { 4956 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4957 } 4958 }; 4959 4960 //===----------------------------------------------------------------------===// 4961 // UIToFPInst Class 4962 //===----------------------------------------------------------------------===// 4963 4964 /// This class represents a cast unsigned integer to floating point. 4965 class UIToFPInst : public CastInst { 4966 protected: 4967 // Note: Instruction needs to be a friend here to call cloneImpl. 4968 friend class Instruction; 4969 4970 /// Clone an identical UIToFPInst 4971 UIToFPInst *cloneImpl() const; 4972 4973 public: 4974 /// Constructor with insert-before-instruction semantics 4975 UIToFPInst( 4976 Value *S, ///< The value to be converted 4977 Type *Ty, ///< The type to convert to 4978 const Twine &NameStr = "", ///< A name for the new instruction 4979 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4980 ); 4981 4982 /// Constructor with insert-at-end-of-block semantics 4983 UIToFPInst( 4984 Value *S, ///< The value to be converted 4985 Type *Ty, ///< The type to convert to 4986 const Twine &NameStr, ///< A name for the new instruction 4987 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4988 ); 4989 4990 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4991 static bool classof(const Instruction *I) { 4992 return I->getOpcode() == UIToFP; 4993 } 4994 static bool classof(const Value *V) { 4995 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4996 } 4997 }; 4998 4999 //===----------------------------------------------------------------------===// 5000 // SIToFPInst Class 5001 //===----------------------------------------------------------------------===// 5002 5003 /// This class represents a cast from signed integer to floating point. 5004 class SIToFPInst : public CastInst { 5005 protected: 5006 // Note: Instruction needs to be a friend here to call cloneImpl. 5007 friend class Instruction; 5008 5009 /// Clone an identical SIToFPInst 5010 SIToFPInst *cloneImpl() const; 5011 5012 public: 5013 /// Constructor with insert-before-instruction semantics 5014 SIToFPInst( 5015 Value *S, ///< The value to be converted 5016 Type *Ty, ///< The type to convert to 5017 const Twine &NameStr = "", ///< A name for the new instruction 5018 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5019 ); 5020 5021 /// Constructor with insert-at-end-of-block semantics 5022 SIToFPInst( 5023 Value *S, ///< The value to be converted 5024 Type *Ty, ///< The type to convert to 5025 const Twine &NameStr, ///< A name for the new instruction 5026 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5027 ); 5028 5029 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5030 static bool classof(const Instruction *I) { 5031 return I->getOpcode() == SIToFP; 5032 } 5033 static bool classof(const Value *V) { 5034 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5035 } 5036 }; 5037 5038 //===----------------------------------------------------------------------===// 5039 // FPToUIInst Class 5040 //===----------------------------------------------------------------------===// 5041 5042 /// This class represents a cast from floating point to unsigned integer 5043 class FPToUIInst : public CastInst { 5044 protected: 5045 // Note: Instruction needs to be a friend here to call cloneImpl. 5046 friend class Instruction; 5047 5048 /// Clone an identical FPToUIInst 5049 FPToUIInst *cloneImpl() const; 5050 5051 public: 5052 /// Constructor with insert-before-instruction semantics 5053 FPToUIInst( 5054 Value *S, ///< The value to be converted 5055 Type *Ty, ///< The type to convert to 5056 const Twine &NameStr = "", ///< A name for the new instruction 5057 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5058 ); 5059 5060 /// Constructor with insert-at-end-of-block semantics 5061 FPToUIInst( 5062 Value *S, ///< The value to be converted 5063 Type *Ty, ///< The type to convert to 5064 const Twine &NameStr, ///< A name for the new instruction 5065 BasicBlock *InsertAtEnd ///< Where to insert the new instruction 5066 ); 5067 5068 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5069 static bool classof(const Instruction *I) { 5070 return I->getOpcode() == FPToUI; 5071 } 5072 static bool classof(const Value *V) { 5073 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5074 } 5075 }; 5076 5077 //===----------------------------------------------------------------------===// 5078 // FPToSIInst Class 5079 //===----------------------------------------------------------------------===// 5080 5081 /// This class represents a cast from floating point to signed integer. 5082 class FPToSIInst : public CastInst { 5083 protected: 5084 // Note: Instruction needs to be a friend here to call cloneImpl. 5085 friend class Instruction; 5086 5087 /// Clone an identical FPToSIInst 5088 FPToSIInst *cloneImpl() const; 5089 5090 public: 5091 /// Constructor with insert-before-instruction semantics 5092 FPToSIInst( 5093 Value *S, ///< The value to be converted 5094 Type *Ty, ///< The type to convert to 5095 const Twine &NameStr = "", ///< A name for the new instruction 5096 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5097 ); 5098 5099 /// Constructor with insert-at-end-of-block semantics 5100 FPToSIInst( 5101 Value *S, ///< The value to be converted 5102 Type *Ty, ///< The type to convert to 5103 const Twine &NameStr, ///< A name for the new instruction 5104 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5105 ); 5106 5107 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5108 static bool classof(const Instruction *I) { 5109 return I->getOpcode() == FPToSI; 5110 } 5111 static bool classof(const Value *V) { 5112 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5113 } 5114 }; 5115 5116 //===----------------------------------------------------------------------===// 5117 // IntToPtrInst Class 5118 //===----------------------------------------------------------------------===// 5119 5120 /// This class represents a cast from an integer to a pointer. 5121 class IntToPtrInst : public CastInst { 5122 public: 5123 // Note: Instruction needs to be a friend here to call cloneImpl. 5124 friend class Instruction; 5125 5126 /// Constructor with insert-before-instruction semantics 5127 IntToPtrInst( 5128 Value *S, ///< The value to be converted 5129 Type *Ty, ///< The type to convert to 5130 const Twine &NameStr = "", ///< A name for the new instruction 5131 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5132 ); 5133 5134 /// Constructor with insert-at-end-of-block semantics 5135 IntToPtrInst( 5136 Value *S, ///< The value to be converted 5137 Type *Ty, ///< The type to convert to 5138 const Twine &NameStr, ///< A name for the new instruction 5139 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5140 ); 5141 5142 /// Clone an identical IntToPtrInst. 5143 IntToPtrInst *cloneImpl() const; 5144 5145 /// Returns the address space of this instruction's pointer type. 5146 unsigned getAddressSpace() const { 5147 return getType()->getPointerAddressSpace(); 5148 } 5149 5150 // Methods for support type inquiry through isa, cast, and dyn_cast: 5151 static bool classof(const Instruction *I) { 5152 return I->getOpcode() == IntToPtr; 5153 } 5154 static bool classof(const Value *V) { 5155 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5156 } 5157 }; 5158 5159 //===----------------------------------------------------------------------===// 5160 // PtrToIntInst Class 5161 //===----------------------------------------------------------------------===// 5162 5163 /// This class represents a cast from a pointer to an integer. 5164 class PtrToIntInst : public CastInst { 5165 protected: 5166 // Note: Instruction needs to be a friend here to call cloneImpl. 5167 friend class Instruction; 5168 5169 /// Clone an identical PtrToIntInst. 5170 PtrToIntInst *cloneImpl() const; 5171 5172 public: 5173 /// Constructor with insert-before-instruction semantics 5174 PtrToIntInst( 5175 Value *S, ///< The value to be converted 5176 Type *Ty, ///< The type to convert to 5177 const Twine &NameStr = "", ///< A name for the new instruction 5178 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5179 ); 5180 5181 /// Constructor with insert-at-end-of-block semantics 5182 PtrToIntInst( 5183 Value *S, ///< The value to be converted 5184 Type *Ty, ///< The type to convert to 5185 const Twine &NameStr, ///< A name for the new instruction 5186 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5187 ); 5188 5189 /// Gets the pointer operand. 5190 Value *getPointerOperand() { return getOperand(0); } 5191 /// Gets the pointer operand. 5192 const Value *getPointerOperand() const { return getOperand(0); } 5193 /// Gets the operand index of the pointer operand. 5194 static unsigned getPointerOperandIndex() { return 0U; } 5195 5196 /// Returns the address space of the pointer operand. 5197 unsigned getPointerAddressSpace() const { 5198 return getPointerOperand()->getType()->getPointerAddressSpace(); 5199 } 5200 5201 // Methods for support type inquiry through isa, cast, and dyn_cast: 5202 static bool classof(const Instruction *I) { 5203 return I->getOpcode() == PtrToInt; 5204 } 5205 static bool classof(const Value *V) { 5206 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5207 } 5208 }; 5209 5210 //===----------------------------------------------------------------------===// 5211 // BitCastInst Class 5212 //===----------------------------------------------------------------------===// 5213 5214 /// This class represents a no-op cast from one type to another. 5215 class BitCastInst : public CastInst { 5216 protected: 5217 // Note: Instruction needs to be a friend here to call cloneImpl. 5218 friend class Instruction; 5219 5220 /// Clone an identical BitCastInst. 5221 BitCastInst *cloneImpl() const; 5222 5223 public: 5224 /// Constructor with insert-before-instruction semantics 5225 BitCastInst( 5226 Value *S, ///< The value to be casted 5227 Type *Ty, ///< The type to casted to 5228 const Twine &NameStr = "", ///< A name for the new instruction 5229 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5230 ); 5231 5232 /// Constructor with insert-at-end-of-block semantics 5233 BitCastInst( 5234 Value *S, ///< The value to be casted 5235 Type *Ty, ///< The type to casted to 5236 const Twine &NameStr, ///< A name for the new instruction 5237 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5238 ); 5239 5240 // Methods for support type inquiry through isa, cast, and dyn_cast: 5241 static bool classof(const Instruction *I) { 5242 return I->getOpcode() == BitCast; 5243 } 5244 static bool classof(const Value *V) { 5245 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5246 } 5247 }; 5248 5249 //===----------------------------------------------------------------------===// 5250 // AddrSpaceCastInst Class 5251 //===----------------------------------------------------------------------===// 5252 5253 /// This class represents a conversion between pointers from one address space 5254 /// to another. 5255 class AddrSpaceCastInst : public CastInst { 5256 protected: 5257 // Note: Instruction needs to be a friend here to call cloneImpl. 5258 friend class Instruction; 5259 5260 /// Clone an identical AddrSpaceCastInst. 5261 AddrSpaceCastInst *cloneImpl() const; 5262 5263 public: 5264 /// Constructor with insert-before-instruction semantics 5265 AddrSpaceCastInst( 5266 Value *S, ///< The value to be casted 5267 Type *Ty, ///< The type to casted to 5268 const Twine &NameStr = "", ///< A name for the new instruction 5269 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5270 ); 5271 5272 /// Constructor with insert-at-end-of-block semantics 5273 AddrSpaceCastInst( 5274 Value *S, ///< The value to be casted 5275 Type *Ty, ///< The type to casted to 5276 const Twine &NameStr, ///< A name for the new instruction 5277 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5278 ); 5279 5280 // Methods for support type inquiry through isa, cast, and dyn_cast: 5281 static bool classof(const Instruction *I) { 5282 return I->getOpcode() == AddrSpaceCast; 5283 } 5284 static bool classof(const Value *V) { 5285 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5286 } 5287 5288 /// Gets the pointer operand. 5289 Value *getPointerOperand() { 5290 return getOperand(0); 5291 } 5292 5293 /// Gets the pointer operand. 5294 const Value *getPointerOperand() const { 5295 return getOperand(0); 5296 } 5297 5298 /// Gets the operand index of the pointer operand. 5299 static unsigned getPointerOperandIndex() { 5300 return 0U; 5301 } 5302 5303 /// Returns the address space of the pointer operand. 5304 unsigned getSrcAddressSpace() const { 5305 return getPointerOperand()->getType()->getPointerAddressSpace(); 5306 } 5307 5308 /// Returns the address space of the result. 5309 unsigned getDestAddressSpace() const { 5310 return getType()->getPointerAddressSpace(); 5311 } 5312 }; 5313 5314 /// A helper function that returns the pointer operand of a load or store 5315 /// instruction. Returns nullptr if not load or store. 5316 inline const Value *getLoadStorePointerOperand(const Value *V) { 5317 if (auto *Load = dyn_cast<LoadInst>(V)) 5318 return Load->getPointerOperand(); 5319 if (auto *Store = dyn_cast<StoreInst>(V)) 5320 return Store->getPointerOperand(); 5321 return nullptr; 5322 } 5323 inline Value *getLoadStorePointerOperand(Value *V) { 5324 return const_cast<Value *>( 5325 getLoadStorePointerOperand(static_cast<const Value *>(V))); 5326 } 5327 5328 /// A helper function that returns the pointer operand of a load, store 5329 /// or GEP instruction. Returns nullptr if not load, store, or GEP. 5330 inline const Value *getPointerOperand(const Value *V) { 5331 if (auto *Ptr = getLoadStorePointerOperand(V)) 5332 return Ptr; 5333 if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) 5334 return Gep->getPointerOperand(); 5335 return nullptr; 5336 } 5337 inline Value *getPointerOperand(Value *V) { 5338 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V))); 5339 } 5340 5341 /// A helper function that returns the alignment of load or store instruction. 5342 inline Align getLoadStoreAlignment(Value *I) { 5343 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 5344 "Expected Load or Store instruction"); 5345 if (auto *LI = dyn_cast<LoadInst>(I)) 5346 return LI->getAlign(); 5347 return cast<StoreInst>(I)->getAlign(); 5348 } 5349 5350 /// A helper function that returns the address space of the pointer operand of 5351 /// load or store instruction. 5352 inline unsigned getLoadStoreAddressSpace(Value *I) { 5353 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 5354 "Expected Load or Store instruction"); 5355 if (auto *LI = dyn_cast<LoadInst>(I)) 5356 return LI->getPointerAddressSpace(); 5357 return cast<StoreInst>(I)->getPointerAddressSpace(); 5358 } 5359 5360 /// A helper function that returns the type of a load or store instruction. 5361 inline Type *getLoadStoreType(Value *I) { 5362 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 5363 "Expected Load or Store instruction"); 5364 if (auto *LI = dyn_cast<LoadInst>(I)) 5365 return LI->getType(); 5366 return cast<StoreInst>(I)->getValueOperand()->getType(); 5367 } 5368 5369 //===----------------------------------------------------------------------===// 5370 // FreezeInst Class 5371 //===----------------------------------------------------------------------===// 5372 5373 /// This class represents a freeze function that returns random concrete 5374 /// value if an operand is either a poison value or an undef value 5375 class FreezeInst : public UnaryInstruction { 5376 protected: 5377 // Note: Instruction needs to be a friend here to call cloneImpl. 5378 friend class Instruction; 5379 5380 /// Clone an identical FreezeInst 5381 FreezeInst *cloneImpl() const; 5382 5383 public: 5384 explicit FreezeInst(Value *S, 5385 const Twine &NameStr = "", 5386 Instruction *InsertBefore = nullptr); 5387 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd); 5388 5389 // Methods for support type inquiry through isa, cast, and dyn_cast: 5390 static inline bool classof(const Instruction *I) { 5391 return I->getOpcode() == Freeze; 5392 } 5393 static inline bool classof(const Value *V) { 5394 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5395 } 5396 }; 5397 5398 } // end namespace llvm 5399 5400 #endif // LLVM_IR_INSTRUCTIONS_H 5401