//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes the class definitions of all of the subclasses of the
// Instruction class. This is meant to be an easy way to get access to all
// instruction subclasses.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_INSTRUCTIONS_H
#define LLVM_IR_INSTRUCTIONS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>

namespace llvm {

class APFloat;
class APInt;
class BasicBlock;
class ConstantInt;
class DataLayout;
class StringRef;
class Type;
class Value;
class UnreachableInst;

//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

/// An instruction to allocate memory on the stack.
class AllocaInst : public UnaryInstruction {
  /// The type this instruction allocates storage for.
  Type *AllocatedType;

  // Subclass-data bitfield layout: log2(alignment) first, then the two
  // boolean flags; the static_assert below guarantees no gaps or overlap.
  using AlignmentField = AlignmentBitfieldElementT<0>;
  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
  using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
                                        SwiftErrorField>(),
                "Bitfields must be contiguous");

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                      const Twine &Name, InsertPosition InsertBefore);

  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
             InsertPosition InsertBefore);

  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name = "", InsertPosition InsertBefore = nullptr);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Return the address space for the allocation.
  unsigned getAddressSpace() const {
    return getType()->getAddressSpace();
  }

  /// Get allocation size in bytes. Returns std::nullopt if size can't be
  /// determined, e.g. in case of a VLA.
  std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;

  /// Get allocation size in bits. Returns std::nullopt if size can't be
  /// determined, e.g. in case of a VLA.
  std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// For use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction. The field stores log2 of the alignment, hence the shift.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment; stored internally as log2(Align).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call.
  /// Such allocas are never considered static even if they are in the entry
  /// block.
  bool isUsedWithInAlloca() const {
    return getSubclassData<UsedWithInAllocaField>();
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setSubclassData<UsedWithInAllocaField>(V);
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

//===----------------------------------------------------------------------===//
// LoadInst Class
//===----------------------------------------------------------------------===//

/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  // Subclass-data bitfield layout: volatile flag, log2(alignment), ordering.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           InsertPosition InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           InsertPosition InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, InsertPosition InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           InsertPosition InsertBefore = nullptr);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return the alignment of the access that is being performed.
  /// The field stores log2 of the alignment, hence the shift.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  /// Set the alignment; stored internally as log2(Align).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }
  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// load instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Return true if this load is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return true if this load is non-volatile and its ordering is no stronger
  /// than Unordered.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this load instruction.
  /// Not quite enough room in SubClassData for everything, so the
  /// synchronization scope ID gets its own field.
  SyncScope::ID SSID;
};

//===----------------------------------------------------------------------===//
// StoreInst Class
//===----------------------------------------------------------------------===//

/// An instruction for storing to memory.
class StoreInst : public Instruction {
  // Subclass-data bitfield layout: volatile flag, log2(alignment), ordering.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            InsertPosition InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            InsertPosition InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
            InsertPosition InsertBefore = nullptr);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the alignment of the access that is being performed.
  /// The field stores log2 of the alignment, hence the shift.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  /// Set the alignment; stored internally as log2(Align).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this store instruction. May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Return true if this store is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return true if this store is non-volatile and its ordering is no stronger
  /// than Unordered.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this store instruction.
  /// Not quite enough room in SubClassData for everything, so the
  /// synchronization scope ID gets its own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)

//===----------------------------------------------------------------------===//
// FenceInst Class
//===----------------------------------------------------------------------===//

/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
  using OrderingField = AtomicOrderingBitfieldElementT<0>;

  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  FenceInst *cloneImpl() const;

public:
  // Ordering may only be Acquire, Release, AcquireRelease, or
  // SequentiallyConsistent.
  FenceInst(LLVMContext &C, AtomicOrdering Ordering,
            SyncScope::ID SSID = SyncScope::System,
            InsertPosition InsertBefore = nullptr);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Returns the ordering constraint of this fence instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this fence instruction. May only be
  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this fence instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this fence instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Fence;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this fence instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Class
//===----------------------------------------------------------------------===//

/// An instruction that atomically checks whether a
/// specified value is in a memory location, and, if it is, stores a new value
/// there. The value returned by this instruction is a pair containing the
/// original value as first element, and an i1 indicating success (true) or
/// failure (false) as second element.
///
class AtomicCmpXchgInst : public Instruction {
  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
            AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
            SyncScope::ID SSID);

  // A 3-bit element is wide enough for every AtomicOrdering value (up to
  // AtomicOrdering::LAST); cmpxchg stores two of these (success + failure).
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicCmpXchgInst *cloneImpl() const;

public:
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    InsertPosition InsertBefore = nullptr);

  // allocate space for exactly three operands
  void *operator new(size_t S) { return User::operator new(S, 3); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Subclass-data bitfield layout: volatile flag, weak flag, success
  // ordering, failure ordering, log2(alignment).
  using VolatileField = BoolBitfieldElementT<0>;
  using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
  using SuccessOrderingField =
      AtomicOrderingBitfieldElementT<WeakField::NextBit>;
  using FailureOrderingField =
      AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
  using AlignmentField =
      AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
                              FailureOrderingField, AlignmentField>(),
      "Bitfields must be contiguous");

  /// Return the alignment of the memory that is being allocated by the
  /// instruction. The field stores log2 of the alignment, hence the shift.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment; stored internally as log2(Align).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a cmpxchg from a volatile memory
  /// location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile cmpxchg.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return true if this cmpxchg may spuriously fail.
  bool isWeak() const { return getSubclassData<WeakField>(); }

  /// Specify whether this cmpxchg may spuriously fail.
  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// A success ordering must be a real atomic ordering.
  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered;
  }

  /// A failure ordering additionally excludes release semantics, since no
  /// store happens on the failure path.
  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered &&
           Ordering != AtomicOrdering::AcquireRelease &&
           Ordering != AtomicOrdering::Release;
  }

  /// Returns the success ordering constraint of this cmpxchg instruction.
  AtomicOrdering getSuccessOrdering() const {
    return getSubclassData<SuccessOrderingField>();
  }

  /// Sets the success ordering constraint of this cmpxchg instruction.
  void setSuccessOrdering(AtomicOrdering Ordering) {
    assert(isValidSuccessOrdering(Ordering) &&
           "invalid CmpXchg success ordering");
    setSubclassData<SuccessOrderingField>(Ordering);
  }

  /// Returns the failure ordering constraint of this cmpxchg instruction.
  AtomicOrdering getFailureOrdering() const {
    return getSubclassData<FailureOrderingField>();
  }

  /// Sets the failure ordering constraint of this cmpxchg instruction.
  void setFailureOrdering(AtomicOrdering Ordering) {
    assert(isValidFailureOrdering(Ordering) &&
           "invalid CmpXchg failure ordering");
    setSubclassData<FailureOrderingField>(Ordering);
  }

  /// Returns a single ordering which is at least as strong as both the
  /// success and failure orderings for this cmpxchg.
  AtomicOrdering getMergedOrdering() const {
    // The failure ordering only ever strengthens the result when it is
    // SequentiallyConsistent, or Acquire paired with a weaker success
    // ordering; otherwise the success ordering already dominates.
    if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
      return AtomicOrdering::SequentiallyConsistent;
    if (getFailureOrdering() == AtomicOrdering::Acquire) {
      if (getSuccessOrdering() == AtomicOrdering::Monotonic)
        return AtomicOrdering::Acquire;
      if (getSuccessOrdering() == AtomicOrdering::Release)
        return AtomicOrdering::AcquireRelease;
    }
    return getSuccessOrdering();
  }

  /// Returns the synchronization scope ID of this cmpxchg instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this cmpxchg instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getCompareOperand() { return getOperand(1); }
  const Value *getCompareOperand() const { return getOperand(1); }

  Value *getNewValOperand() { return getOperand(2); }
  const Value *getNewValOperand() const { return getOperand(2); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the strongest permitted ordering on failure, given the
  /// desired ordering on success.
  ///
  /// If the comparison in a cmpxchg operation fails, there is no atomic store
  /// so release semantics cannot be provided. So this function drops explicit
  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
  /// operation would remain SequentiallyConsistent.
  static AtomicOrdering
  getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
    switch (SuccessOrdering) {
    default:
      llvm_unreachable("invalid cmpxchg success ordering");
    case AtomicOrdering::Release:
    case AtomicOrdering::Monotonic:
      return AtomicOrdering::Monotonic;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicCmpXchg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this cmpxchg instruction. Not quite
  /// enough room in SubClassData for everything, so synchronization scope ID
  /// gets its own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<AtomicCmpXchgInst> :
    public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)

//===----------------------------------------------------------------------===//
// AtomicRMWInst Class
//===----------------------------------------------------------------------===//

/// An instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back. Returns
/// the old value.
///
class AtomicRMWInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicRMWInst *cloneImpl() const;

public:
  /// This enumeration lists the possible modifications atomicrmw can make. In
  /// the descriptions, 'p' is the pointer to the instruction's memory location,
  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
  /// instruction. These instructions always return 'old'.
  enum BinOp : unsigned {
    /// *p = v
    Xchg,
    /// *p = old + v
    Add,
    /// *p = old - v
    Sub,
    /// *p = old & v
    And,
    /// *p = ~(old & v)
    Nand,
    /// *p = old | v
    Or,
    /// *p = old ^ v
    Xor,
    /// *p = old >signed v ? old : v
    Max,
    /// *p = old <signed v ? old : v
    Min,
    /// *p = old >unsigned v ? old : v
    UMax,
    /// *p = old <unsigned v ? old : v
    UMin,

    /// *p = old + v
    FAdd,

    /// *p = old - v
    FSub,

    /// *p = maxnum(old, v)
    /// \p maxnum matches the behavior of \p llvm.maxnum.*.
    FMax,

    /// *p = minnum(old, v)
    /// \p minnum matches the behavior of \p llvm.minnum.*.
    FMin,

    /// Increment one up to a maximum value.
    /// *p = (old u>= v) ? 0 : (old + 1)
    UIncWrap,

    /// Decrement one until a minimum value or zero.
    /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
    UDecWrap,

    FIRST_BINOP = Xchg,
    LAST_BINOP = UDecWrap,
    BAD_BINOP
  };

private:
  // A 3-bit element is wide enough for every AtomicOrdering value (up to
  // AtomicOrdering::LAST).
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

  // A 5-bit element is wide enough for every BinOp up to LAST_BINOP.
  template <unsigned Offset>
  using BinOpBitfieldElement =
      typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>;

public:
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                InsertPosition InsertBefore = nullptr);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Subclass-data bitfield layout: volatile flag, ordering, operation,
  // log2(alignment).
  using VolatileField = BoolBitfieldElementT<0>;
  using AtomicOrderingField =
      AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
  using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
  static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
                                        OperationField, AlignmentField>(),
                "Bitfields must be contiguous");

  /// Returns the operation (BinOp) performed by this rmw instruction.
  BinOp getOperation() const { return getSubclassData<OperationField>(); }

  static StringRef getOperationName(BinOp Op);

  /// Return true if \p Op is one of the floating-point operations.
  static bool isFPOperation(BinOp Op) {
    switch (Op) {
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub:
    case AtomicRMWInst::FMax:
    case AtomicRMWInst::FMin:
      return true;
    default:
      return false;
    }
  }

  void setOperation(BinOp Operation) {
    setSubclassData<OperationField>(Operation);
  }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction. The field stores log2 of the alignment, hence the shift.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment; stored internally as log2(Align).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a RMW on a volatile memory location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile RMW or not.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Returns the ordering constraint of this rmw instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<AtomicOrderingField>();
  }

  /// Sets the ordering constraint of this rmw instruction.
  void setOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "atomicrmw instructions can only be atomic.");
    assert(Ordering != AtomicOrdering::Unordered &&
           "atomicrmw instructions cannot be unordered.");
    setSubclassData<AtomicOrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this rmw instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this rmw instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Return true if this instruction's operation is floating-point.
  bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
            AtomicOrdering Ordering, SyncScope::ID SSID);

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this rmw instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<AtomicRMWInst>
    : public FixedNumOperandTraits<AtomicRMWInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)

//===----------------------------------------------------------------------===//
// GetElementPtrInst Class
//===----------------------------------------------------------------------===//

// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
inline Type *checkGEPType(Type *Ty) {
  assert(Ty && "Invalid GetElementPtrInst indices for type!");
  return Ty;
}

/// an instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs
///
class GetElementPtrInst : public Instruction {
  Type *SourceElementType; // Pointee type the indices are applied to.
  Type *ResultElementType; // Element type addressed by the full index list.

  GetElementPtrInst(const GetElementPtrInst &GEPI);

  /// Constructor - Create a getelementptr instruction with a base pointer and
  /// a list of indices, optionally inserted at a given position. \p Values is
  /// the total operand count (pointer + indices) used for operand allocation.
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, InsertPosition InsertBefore);

  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  GetElementPtrInst *cloneImpl() const;

public:
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr = "",
                                   InsertPosition InsertBefore = nullptr) {
    // One operand for the base pointer plus one per index.
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertBefore);
  }

  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList, GEPNoWrapFlags NW,
                                   const Twine &NameStr = "",
                                   InsertPosition InsertBefore = nullptr) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
    GEP->setNoWrapFlags(NW);
    return GEP;
  }

  /// Create an "inbounds" getelementptr. See the documentation for the
  /// "inbounds" flag in LangRef.html for details.
  static GetElementPtrInst *
  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
                 const Twine &NameStr = "",
                 InsertPosition InsertBefore = nullptr) {
    return Create(PointeeType, Ptr, IdxList, GEPNoWrapFlags::inBounds(),
                  NameStr, InsertBefore);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  Type *getSourceElementType() const { return SourceElementType; }

  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
  void setResultElementType(Type *Ty) { ResultElementType = Ty; }

  Type *getResultElementType() const {
    return ResultElementType;
  }

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }

  /// Returns the result type of a getelementptr with the given source
  /// element type and indexes.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// source element type.
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  /// Return the type of the element at the given index of an indexable
  /// type.  This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
  ///
  /// Returns null if the type can't be indexed, or the given index is not
  /// legal for the given type.
  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);

  // Operand 0 is the base pointer; operands [1, N) are the indices.
  inline op_iterator idx_begin() { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator idx_end() { return op_end(); }
  inline const_op_iterator idx_end() const { return op_end(); }

  inline iterator_range<op_iterator> indices() {
    return make_range(idx_begin(), idx_end());
  }

  inline iterator_range<const_op_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getPointerOperand() {
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }
  static unsigned getPointerOperandIndex() {
    return 0U; // get index for modifying correct operand.
  }

  /// Method to return the pointer operand as a
  /// PointerType.
  Type *getPointerOperandType() const {
    return getPointerOperand()->getType();
  }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  /// Returns the pointer type returned by the GEP
  /// instruction, which may be a vector of pointers.
  static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
    // Vector GEP: a vector base pointer yields a vector result of the same
    // element count.
    Type *Ty = Ptr->getType();
    if (Ty->isVectorTy())
      return Ty;

    // A scalar base with any vector index also yields a vector result,
    // shaped like the first vector index found.
    for (Value *Index : IdxList)
      if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
        ElementCount EltCount = IndexVTy->getElementCount();
        return VectorType::get(Ty, EltCount);
      }
    // Scalar GEP
    return Ty;
  }

  unsigned getNumIndices() const { // Note: always non-negative
    return getNumOperands() - 1;
  }

  bool hasIndices() const {
    return getNumOperands() > 1;
  }

  /// Return true if all of the indices of this GEP are
  /// zeros.  If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const;

  /// Return true if all of the indices of this GEP are
  /// constant integers.  If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const;

  /// Set nowrap flags for GEP instruction.
  void setNoWrapFlags(GEPNoWrapFlags NW);

  /// Set or clear the inbounds flag on this GEP instruction.
  /// See LangRef.html for the meaning of inbounds on a getelementptr.
  /// TODO: Remove this method in favor of setNoWrapFlags().
  void setIsInBounds(bool b = true);

  /// Get the nowrap flags for the GEP instruction.
  GEPNoWrapFlags getNoWrapFlags() const;

  /// Determine whether the GEP has the inbounds flag.
  bool isInBounds() const;

  /// Determine whether the GEP has the nusw flag.
  bool hasNoUnsignedSignedWrap() const;

  /// Determine whether the GEP has the nuw flag.
  bool hasNoUnsignedWrap() const;

  /// Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
  /// Decompose the GEP into a constant offset plus a sum of
  /// (variable * scale) terms collected into \p VariableOffsets.
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     MapVector<Value *, APInt> &VariableOffsets,
                     APInt &ConstantOffset) const;

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<GetElementPtrInst> :
  public VariadicOperandTraits<GetElementPtrInst, 1> {
};

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     InsertPosition InsertBefore)
    : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  init(Ptr, IdxList, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)

//===----------------------------------------------------------------------===//
//                               ICmpInst Class
//===----------------------------------------------------------------------===//

/// This instruction compares its operands according to the predicate given
/// to the constructor.
/// It only operates on integers or pointers. The operands
/// must be identical types.
/// Represent an integer comparison operator.
class ICmpInst: public CmpInst {
  // Debug-only sanity checks invoked from the constructors.
  void AssertOK() {
    assert(isIntPredicate() &&
           "Invalid ICmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
          "Both operands to ICmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
           "Invalid operand types for ICmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ICmpInst
  ICmpInst *cloneImpl() const;

public:
  /// Constructor with insertion semantics.
  ICmpInst(InsertPosition InsertBefore, ///< Where to insert
           Predicate pred,              ///< The predicate to use for the comparison
           Value *LHS,                  ///< The left-hand-side of the expression
           Value *RHS,                  ///< The right-hand-side of the expression
           const Twine &NameStr = ""    ///< Name of the instruction
           )
      : CmpInst(makeCmpResultType(LHS->getType()), Instruction::ICmp, pred, LHS,
                RHS, NameStr, InsertBefore) {
#ifndef NDEBUG
    AssertOK();
#endif
  }

  /// Constructor with no-insertion semantics
  ICmpInst(
      Predicate pred,           ///< The predicate to use for the comparison
      Value *LHS,               ///< The left-hand-side of the expression
      Value *RHS,               ///< The right-hand-side of the expression
      const Twine &NameStr = "" ///< Name of the instruction
      ) : CmpInst(makeCmpResultType(LHS->getType()),
                  Instruction::ICmp, pred, LHS, RHS, NameStr) {
#ifndef NDEBUG
    AssertOK();
#endif
  }

  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as signed.
  /// Return the signed version of the predicate
  Predicate getSignedPredicate() const {
    return getSignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the signed version of the predicate.
  static Predicate getSignedPredicate(Predicate pred);

  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as unsigned.
  /// Return the unsigned version of the predicate
  Predicate getUnsignedPredicate() const {
    return getUnsignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the unsigned version of the predicate.
  static Predicate getUnsignedPredicate(Predicate pred);

  /// Return true if this predicate is either EQ or NE.  This also
  /// tests for commutativity.
  static bool isEquality(Predicate P) {
    return P == ICMP_EQ || P == ICMP_NE;
  }

  /// Return true if this predicate is either EQ or NE.  This also
  /// tests for commutativity.
  bool isEquality() const {
    return isEquality(getPredicate());
  }

  /// @returns true if the predicate of this ICmpInst is commutative
  /// Determine if this relation is commutative.
  bool isCommutative() const { return isEquality(); }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  bool isRelational() const {
    return !isEquality();
  }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  static bool isRelational(Predicate P) {
    return !isEquality(P);
  }

  /// Return true if the predicate is SGT or UGT.
  ///
  static bool isGT(Predicate P) {
    return P == ICMP_SGT || P == ICMP_UGT;
  }

  /// Return true if the predicate is SLT or ULT.
  ///
  static bool isLT(Predicate P) {
    return P == ICMP_SLT || P == ICMP_ULT;
  }

  /// Return true if the predicate is SGE or UGE.
  ///
  static bool isGE(Predicate P) {
    return P == ICMP_SGE || P == ICMP_UGE;
  }

  /// Return true if the predicate is SLE or ULE.
  ///
  static bool isLE(Predicate P) {
    return P == ICMP_SLE || P == ICMP_ULE;
  }

  /// Returns the sequence of all ICmp predicates.
  ///
  static auto predicates() { return ICmpPredicates(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Return result of `LHS Pred RHS` comparison.
  static bool compare(const APInt &LHS, const APInt &RHS,
                      ICmpInst::Predicate Pred);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ICmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                               FCmpInst Class
//===----------------------------------------------------------------------===//

/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values.
/// The operands must be identical types.
/// Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
  // Debug-only sanity checks invoked from the constructors.
  void AssertOK() {
    assert(isFPPredicate() && "Invalid FCmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to FCmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
           "Invalid operand types for FCmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FCmpInst
  FCmpInst *cloneImpl() const;

public:
  /// Constructor with insertion semantics.
  FCmpInst(InsertPosition InsertBefore, ///< Where to insert
           Predicate pred,              ///< The predicate to use for the comparison
           Value *LHS,                  ///< The left-hand-side of the expression
           Value *RHS,                  ///< The right-hand-side of the expression
           const Twine &NameStr = ""    ///< Name of the instruction
           )
      : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, pred, LHS,
                RHS, NameStr, InsertBefore) {
    AssertOK();
  }

  /// Constructor with no-insertion semantics
  FCmpInst(Predicate Pred, ///< The predicate to use for the comparison
           Value *LHS,     ///< The left-hand-side of the expression
           Value *RHS,     ///< The right-hand-side of the expression
           const Twine &NameStr = "", ///< Name of the instruction
           Instruction *FlagsSource = nullptr)
      : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
                RHS, NameStr, nullptr, FlagsSource) {
    AssertOK();
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  static bool isEquality(Predicate Pred) {
    // Both the ordered and the unordered forms of EQ/NE count as equality.
    return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
           Pred == FCMP_UNE;
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  bool isEquality() const { return isEquality(getPredicate()); }

  /// @returns true if the predicate of this instruction is commutative.
  /// Determine if this is a commutative predicate.
  bool isCommutative() const {
    return isEquality() ||
           getPredicate() == FCMP_FALSE ||
           getPredicate() == FCMP_TRUE ||
           getPredicate() == FCMP_ORD ||
           getPredicate() == FCMP_UNO;
  }

  /// @returns true if the predicate is relational (not EQ or NE).
  /// Determine if this a relational predicate.
  bool isRelational() const { return !isEquality(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Returns the sequence of all FCmp predicates.
  ///
  static auto predicates() { return FCmpPredicates(); }

  /// Return result of `LHS Pred RHS` comparison.
  static bool compare(const APFloat &LHS, const APFloat &RHS,
                      FCmpInst::Predicate Pred);

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::FCmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
/// This class represents a function call, abstracting a target
/// machine's calling convention.  This class uses low bit of the SubClassData
/// field to indicate whether or not this is a tail call.  The rest of the bits
/// hold the calling convention of the call.
///
class CallInst : public CallBase {
  CallInst(const CallInst &CI);

  /// Construct a CallInst from a range of arguments
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  InsertPosition InsertBefore);

  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  const Twine &NameStr, InsertPosition InsertBefore)
      : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}

  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
                    InsertPosition InsertBefore);

  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus the input operand
    // counts provided.
    return 1 + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallInst *cloneImpl() const;

public:
  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
                          InsertPosition InsertBefore = nullptr) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          InsertPosition InsertBefore = nullptr) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                          const Twine &NameStr = "",
                          InsertPosition InsertBefore = nullptr) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    // Extra bytes requested for the per-bundle descriptors co-allocated with
    // the instruction.
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
                          InsertPosition InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                          const Twine &NameStr = "",
                          InsertPosition InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          InsertPosition InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertBefore);
  }

  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertBefore.
  ///
  /// The returned call instruction is identical \p CI in every way except that
  /// the operand bundles for the new instruction are set to the operand bundles
  /// in \p Bundles.
  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
                          InsertPosition InsertPt = nullptr);

  // Note that 'musttail' implies 'tail'.
  enum TailCallKind : unsigned {
    TCK_None = 0,
    TCK_Tail = 1,
    TCK_MustTail = 2,
    TCK_NoTail = 3,
    TCK_LAST = TCK_NoTail
  };

  // The tail-call kind occupies the low 2 bits of the subclass data, directly
  // below CallBase's calling-convention field.
  using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
  static_assert(
      Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
      "Bitfields must be contiguous");

  TailCallKind getTailCallKind() const {
    return getSubclassData<TailCallKindField>();
  }

  bool isTailCall() const {
    TailCallKind Kind = getTailCallKind();
    return Kind == TCK_Tail || Kind == TCK_MustTail;
  }

  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }

  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }

  void setTailCallKind(TailCallKind TCK) {
    setSubclassData<TailCallKindField>(TCK);
  }

  void setTailCall(bool IsTc = true) {
    setTailCallKind(IsTc ? TCK_Tail : TCK_None);
  }

  /// Return true if the call can return twice
  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
  void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }

  /// Return true if the call is for a noreturn trap intrinsic.
  bool isNonContinuableTrap() const {
    switch (getIntrinsicID()) {
    case Intrinsic::trap:
    case Intrinsic::ubsantrap:
      // A trap carrying a "trap-func-name" attribute is lowered to a regular
      // function call, so it does not count as non-continuable.
      return !hasFnAttr("trap-func-name");
    default:
      return false;
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Updates profile metadata by scaling it by \p S / \p T.
  void updateProfWeight(uint64_t S, uint64_t T);

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}

//===----------------------------------------------------------------------===//
//                               SelectInst Class
//===----------------------------------------------------------------------===//

/// This class represents the LLVM 'select' instruction.
1571 /// 1572 class SelectInst : public Instruction { 1573 1574 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1575 InsertPosition InsertBefore) 1576 : Instruction(S1->getType(), Instruction::Select, &Op<0>(), 3, 1577 InsertBefore) { 1578 init(C, S1, S2); 1579 setName(NameStr); 1580 } 1581 1582 void init(Value *C, Value *S1, Value *S2) { 1583 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select"); 1584 Op<0>() = C; 1585 Op<1>() = S1; 1586 Op<2>() = S2; 1587 } 1588 1589 protected: 1590 // Note: Instruction needs to be a friend here to call cloneImpl. 1591 friend class Instruction; 1592 1593 SelectInst *cloneImpl() const; 1594 1595 public: 1596 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1597 const Twine &NameStr = "", 1598 InsertPosition InsertBefore = nullptr, 1599 Instruction *MDFrom = nullptr) { 1600 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); 1601 if (MDFrom) 1602 Sel->copyMetadata(*MDFrom); 1603 return Sel; 1604 } 1605 1606 const Value *getCondition() const { return Op<0>(); } 1607 const Value *getTrueValue() const { return Op<1>(); } 1608 const Value *getFalseValue() const { return Op<2>(); } 1609 Value *getCondition() { return Op<0>(); } 1610 Value *getTrueValue() { return Op<1>(); } 1611 Value *getFalseValue() { return Op<2>(); } 1612 1613 void setCondition(Value *V) { Op<0>() = V; } 1614 void setTrueValue(Value *V) { Op<1>() = V; } 1615 void setFalseValue(Value *V) { Op<2>() = V; } 1616 1617 /// Swap the true and false values of the select instruction. 1618 /// This doesn't swap prof metadata. 1619 void swapValues() { Op<1>().swap(Op<2>()); } 1620 1621 /// Return a string if the specified operands are invalid 1622 /// for a select operation, otherwise return null. 1623 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); 1624 1625 /// Transparently provide more efficient getOperand methods. 
1626 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1627 1628 OtherOps getOpcode() const { 1629 return static_cast<OtherOps>(Instruction::getOpcode()); 1630 } 1631 1632 // Methods for support type inquiry through isa, cast, and dyn_cast: 1633 static bool classof(const Instruction *I) { 1634 return I->getOpcode() == Instruction::Select; 1635 } 1636 static bool classof(const Value *V) { 1637 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1638 } 1639 }; 1640 1641 template <> 1642 struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { 1643 }; 1644 1645 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value) 1646 1647 //===----------------------------------------------------------------------===// 1648 // VAArgInst Class 1649 //===----------------------------------------------------------------------===// 1650 1651 /// This class represents the va_arg llvm instruction, which returns 1652 /// an argument of the specified type given a va_list and increments that list 1653 /// 1654 class VAArgInst : public UnaryInstruction { 1655 protected: 1656 // Note: Instruction needs to be a friend here to call cloneImpl. 
  friend class Instruction;

  VAArgInst *cloneImpl() const;

public:
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
            InsertPosition InsertBefore = nullptr)
      : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  // The sole operand is the va_list pointer being read.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                ExtractElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a single (scalar)
/// element from a VectorType value
///
class ExtractElementInst : public Instruction {
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
                     InsertPosition InsertBefore = nullptr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractElementInst *cloneImpl() const;

public:
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr = "",
                                    InsertPosition InsertBefore = nullptr) {
    // Two operands: the vector and the index.
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
  }

  /// Return true if an extractelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *Idx);

  // Operand 0 is the vector; operand 1 is the index.
  Value *getVectorOperand() { return Op<0>(); }
  Value *getIndexOperand() { return Op<1>(); }
  const Value *getVectorOperand() const { return Op<0>(); }
  const Value *getIndexOperand() const { return Op<1>(); }

  VectorType *getVectorOperandType() const {
    return cast<VectorType>(getVectorOperand()->getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)

//===----------------------------------------------------------------------===//
//                                InsertElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a single (scalar)
/// element into a VectorType value
///
class InsertElementInst : public Instruction {
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                    const Twine &NameStr = "",
                    InsertPosition InsertBefore = nullptr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
1751 friend class Instruction; 1752 1753 InsertElementInst *cloneImpl() const; 1754 1755 public: 1756 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 1757 const Twine &NameStr = "", 1758 InsertPosition InsertBefore = nullptr) { 1759 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); 1760 } 1761 1762 /// Return true if an insertelement instruction can be 1763 /// formed with the specified operands. 1764 static bool isValidOperands(const Value *Vec, const Value *NewElt, 1765 const Value *Idx); 1766 1767 /// Overload to return most specific vector type. 1768 /// 1769 VectorType *getType() const { 1770 return cast<VectorType>(Instruction::getType()); 1771 } 1772 1773 /// Transparently provide more efficient getOperand methods. 1774 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1775 1776 // Methods for support type inquiry through isa, cast, and dyn_cast: 1777 static bool classof(const Instruction *I) { 1778 return I->getOpcode() == Instruction::InsertElement; 1779 } 1780 static bool classof(const Value *V) { 1781 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1782 } 1783 }; 1784 1785 template <> 1786 struct OperandTraits<InsertElementInst> : 1787 public FixedNumOperandTraits<InsertElementInst, 3> { 1788 }; 1789 1790 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value) 1791 1792 //===----------------------------------------------------------------------===// 1793 // ShuffleVectorInst Class 1794 //===----------------------------------------------------------------------===// 1795 1796 constexpr int PoisonMaskElem = -1; 1797 1798 /// This instruction constructs a fixed permutation of two 1799 /// input vectors. 1800 /// 1801 /// For each element of the result vector, the shuffle mask selects an element 1802 /// from one of the input vectors to copy to the result. Non-negative elements 1803 /// in the mask represent an index into the concatenated pair of input vectors. 
/// PoisonMaskElem (-1) specifies that the result element is poison.
///
/// For scalable vectors, all the elements of the mask must be 0 or -1. This
/// requirement may be relaxed in the future.
class ShuffleVectorInst : public Instruction {
  // The mask is stored out-of-line (not as an operand) as a vector of ints.
  SmallVector<int, 4> ShuffleMask;
  // Constant form of the mask, kept for bitcode emission (see
  // getShuffleMaskForBitcode below).
  Constant *ShuffleMaskForBitcode;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ShuffleVectorInst *cloneImpl() const;

public:
  ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
                    InsertPosition InsertBefore = nullptr);
  ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
                    InsertPosition InsertBefore = nullptr);
  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                    const Twine &NameStr = "",
                    InsertPosition InsertBefore = nullptr);
  ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                    const Twine &NameStr = "",
                    InsertPosition InsertBefore = nullptr);

  // allocate space for exactly two operands (the two input vectors)
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { return User::operator delete(Ptr); }

  /// Swap the operands and adjust the mask to preserve the semantics
  /// of the instruction.
  void commute();

  /// Return true if a shufflevector instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *V1, const Value *V2,
                              const Value *Mask);
  static bool isValidOperands(const Value *V1, const Value *V2,
                              ArrayRef<int> Mask);

  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the shuffle mask value of this instruction for the given element
  /// index. Return PoisonMaskElem if the element is undef.
  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }

  /// Convert the input shuffle mask operand to a vector of integers. Undefined
  /// elements of the mask are returned as PoisonMaskElem.
  static void getShuffleMask(const Constant *Mask,
                             SmallVectorImpl<int> &Result);

  /// Return the mask for this instruction as a vector of integers. Undefined
  /// elements of the mask are returned as PoisonMaskElem.
  void getShuffleMask(SmallVectorImpl<int> &Result) const {
    Result.assign(ShuffleMask.begin(), ShuffleMask.end());
  }

  /// Return the mask for this instruction, for use in bitcode.
  ///
  /// TODO: This is temporary until we decide a new bitcode encoding for
  /// shufflevector.
  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }

  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                Type *ResultTy);

  void setShuffleMask(ArrayRef<int> Mask);

  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }

  /// Return true if this shuffle returns a vector with a different number of
  /// elements than its source vectors.
  /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
  ///           shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
  bool changesLength() const {
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts != NumMaskElts;
  }

  /// Return true if this shuffle returns a vector with a greater number of
  /// elements than its source vectors.
  /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
  bool increasesLength() const {
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts < NumMaskElts;
  }

  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector.
  /// Example: <7,5,undef,7>
  /// This assumes that vector operands (of length \p NumSrcElts) are the same
  /// length as the mask.
  static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts);
  static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSingleSourceMask(MaskAsInts, NumSrcElts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without changing the length of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
  /// TODO: Optionally allow length-changing shuffles.
  bool isSingleSource() const {
    return !changesLength() &&
           isSingleSourceMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector without lane crossings. A shuffle using this mask is not
  /// necessarily a no-op because it may change the number of elements from its
  /// input vectors or it may provide demanded bits knowledge via undef lanes.
  /// Example: <undef,undef,2,3>
  static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts);
  static bool isIdentityMask(const Constant *Mask, int NumSrcElts) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");

    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;

    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isIdentityMask(MaskAsInts, NumSrcElts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without lane crossings and does not change the number of elements
  /// from its input vectors.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
  bool isIdentity() const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    return !changesLength() && isIdentityMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle lengthens exactly one source vector with
  /// undefs in the high elements.
  bool isIdentityWithPadding() const;

  /// Return true if this shuffle extracts the first N elements of exactly one
  /// source vector.
  bool isIdentityWithExtract() const;

  /// Return true if this shuffle concatenates its 2 source vectors. This
  /// returns false if either input is undefined. In that case, the shuffle is
  /// better classified as an identity with padding operation.
  bool isConcat() const;

  /// Return true if this shuffle mask chooses elements from its source vectors
  /// without lane crossings. A shuffle using this mask would be
  /// equivalent to a vector select with a constant condition operand.
  /// Example: <4,1,6,undef>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// This assumes that vector operands are the same length as the mask
  /// (a length-changing shuffle can never be equivalent to a vector select).
  static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
  static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSelectMask(MaskAsInts, NumSrcElts);
  }

  /// Return true if this shuffle chooses elements from its source vectors
  /// without lane crossings and all operands have the same number of elements.
  /// In other words, this shuffle is equivalent to a vector select with a
  /// constant condition operand.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// TODO: Optionally allow length-changing shuffles.
  bool isSelect() const {
    return !changesLength() && isSelectMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle mask swaps the order of elements from exactly
  /// one source vector.
  /// Example: <7,6,undef,4>
  /// This assumes that vector operands (of length \p NumSrcElts) are the same
  /// length as the mask.
  static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
  static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReverseMask(MaskAsInts, NumSrcElts);
  }

  /// Return true if this shuffle swaps the order of elements from exactly
  /// one source vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
  /// TODO: Optionally allow length-changing shuffles.
  bool isReverse() const {
    return !changesLength() && isReverseMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle mask chooses all elements with the same value
  /// as the first element of exactly one source vector.
  /// Example: <4,undef,undef,4>
  /// This assumes that vector operands (of length \p NumSrcElts) are the same
  /// length as the mask.
  static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts);
  static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isZeroEltSplatMask(MaskAsInts, NumSrcElts);
  }

  /// Return true if all elements of this shuffle are the same value as the
  /// first element of exactly one source vector without changing the length
  /// of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
  /// TODO: Optionally allow length-changing shuffles.
  /// TODO: Optionally allow splats from other elements.
  bool isZeroEltSplat() const {
    return !changesLength() &&
           isZeroEltSplatMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle mask is a transpose mask.
  /// Transpose vector masks transpose a 2xn matrix. They read corresponding
  /// even- or odd-numbered vector elements from two n-dimensional source
  /// vectors and write each result into consecutive elements of an
  /// n-dimensional destination vector. Two shuffles are necessary to complete
  /// the transpose, one for the even elements and another for the odd elements.
  /// This description closely follows how the TRN1 and TRN2 AArch64
  /// instructions operate.
  ///
  /// For example, a simple 2x2 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b >
  ///   m1 = < c, d >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
  ///   t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
  ///
  /// For matrices having greater than n columns, the resulting nx2 transposed
  /// matrix is stored in two result vectors such that one vector contains
  /// interleaved elements from all the even-numbered rows and the other vector
  /// contains interleaved elements from all the odd-numbered rows. For example,
  /// a 2x4 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b, c, d >
  ///   m1 = < e, f, g, h >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
  ///   t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
  static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts);
  static bool isTransposeMask(const Constant *Mask, int NumSrcElts) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isTransposeMask(MaskAsInts, NumSrcElts);
  }

  /// Return true if this shuffle transposes the elements of its inputs without
  /// changing the length of the vectors. This operation may also be known as a
  /// merge or interleave. See the description for isTransposeMask() for the
  /// exact specification.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
  bool isTranspose() const {
    return !changesLength() && isTransposeMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle mask is a splice mask, concatenating the two
  /// inputs together and then extracting an original width vector starting
  /// from the splice index.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  /// This assumes that vector operands (of length \p NumSrcElts) are the same
  /// length as the mask.
  static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts, int &Index);
  static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSpliceMask(MaskAsInts, NumSrcElts, Index);
  }

  /// Return true if this shuffle splices two inputs without changing the length
  /// of the vectors. This operation concatenates the two inputs together and
  /// then extracts an original width vector starting from the splice index.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  bool isSplice(int &Index) const {
    return !changesLength() &&
           isSpliceMask(ShuffleMask, ShuffleMask.size(), Index);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  /// A valid extract subvector mask returns a smaller vector from a single
  /// source operand. The base extraction index is returned as well.
  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index);
  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
                                     int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  bool isExtractSubvectorMask(int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an insert subvector mask.
  /// A valid insert subvector mask inserts the lowest elements of a second
  /// source operand into an in-place first source operand.
  /// Both the sub vector width and the insertion index is returned.
  static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index);
  static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
  }

  /// Return true if this shuffle mask is an insert subvector mask.
  bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
  }

  /// Return true if this shuffle mask replicates each of the \p VF elements
  /// in a vector \p ReplicationFactor times.
  /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
  ///   <0,0,0,1,1,1,2,2,2,3,3,3>
  static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
                                int &VF);
  static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
                                int &VF) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
  }

  /// Return true if this shuffle mask is a replication mask.
  bool isReplicationMask(int &ReplicationFactor, int &VF) const;

  /// Return true if this shuffle mask represents "clustered" mask of size VF,
  /// i.e. each index between [0..VF) is used exactly once in each submask of
  /// size VF.
  /// For example, the mask for \p VF=4 is:
  /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
  /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
  /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
  ///                          element 3 is used twice in the second submask
  ///                          (3,3,1,0) and index 2 is not used at all.
  static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);

  /// Return true if this shuffle mask is a one-use-single-source("clustered")
  /// mask.
  bool isOneUseSingleSourceMask(int VF) const;

  /// Change values in a shuffle permute mask assuming the two vector operands
  /// of length InVecNumElts have swapped position.
  static void commuteShuffleMask(MutableArrayRef<int> Mask,
                                 unsigned InVecNumElts) {
    for (int &Idx : Mask) {
      // Poison/undef lanes (-1) stay as they are.
      if (Idx == -1)
        continue;
      Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
             "shufflevector mask index out of range");
    }
  }

  /// Return if this shuffle interleaves its two input vectors together.
  bool isInterleave(unsigned Factor);

  /// Return true if the mask interleaves one or more input vectors together.
  ///
  /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
  /// E.g. For a Factor of 2 (LaneLen=4):
  ///   <0, 4, 1, 5, 2, 6, 3, 7>
  /// E.g. For a Factor of 3 (LaneLen=4):
  ///   <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
  /// E.g. For a Factor of 4 (LaneLen=2):
  ///   <0, 2, 6, 4, 1, 3, 7, 5>
  ///
  /// NumInputElts is the total number of elements in the input vectors.
  ///
  /// StartIndexes are the first indexes of each vector being interleaved,
  /// substituting any indexes that were undef
  /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
  ///
  /// Note that this does not check if the input vectors are consecutive:
  /// It will return true for masks such as
  /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
  static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
                               unsigned NumInputElts,
                               SmallVectorImpl<unsigned> &StartIndexes);
  static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
                               unsigned NumInputElts) {
    SmallVector<unsigned, 8> StartIndexes;
    return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
  }

  /// Check if the mask is a DE-interleave mask of the given factor
  /// \p Factor like:
  ///     <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
  static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor,
                                         unsigned &Index);
  static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor) {
    unsigned Unused;
    return isDeInterleaveMaskOfFactor(Mask, Factor, Unused);
  }

  /// Checks if the shuffle is a bit rotation of the first operand across
  /// multiple subelements, e.g:
  ///
  /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
  ///
  /// could be expressed as
  ///
  /// rotl <4 x i16> %a, 8
  ///
  /// If it can be expressed as a rotation, returns the number of subelements to
  /// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
  static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits,
                              unsigned MinSubElts, unsigned MaxSubElts,
                              unsigned &NumSubElts, unsigned &RotateAmt);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ShuffleVector;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ShuffleVectorInst>
    : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)

//===----------------------------------------------------------------------===//
//                                ExtractValueInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
  SmallVector<unsigned, 4> Indices;

  ExtractValueInst(const ExtractValueInst &EVI);

  /// Constructors - Create an extractvalue instruction with a base aggregate
  /// value and a list of indices. The first and second ctor can optionally
  /// insert before an existing instruction, the third appends the new
  /// instruction to the specified BasicBlock.
  inline ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
                          const Twine &NameStr, InsertPosition InsertBefore);

  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractValueInst *cloneImpl() const;

public:
  static ExtractValueInst *Create(Value *Agg, ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr = "",
                                  InsertPosition InsertBefore = nullptr) {
    return new
      ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
  }

  /// Returns the type of the element that would be extracted
  /// with an extractvalue instruction with the specified parameters.
  ///
  /// Null is returned if the indices are invalid for the specified type.
  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

ExtractValueInst::ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   InsertPosition InsertBefore)
    : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                       ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}

//===----------------------------------------------------------------------===//
//                                InsertValueInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a struct field or array element
/// value into an aggregate value.
///
class InsertValueInst : public Instruction {
  SmallVector<unsigned, 4> Indices;

  InsertValueInst(const InsertValueInst &IVI);

  /// Constructors - Create an insertvalue instruction with a base aggregate
  /// value, a value to insert, and a list of indices. The first and second ctor
  /// can optionally insert before an existing instruction, the third appends
  /// the new instruction to the specified BasicBlock.
  inline InsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                         const Twine &NameStr, InsertPosition InsertBefore);

  /// Constructors - These three constructors are convenience methods because
  /// one and two index insertvalue instructions are so common.
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
                  const Twine &NameStr = "",
                  InsertPosition InsertBefore = nullptr);

  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
            const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertValueInst *cloneImpl() const;

public:
  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr = "",
                                 InsertPosition InsertBefore = nullptr) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  Value *getInsertedValueOperand() {
    return getOperand(1);
  }
  const Value *getInsertedValueOperand() const {
    return getOperand(1);
  }
  static unsigned getInsertedValueOperandIndex() {
    return 1U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<InsertValueInst> :
  public FixedNumOperandTraits<InsertValueInst, 2> {
};

InsertValueInst::InsertValueInst(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs, const Twine &NameStr,
                                 InsertPosition InsertBefore)
    : Instruction(Agg->getType(), InsertValue,
                  OperandTraits<InsertValueInst>::op_begin(this), 2,
                  InsertBefore) {
  init(Agg, Val, Idxs, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)

//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

// PHINode - The PHINode class is used to represent the magical mystical PHI
// node, that can not exist in nature, but can be synthesized in a computer
// scientist's overactive imagination.
//
class PHINode : public Instruction {
  /// The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  PHINode(const PHINode &PN);

  explicit PHINode(Type *Ty, unsigned NumReservedValues,
                   const Twine &NameStr = "",
                   InsertPosition InsertBefore = nullptr)
      : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
        ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  PHINode *cloneImpl() const;

  // allocHungoffUses - this is more complicated than the generic
  // User::allocHungoffUses, because we have to allocate Uses for the incoming
  // values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

public:
  /// Constructors - NumReservedValues is a hint for the number of incoming
  /// edges that this phi node will have (use 0 if you really have no idea).
  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr = "",
                         InsertPosition InsertBefore = nullptr) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  // Please note that we are not providing non-const iterators for blocks to
  // force all updates go through an interface function.
2551 2552 using block_iterator = BasicBlock **; 2553 using const_block_iterator = BasicBlock * const *; 2554 2555 const_block_iterator block_begin() const { 2556 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); 2557 } 2558 2559 const_block_iterator block_end() const { 2560 return block_begin() + getNumOperands(); 2561 } 2562 2563 iterator_range<const_block_iterator> blocks() const { 2564 return make_range(block_begin(), block_end()); 2565 } 2566 2567 op_range incoming_values() { return operands(); } 2568 2569 const_op_range incoming_values() const { return operands(); } 2570 2571 /// Return the number of incoming edges 2572 /// 2573 unsigned getNumIncomingValues() const { return getNumOperands(); } 2574 2575 /// Return incoming value number x 2576 /// 2577 Value *getIncomingValue(unsigned i) const { 2578 return getOperand(i); 2579 } 2580 void setIncomingValue(unsigned i, Value *V) { 2581 assert(V && "PHI node got a null value!"); 2582 assert(getType() == V->getType() && 2583 "All operands to PHI node must be the same type as the PHI node!"); 2584 setOperand(i, V); 2585 } 2586 2587 static unsigned getOperandNumForIncomingValue(unsigned i) { 2588 return i; 2589 } 2590 2591 static unsigned getIncomingValueNumForOperand(unsigned i) { 2592 return i; 2593 } 2594 2595 /// Return incoming basic block number @p i. 2596 /// 2597 BasicBlock *getIncomingBlock(unsigned i) const { 2598 return block_begin()[i]; 2599 } 2600 2601 /// Return incoming basic block corresponding 2602 /// to an operand of the PHI. 2603 /// 2604 BasicBlock *getIncomingBlock(const Use &U) const { 2605 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); 2606 return getIncomingBlock(unsigned(&U - op_begin())); 2607 } 2608 2609 /// Return incoming basic block corresponding 2610 /// to value use iterator. 
2611 /// 2612 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { 2613 return getIncomingBlock(I.getUse()); 2614 } 2615 2616 void setIncomingBlock(unsigned i, BasicBlock *BB) { 2617 const_cast<block_iterator>(block_begin())[i] = BB; 2618 } 2619 2620 /// Copies the basic blocks from \p BBRange to the incoming basic block list 2621 /// of this PHINode, starting at \p ToIdx. 2622 void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange, 2623 uint32_t ToIdx = 0) { 2624 copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx); 2625 } 2626 2627 /// Replace every incoming basic block \p Old to basic block \p New. 2628 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { 2629 assert(New && Old && "PHI node got a null basic block!"); 2630 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 2631 if (getIncomingBlock(Op) == Old) 2632 setIncomingBlock(Op, New); 2633 } 2634 2635 /// Add an incoming value to the end of the PHI list 2636 /// 2637 void addIncoming(Value *V, BasicBlock *BB) { 2638 if (getNumOperands() == ReservedSpace) 2639 growOperands(); // Get more space! 2640 // Initialize some new operands. 2641 setNumHungOffUseOperands(getNumOperands() + 1); 2642 setIncomingValue(getNumOperands() - 1, V); 2643 setIncomingBlock(getNumOperands() - 1, BB); 2644 } 2645 2646 /// Remove an incoming value. This is useful if a 2647 /// predecessor basic block is deleted. The value removed is returned. 2648 /// 2649 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty 2650 /// is true), the PHI node is destroyed and any uses of it are replaced with 2651 /// dummy values. The only time there should be zero incoming values to a PHI 2652 /// node is when the block is dead, so this strategy is sound. 
2653 /// 2654 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); 2655 2656 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { 2657 int Idx = getBasicBlockIndex(BB); 2658 assert(Idx >= 0 && "Invalid basic block argument to remove!"); 2659 return removeIncomingValue(Idx, DeletePHIIfEmpty); 2660 } 2661 2662 /// Remove all incoming values for which the predicate returns true. 2663 /// The predicate accepts the incoming value index. 2664 void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate, 2665 bool DeletePHIIfEmpty = true); 2666 2667 /// Return the first index of the specified basic 2668 /// block in the value list for this PHI. Returns -1 if no instance. 2669 /// 2670 int getBasicBlockIndex(const BasicBlock *BB) const { 2671 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) 2672 if (block_begin()[i] == BB) 2673 return i; 2674 return -1; 2675 } 2676 2677 Value *getIncomingValueForBlock(const BasicBlock *BB) const { 2678 int Idx = getBasicBlockIndex(BB); 2679 assert(Idx >= 0 && "Invalid basic block argument!"); 2680 return getIncomingValue(Idx); 2681 } 2682 2683 /// Set every incoming value(s) for block \p BB to \p V. 2684 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { 2685 assert(BB && "PHI node got a null basic block!"); 2686 bool Found = false; 2687 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 2688 if (getIncomingBlock(Op) == BB) { 2689 Found = true; 2690 setIncomingValue(Op, V); 2691 } 2692 (void)Found; 2693 assert(Found && "Invalid basic block argument to set!"); 2694 } 2695 2696 /// If the specified PHI node always merges together the 2697 /// same value, return the value, otherwise return null. 2698 Value *hasConstantValue() const; 2699 2700 /// Whether the specified PHI node always merges 2701 /// together the same value, assuming undefs are equal to a unique 2702 /// non-undef value. 
2703 bool hasConstantOrUndefValue() const; 2704 2705 /// If the PHI node is complete which means all of its parent's predecessors 2706 /// have incoming value in this PHI, return true, otherwise return false. 2707 bool isComplete() const { 2708 return llvm::all_of(predecessors(getParent()), 2709 [this](const BasicBlock *Pred) { 2710 return getBasicBlockIndex(Pred) >= 0; 2711 }); 2712 } 2713 2714 /// Methods for support type inquiry through isa, cast, and dyn_cast: 2715 static bool classof(const Instruction *I) { 2716 return I->getOpcode() == Instruction::PHI; 2717 } 2718 static bool classof(const Value *V) { 2719 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2720 } 2721 2722 private: 2723 void growOperands(); 2724 }; 2725 2726 template <> 2727 struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { 2728 }; 2729 2730 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value) 2731 2732 //===----------------------------------------------------------------------===// 2733 // LandingPadInst Class 2734 //===----------------------------------------------------------------------===// 2735 2736 //===--------------------------------------------------------------------------- 2737 /// The landingpad instruction holds all of the information 2738 /// necessary to generate correct exception handling. The landingpad instruction 2739 /// cannot be moved from the top of a landing pad block, which itself is 2740 /// accessible only from the 'unwind' edge of an invoke. This uses the 2741 /// SubclassData field in Value to store whether or not the landingpad is a 2742 /// cleanup. 2743 /// 2744 class LandingPadInst : public Instruction { 2745 using CleanupField = BoolBitfieldElementT<0>; 2746 2747 /// The number of operands actually allocated. NumOperands is 2748 /// the number actually in use. 
  unsigned ReservedSpace;

  LandingPadInst(const LandingPadInst &LP);

public:
  enum ClauseType { Catch, Filter };

private:
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, InsertPosition InsertBefore);

  // Allocate space for exactly zero operands.
  void *operator new(size_t S) { return User::operator new(S); }

  void growOperands(unsigned Size);
  void init(unsigned NumReservedValues, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LandingPadInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Constructors - NumReservedClauses is a hint for the number of incoming
  /// clauses that this landingpad will have (use 0 if you really have no idea).
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr = "",
                                InsertPosition InsertBefore = nullptr);

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return 'true' if this landingpad instruction is a
  /// cleanup. I.e., it should be run when unwinding even if its landing pad
  /// doesn't catch the exception.
  bool isCleanup() const { return getSubclassData<CleanupField>(); }

  /// Indicate that this landingpad instruction is a cleanup.
  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }

  /// Add a catch or filter clause to the landing pad.
  void addClause(Constant *ClauseVal);

  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
  /// determine what type of clause this is.
  Constant *getClause(unsigned Idx) const {
    return cast<Constant>(getOperandList()[Idx]);
  }

  /// Return 'true' if the clause and index Idx is a catch clause.
  // Clause kind is encoded purely in the operand's type: filter clauses are
  // array-typed, catch clauses are everything else (see isFilter below).
  bool isCatch(unsigned Idx) const {
    return !isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Return 'true' if the clause and index Idx is a filter clause.
  bool isFilter(unsigned Idx) const {
    return isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Get the number of clauses for this landing pad.
  // Each clause is stored as one operand, so the counts coincide.
  unsigned getNumClauses() const { return getNumOperands(); }

  /// Grow the size of the operand list to accommodate the new
  /// number of clauses.
  void reserveClauses(unsigned Size) { growOperands(Size); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::LandingPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)

//===----------------------------------------------------------------------===//
//                               ReturnInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Return a value (possibly void), from a function.  Execution
/// does not continue in this function any longer.
///
class ReturnInst : public Instruction {
  ReturnInst(const ReturnInst &RI);

private:
  // ReturnInst constructors:
  // ReturnInst()                      - 'ret void' instruction
  // ReturnInst(    null)              - 'ret void' instruction
  // ReturnInst(Value* X)              - 'ret X' instruction
  // ReturnInst(null, Iterator It)     - 'ret void' instruction, insert before I
  // ReturnInst(Value* X, Iterator It) - 'ret X' instruction, insert before I
  // ReturnInst(    null, Inst *I)     - 'ret void' instruction, insert before I
  // ReturnInst(Value* X, Inst *I)     - 'ret X' instruction, insert before I
  // ReturnInst(    null, BB *B)       - 'ret void' instruction, insert @ end of B
  // ReturnInst(Value* X, BB *B)       - 'ret X' instruction, insert @ end of B
  //
  // NOTE: If the Value* passed is of type void then the constructor behaves as
  // if it was passed NULL.
  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
                      InsertPosition InsertBefore = nullptr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ReturnInst *cloneImpl() const;

public:
  // `!!retVal` passes an operand count of 1 for 'ret X' and 0 for 'ret void'
  // to the placement operator new.
  static ReturnInst *Create(LLVMContext &C, Value *retVal = nullptr,
                            InsertPosition InsertBefore = nullptr) {
    return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
  }

  static ReturnInst *Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
    return new (0) ReturnInst(C, nullptr, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor. Returns null if there is no return value.
  Value *getReturnValue() const {
    return getNumOperands() != 0 ? getOperand(0) : nullptr;
  }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Ret);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // 'ret' is a terminator with zero successors; these overrides are private
  // and unreachable so the generic successor API cannot be misused on it.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ReturnInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("ReturnInst has no successors!");
  }
};

template <>
struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)

//===----------------------------------------------------------------------===//
//                               BranchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Conditional or Unconditional Branch instruction.
///
class BranchInst : public Instruction {
  /// Ops list - Branches are strange.  The operands are ordered:
  ///  [Cond, FalseDest,] TrueDest.  This makes some accessors faster because
  /// they don't have to check for cond/uncond branchness. These are mostly
  /// accessed relative from op_end().
  BranchInst(const BranchInst &BI);
  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
  // BranchInst(BB *B)                           - 'br B'
  // BranchInst(BB* T, BB *F, Value *C)          - 'br C, T, F'
  // BranchInst(BB* B, Iter It)                  - 'br B'        insert before I
  // BranchInst(BB* T, BB *F, Value *C, Iter It) - 'br C, T, F', insert before I
  // BranchInst(BB* B, Inst *I)                  - 'br B'        insert before I
  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
  // BranchInst(BB* B, BB *I)                    - 'br B'        insert at end
  // BranchInst(BB* T, BB *F, Value *C, BB *I)   - 'br C, T, F', insert at end
  explicit BranchInst(BasicBlock *IfTrue,
                      InsertPosition InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             InsertPosition InsertBefore = nullptr);

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  BranchInst *cloneImpl() const;

public:
  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for branch instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  // The placement-new counts (1 and 3) match the operand layouts documented
  // at the top of the class: [TrueDest] and [Cond, FalseDest, TrueDest].
  static BranchInst *Create(BasicBlock *IfTrue,
                            InsertPosition InsertBefore = nullptr) {
    return new(1) BranchInst(IfTrue, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond,
                            InsertPosition InsertBefore = nullptr) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool isUnconditional() const { return getNumOperands() == 1; }
  bool isConditional()   const { return getNumOperands() == 3; }

  // Op<-3>/Op<-1> index backwards from op_end(); with the
  // [Cond, FalseDest, TrueDest] layout, Op<-3> is always the condition.
  Value *getCondition() const {
    assert(isConditional() && "Cannot get condition of an uncond branch!");
    return Op<-3>();
  }

  void setCondition(Value *V) {
    assert(isConditional() && "Cannot set condition of unconditional branch!");
    Op<-3>() = V;
  }

  unsigned getNumSuccessors() const { return 1+isConditional(); }

  // Successor 0 is TrueDest (the last operand); successor 1, if present,
  // is FalseDest (the operand before it).
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
    return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
    *(&Op<-1>() - idx) = NewSucc;
  }

  /// Swap the successors of this branch instruction.
  ///
  /// Swaps the successors of the branch instruction. This also swaps any
  /// branch weight metadata associated with the instruction so that it
  /// continues to map correctly to each operand.
  void swapSuccessors();

  // Skips the condition operand (if any) so the range covers only the
  // successor block operands.
  iterator_range<succ_op_iterator> successors() {
    return make_range(
        succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
        succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(
                          std::next(value_op_begin(), isConditional() ? 1 : 0)),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Br);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)

//===----------------------------------------------------------------------===//
//                               SwitchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Multiway switch
///
class SwitchInst : public Instruction {
  unsigned ReservedSpace;

  // Operand[0]    = Value to switch on
  // Operand[1]    = Default basic block destination
  // Operand[2n  ] = Value to match
  // Operand[2n+1] = BasicBlock to go to on match
  SwitchInst(const SwitchInst &SI);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination.
The number of additional cases can be specified here 3065 /// to make memory allocation more efficient. This constructor can also 3066 /// auto-insert before another instruction. 3067 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3068 InsertPosition InsertBefore); 3069 3070 // allocate space for exactly zero operands 3071 void *operator new(size_t S) { return User::operator new(S); } 3072 3073 void init(Value *Value, BasicBlock *Default, unsigned NumReserved); 3074 void growOperands(); 3075 3076 protected: 3077 // Note: Instruction needs to be a friend here to call cloneImpl. 3078 friend class Instruction; 3079 3080 SwitchInst *cloneImpl() const; 3081 3082 public: 3083 void operator delete(void *Ptr) { User::operator delete(Ptr); } 3084 3085 // -2 3086 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); 3087 3088 template <typename CaseHandleT> class CaseIteratorImpl; 3089 3090 /// A handle to a particular switch case. It exposes a convenient interface 3091 /// to both the case value and the successor block. 3092 /// 3093 /// We define this as a template and instantiate it to form both a const and 3094 /// non-const handle. 3095 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> 3096 class CaseHandleImpl { 3097 // Directly befriend both const and non-const iterators. 3098 friend class SwitchInst::CaseIteratorImpl< 3099 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; 3100 3101 protected: 3102 // Expose the switch type we're parameterized with to the iterator. 3103 using SwitchInstType = SwitchInstT; 3104 3105 SwitchInstT *SI; 3106 ptrdiff_t Index; 3107 3108 CaseHandleImpl() = default; 3109 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} 3110 3111 public: 3112 /// Resolves case value for current case. 
3113 ConstantIntT *getCaseValue() const { 3114 assert((unsigned)Index < SI->getNumCases() && 3115 "Index out the number of cases."); 3116 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); 3117 } 3118 3119 /// Resolves successor for current case. 3120 BasicBlockT *getCaseSuccessor() const { 3121 assert(((unsigned)Index < SI->getNumCases() || 3122 (unsigned)Index == DefaultPseudoIndex) && 3123 "Index out the number of cases."); 3124 return SI->getSuccessor(getSuccessorIndex()); 3125 } 3126 3127 /// Returns number of current case. 3128 unsigned getCaseIndex() const { return Index; } 3129 3130 /// Returns successor index for current case successor. 3131 unsigned getSuccessorIndex() const { 3132 assert(((unsigned)Index == DefaultPseudoIndex || 3133 (unsigned)Index < SI->getNumCases()) && 3134 "Index out the number of cases."); 3135 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0; 3136 } 3137 3138 bool operator==(const CaseHandleImpl &RHS) const { 3139 assert(SI == RHS.SI && "Incompatible operators."); 3140 return Index == RHS.Index; 3141 } 3142 }; 3143 3144 using ConstCaseHandle = 3145 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; 3146 3147 class CaseHandle 3148 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> { 3149 friend class SwitchInst::CaseIteratorImpl<CaseHandle>; 3150 3151 public: 3152 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {} 3153 3154 /// Sets the new value for current case. 3155 void setValue(ConstantInt *V) const { 3156 assert((unsigned)Index < SI->getNumCases() && 3157 "Index out the number of cases."); 3158 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V)); 3159 } 3160 3161 /// Sets the new successor for current case. 
3162 void setSuccessor(BasicBlock *S) const { 3163 SI->setSuccessor(getSuccessorIndex(), S); 3164 } 3165 }; 3166 3167 template <typename CaseHandleT> 3168 class CaseIteratorImpl 3169 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, 3170 std::random_access_iterator_tag, 3171 const CaseHandleT> { 3172 using SwitchInstT = typename CaseHandleT::SwitchInstType; 3173 3174 CaseHandleT Case; 3175 3176 public: 3177 /// Default constructed iterator is in an invalid state until assigned to 3178 /// a case for a particular switch. 3179 CaseIteratorImpl() = default; 3180 3181 /// Initializes case iterator for given SwitchInst and for given 3182 /// case number. 3183 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {} 3184 3185 /// Initializes case iterator for given SwitchInst and for given 3186 /// successor index. 3187 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, 3188 unsigned SuccessorIndex) { 3189 assert(SuccessorIndex < SI->getNumSuccessors() && 3190 "Successor index # out of range!"); 3191 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1) 3192 : CaseIteratorImpl(SI, DefaultPseudoIndex); 3193 } 3194 3195 /// Support converting to the const variant. This will be a no-op for const 3196 /// variant. 3197 operator CaseIteratorImpl<ConstCaseHandle>() const { 3198 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index); 3199 } 3200 3201 CaseIteratorImpl &operator+=(ptrdiff_t N) { 3202 // Check index correctness after addition. 3203 // Note: Index == getNumCases() means end(). 3204 assert(Case.Index + N >= 0 && 3205 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && 3206 "Case.Index out the number of cases."); 3207 Case.Index += N; 3208 return *this; 3209 } 3210 CaseIteratorImpl &operator-=(ptrdiff_t N) { 3211 // Check index correctness after subtraction. 3212 // Note: Case.Index == getNumCases() means end(). 
3213 assert(Case.Index - N >= 0 && 3214 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && 3215 "Case.Index out the number of cases."); 3216 Case.Index -= N; 3217 return *this; 3218 } 3219 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { 3220 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3221 return Case.Index - RHS.Case.Index; 3222 } 3223 bool operator==(const CaseIteratorImpl &RHS) const { 3224 return Case == RHS.Case; 3225 } 3226 bool operator<(const CaseIteratorImpl &RHS) const { 3227 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3228 return Case.Index < RHS.Case.Index; 3229 } 3230 const CaseHandleT &operator*() const { return Case; } 3231 }; 3232 3233 using CaseIt = CaseIteratorImpl<CaseHandle>; 3234 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; 3235 3236 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3237 unsigned NumCases, 3238 InsertPosition InsertBefore = nullptr) { 3239 return new SwitchInst(Value, Default, NumCases, InsertBefore); 3240 } 3241 3242 /// Provide fast operand accessors 3243 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3244 3245 // Accessor Methods for Switch stmt 3246 Value *getCondition() const { return getOperand(0); } 3247 void setCondition(Value *V) { setOperand(0, V); } 3248 3249 BasicBlock *getDefaultDest() const { 3250 return cast<BasicBlock>(getOperand(1)); 3251 } 3252 3253 /// Returns true if the default branch must result in immediate undefined 3254 /// behavior, false otherwise. 3255 bool defaultDestUndefined() const { 3256 return isa<UnreachableInst>(getDefaultDest()->getFirstNonPHIOrDbg()); 3257 } 3258 3259 void setDefaultDest(BasicBlock *DefaultCase) { 3260 setOperand(1, reinterpret_cast<Value*>(DefaultCase)); 3261 } 3262 3263 /// Return the number of 'cases' in this switch instruction, excluding the 3264 /// default case. 
3265 unsigned getNumCases() const { 3266 return getNumOperands()/2 - 1; 3267 } 3268 3269 /// Returns a read/write iterator that points to the first case in the 3270 /// SwitchInst. 3271 CaseIt case_begin() { 3272 return CaseIt(this, 0); 3273 } 3274 3275 /// Returns a read-only iterator that points to the first case in the 3276 /// SwitchInst. 3277 ConstCaseIt case_begin() const { 3278 return ConstCaseIt(this, 0); 3279 } 3280 3281 /// Returns a read/write iterator that points one past the last in the 3282 /// SwitchInst. 3283 CaseIt case_end() { 3284 return CaseIt(this, getNumCases()); 3285 } 3286 3287 /// Returns a read-only iterator that points one past the last in the 3288 /// SwitchInst. 3289 ConstCaseIt case_end() const { 3290 return ConstCaseIt(this, getNumCases()); 3291 } 3292 3293 /// Iteration adapter for range-for loops. 3294 iterator_range<CaseIt> cases() { 3295 return make_range(case_begin(), case_end()); 3296 } 3297 3298 /// Constant iteration adapter for range-for loops. 3299 iterator_range<ConstCaseIt> cases() const { 3300 return make_range(case_begin(), case_end()); 3301 } 3302 3303 /// Returns an iterator that points to the default case. 3304 /// Note: this iterator allows to resolve successor only. Attempt 3305 /// to resolve case value causes an assertion. 3306 /// Also note, that increment and decrement also causes an assertion and 3307 /// makes iterator invalid. 3308 CaseIt case_default() { 3309 return CaseIt(this, DefaultPseudoIndex); 3310 } 3311 ConstCaseIt case_default() const { 3312 return ConstCaseIt(this, DefaultPseudoIndex); 3313 } 3314 3315 /// Search all of the case values for the specified constant. If it is 3316 /// explicitly handled, return the case iterator of it, otherwise return 3317 /// default case iterator to indicate that it is handled by the default 3318 /// handler. 
3319 CaseIt findCaseValue(const ConstantInt *C) { 3320 return CaseIt( 3321 this, 3322 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex()); 3323 } 3324 ConstCaseIt findCaseValue(const ConstantInt *C) const { 3325 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) { 3326 return Case.getCaseValue() == C; 3327 }); 3328 if (I != case_end()) 3329 return I; 3330 3331 return case_default(); 3332 } 3333 3334 /// Finds the unique case value for a given successor. Returns null if the 3335 /// successor is not found, not unique, or is the default case. 3336 ConstantInt *findCaseDest(BasicBlock *BB) { 3337 if (BB == getDefaultDest()) 3338 return nullptr; 3339 3340 ConstantInt *CI = nullptr; 3341 for (auto Case : cases()) { 3342 if (Case.getCaseSuccessor() != BB) 3343 continue; 3344 3345 if (CI) 3346 return nullptr; // Multiple cases lead to BB. 3347 3348 CI = Case.getCaseValue(); 3349 } 3350 3351 return CI; 3352 } 3353 3354 /// Add an entry to the switch instruction. 3355 /// Note: 3356 /// This action invalidates case_end(). Old case_end() iterator will 3357 /// point to the added case. 3358 void addCase(ConstantInt *OnVal, BasicBlock *Dest); 3359 3360 /// This method removes the specified case and its successor from the switch 3361 /// instruction. Note that this operation may reorder the remaining cases at 3362 /// index idx and above. 3363 /// Note: 3364 /// This action invalidates iterators for all cases following the one removed, 3365 /// including the case_end() iterator. It returns an iterator for the next 3366 /// case. 
  CaseIt removeCase(CaseIt I);

  /// Every other operand starting at index 1 is a successor: operand 1 is the
  /// default destination, operand idx*2+1 is the destination of case idx-1.
  unsigned getNumSuccessors() const { return getNumOperands()/2; }
  BasicBlock *getSuccessor(unsigned idx) const {
    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
    return cast<BasicBlock>(getOperand(idx*2+1));
  }
  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
    setOperand(idx * 2 + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Switch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

/// A wrapper class to simplify modification of SwitchInst cases along with
/// their prof branch_weights metadata.
class SwitchInstProfUpdateWrapper {
  SwitchInst &SI;
  // Branch weights from the !prof metadata, if any (see init()).
  std::optional<SmallVector<uint32_t, 8>> Weights;
  // Set once weights are modified; the destructor then rewrites the metadata.
  bool Changed = false;

protected:
  MDNode *buildProfBranchWeightsMD();

  void init();

public:
  using CaseWeightOpt = std::optional<uint32_t>;

  // Smart-pointer-like access to the wrapped SwitchInst.
  SwitchInst *operator->() { return &SI; }
  SwitchInst &operator*() { return SI; }
  operator SwitchInst *() { return &SI; }

  SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }

  ~SwitchInstProfUpdateWrapper() {
    // Flush any weight changes back into the instruction's !prof metadata.
    if (Changed)
      SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
  }

  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
  /// correspondent branch weight.
  SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);

  /// Delegate the call to the underlying SwitchInst::addCase() and set the
  /// specified branch weight for the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);

  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
  /// this object to not touch the underlying SwitchInst in destructor.
  Instruction::InstListType::iterator eraseFromParent();

  /// Update the recorded branch weight for successor \p idx.
  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
  CaseWeightOpt getSuccessorWeight(unsigned idx);

  /// Read the branch weight for successor \p idx of \p SI without constructing
  /// a wrapper.
  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
};

template <>
struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)

//===----------------------------------------------------------------------===//
// IndirectBrInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Indirect Branch Instruction.
///
class IndirectBrInst : public Instruction {
  /// The number of operands actually allocated; getNumOperands() is the
  /// number in use (operands are hung off and grown via growOperands()).
  unsigned ReservedSpace;

  // Operand[0]   = Address to jump to
  // Operand[n+1] = n-th destination
  IndirectBrInst(const IndirectBrInst &IBI);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor can also
  /// autoinsert before another instruction.
  IndirectBrInst(Value *Address, unsigned NumDests,
                 InsertPosition InsertBefore);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *Address, unsigned NumDests);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  IndirectBrInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for indirectbr instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    // Each successor operand is known to be a BasicBlock, so the cast is safe.
    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                InsertPosition InsertBefore = nullptr) {
    return new IndirectBrInst(Address, NumDests, InsertBefore);
  }

  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for IndirectBrInst instruction.
  Value *getAddress() { return getOperand(0); }
  const Value *getAddress() const { return getOperand(0); }
  void setAddress(Value *V) { setOperand(0, V); }

  /// return the number of possible destinations in this
  /// indirectbr instruction.
  unsigned getNumDestinations() const { return getNumOperands()-1; }

  /// Return the specified destination.
  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }

  /// Add a destination.
  ///
  void addDestination(BasicBlock *Dest);

  /// This method removes the specified successor from the
  /// indirectbr instruction.
  void removeDestination(unsigned i);

  unsigned getNumSuccessors() const { return getNumOperands()-1; }
  BasicBlock *getSuccessor(unsigned i) const {
    // Successors start at operand 1; operand 0 is the jump address.
    return cast<BasicBlock>(getOperand(i+1));
  }
  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    setOperand(i + 1, NewSucc);
  }

  iterator_range<succ_op_iterator> successors() {
    // std::next skips the address operand at position 0.
    return make_range(succ_op_iterator(std::next(value_op_begin())),
                      succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(std::next(value_op_begin())),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::IndirectBr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)

//===----------------------------------------------------------------------===//
// InvokeInst Class
//===----------------------------------------------------------------------===//

/// Invoke instruction. The SubclassData field is used to hold the
/// calling convention of the call.
///
class InvokeInst : public CallBase {
  /// The number of operands for this call beyond the called function,
  /// arguments, and operand bundles.
  static constexpr int NumExtraOperands = 2;

  /// The index from the end of the operand array to the normal destination.
  static constexpr int NormalDestOpEndIdx = -3;

  /// The index from the end of the operand array to the unwind destination.
  static constexpr int UnwindDestOpEndIdx = -2;

  InvokeInst(const InvokeInst &BI);

  /// Construct an InvokeInst given a range of arguments.
  ///
  /// Construct an InvokeInst from a range of arguments
  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, InsertPosition InsertBefore);

  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
            BasicBlock *IfException, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InvokeInst *cloneImpl() const;

public:
  /// Create an invoke without operand bundles.
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            InsertPosition InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
                   NumOperands, NameStr, InsertBefore);
  }

  /// Create an invoke, co-allocating descriptor space for any operand bundles.
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                            const Twine &NameStr = "",
                            InsertPosition InsertBefore = nullptr) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertBefore);
  }

  /// Convenience overload taking a FunctionCallee (callee + type).
  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            InsertPosition InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, std::nullopt, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                            const Twine &NameStr = "",
                            InsertPosition InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertBefore);
  }

  /// Create a clone of \p II with a different set of operand bundles and
  /// insert it before \p InsertBefore.
  ///
  /// The returned invoke instruction is identical to \p II in every way except
  /// that the operand bundles for the new instruction are set to the operand
  /// bundles in \p Bundles.
  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
                            InsertPosition InsertPt = nullptr);

  // get*Dest - Return the destination basic blocks...
  // The destinations live at fixed offsets from the END of the operand list
  // (see NormalDestOpEndIdx/UnwindDestOpEndIdx above).
  BasicBlock *getNormalDest() const {
    return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
  }
  BasicBlock *getUnwindDest() const {
    return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
  }
  void setNormalDest(BasicBlock *B) {
    Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }
  void setUnwindDest(BasicBlock *B) {
    Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }

  /// Get the landingpad instruction from the landing pad
  /// block (the unwind destination).
  LandingPadInst *getLandingPadInst() const;

  // Successor 0 is the normal destination, successor 1 the unwind destination.
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < 2 && "Successor # out of range for invoke!");
    return i == 0 ? getNormalDest() : getUnwindDest();
  }

  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    assert(i < 2 && "Successor # out of range for invoke!");
    if (i == 0)
      setNormalDest(NewSucc);
    else
      setUnwindDest(NewSucc);
  }

  unsigned getNumSuccessors() const { return 2; }

  /// Updates profile metadata by scaling it by \p S / \p T.
  void updateProfWeight(uint64_t S, uint64_t T);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Invoke);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

// Operands are placed at the end of the co-allocated CallBase storage, hence
// op_end(this) - NumOperands as the start of the operand array.
InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}

//===----------------------------------------------------------------------===//
// CallBrInst Class
//===----------------------------------------------------------------------===//

/// CallBr instruction, tracking function calls that may not return control but
/// instead transfer it to a third location. The SubclassData field is used to
/// hold the calling convention of the call.
///
class CallBrInst : public CallBase {

  unsigned NumIndirectDests;

  CallBrInst(const CallBrInst &BI);

  /// Construct a CallBrInst given a range of arguments.
  ///
  /// Construct a CallBrInst from a range of arguments
  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                    ArrayRef<BasicBlock *> IndirectDests,
                    ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
                    int NumOperands, const Twine &NameStr,
                    InsertPosition InsertBefore);

  void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
            ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
                                int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallBrInst *cloneImpl() const;

public:
  /// Create a callbr without operand bundles.
  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            InsertPosition InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
    return new (NumOperands)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
                   NumOperands, NameStr, InsertBefore);
  }

  /// Create a callbr, co-allocating descriptor space for any operand bundles.
  static CallBrInst *
  Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
         ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
         ArrayRef<OperandBundleDef> Bundles = std::nullopt,
         const Twine &NameStr = "", InsertPosition InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
                                         CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
                   NumOperands, NameStr, InsertBefore);
  }

  /// Convenience overload taking a FunctionCallee (callee + type).
  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            InsertPosition InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                            const Twine &NameStr = "",
                            InsertPosition InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, Bundles, NameStr, InsertBefore);
  }

  /// Create a clone of \p CBI with a different set of operand bundles and
  /// insert it before \p InsertBefore.
  ///
  /// The returned callbr instruction is identical to \p CBI in every way
  /// except that the operand bundles for the new instruction are set to the
  /// operand bundles in \p Bundles.
  static CallBrInst *Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> Bundles,
                            InsertPosition InsertBefore = nullptr);

  /// Return the number of callbr indirect dest labels.
  ///
  unsigned getNumIndirectDests() const { return NumIndirectDests; }

  /// getIndirectDestLabel - Return the i-th indirect dest label.
3820 /// 3821 Value *getIndirectDestLabel(unsigned i) const { 3822 assert(i < getNumIndirectDests() && "Out of bounds!"); 3823 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1); 3824 } 3825 3826 Value *getIndirectDestLabelUse(unsigned i) const { 3827 assert(i < getNumIndirectDests() && "Out of bounds!"); 3828 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1); 3829 } 3830 3831 // Return the destination basic blocks... 3832 BasicBlock *getDefaultDest() const { 3833 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); 3834 } 3835 BasicBlock *getIndirectDest(unsigned i) const { 3836 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); 3837 } 3838 SmallVector<BasicBlock *, 16> getIndirectDests() const { 3839 SmallVector<BasicBlock *, 16> IndirectDests; 3840 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) 3841 IndirectDests.push_back(getIndirectDest(i)); 3842 return IndirectDests; 3843 } 3844 void setDefaultDest(BasicBlock *B) { 3845 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); 3846 } 3847 void setIndirectDest(unsigned i, BasicBlock *B) { 3848 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); 3849 } 3850 3851 BasicBlock *getSuccessor(unsigned i) const { 3852 assert(i < getNumSuccessors() + 1 && 3853 "Successor # out of range for callbr!"); 3854 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); 3855 } 3856 3857 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 3858 assert(i < getNumIndirectDests() + 1 && 3859 "Successor # out of range for callbr!"); 3860 return i == 0 ? 
setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); 3861 } 3862 3863 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } 3864 3865 // Methods for support type inquiry through isa, cast, and dyn_cast: 3866 static bool classof(const Instruction *I) { 3867 return (I->getOpcode() == Instruction::CallBr); 3868 } 3869 static bool classof(const Value *V) { 3870 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3871 } 3872 3873 private: 3874 // Shadow Instruction::setInstructionSubclassData with a private forwarding 3875 // method so that subclasses cannot accidentally use it. 3876 template <typename Bitfield> 3877 void setSubclassData(typename Bitfield::Type Value) { 3878 Instruction::setSubclassData<Bitfield>(Value); 3879 } 3880 }; 3881 3882 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 3883 ArrayRef<BasicBlock *> IndirectDests, 3884 ArrayRef<Value *> Args, 3885 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 3886 const Twine &NameStr, InsertPosition InsertBefore) 3887 : CallBase(Ty->getReturnType(), Instruction::CallBr, 3888 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 3889 InsertBefore) { 3890 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 3891 } 3892 3893 //===----------------------------------------------------------------------===// 3894 // ResumeInst Class 3895 //===----------------------------------------------------------------------===// 3896 3897 //===--------------------------------------------------------------------------- 3898 /// Resume the propagation of an exception. 3899 /// 3900 class ResumeInst : public Instruction { 3901 ResumeInst(const ResumeInst &RI); 3902 3903 explicit ResumeInst(Value *Exn, InsertPosition InsertBefore = nullptr); 3904 3905 protected: 3906 // Note: Instruction needs to be a friend here to call cloneImpl. 
  friend class Instruction;

  ResumeInst *cloneImpl() const;

public:
  static ResumeInst *Create(Value *Exn, InsertPosition InsertBefore = nullptr) {
    return new(1) ResumeInst(Exn, InsertBefore);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor.
  Value *getValue() const { return Op<0>(); }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Resume;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Private: resume has no successors; these exist only to satisfy the
  // successor interface and must never be called.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ResumeInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    llvm_unreachable("ResumeInst has no successors!");
  }
};

template <>
struct OperandTraits<ResumeInst> :
    public FixedNumOperandTraits<ResumeInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)

//===----------------------------------------------------------------------===//
// CatchSwitchInst Class
//===----------------------------------------------------------------------===//
class CatchSwitchInst : public Instruction {
  using UnwindDestField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated. NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  // Operand[0] = Outer scope
  // Operand[1] = Unwind block destination
  // Operand[n] = BasicBlock to go to on match
  CatchSwitchInst(const CatchSwitchInst &CSI);

  /// Create a new switch instruction, specifying a
  /// default destination.
  /// The number of additional handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor can also autoinsert before another instruction.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  InsertPosition InsertBefore);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
  void growOperands(unsigned Size);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchSwitchInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { return User::operator delete(Ptr); }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers,
                                 const Twine &NameStr = "",
                                 InsertPosition InsertBefore = nullptr) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertBefore);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for CatchSwitch stmt
  Value *getParentPad() const { return getOperand(0); }
  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }

  // Accessor Methods for CatchSwitch stmt
  // The unwind-dest flag lives in the instruction's subclass bitfield data.
  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }
  BasicBlock *getUnwindDest() const {
    // Operand 1 is the unwind destination only when one is present.
    if (hasUnwindDest())
      return cast<BasicBlock>(getOperand(1));
    return nullptr;
  }
  void setUnwindDest(BasicBlock *UnwindDest) {
    assert(UnwindDest);
    assert(hasUnwindDest());
    setOperand(1, UnwindDest);
  }

  /// return the number of 'handlers' in this catchswitch
  /// instruction, except the default handler
  unsigned getNumHandlers() const {
    // Operand 0 is the parent pad; operand 1 (if present) the unwind dest.
    if (hasUnwindDest())
      return getNumOperands() - 2;
    return getNumOperands() - 1;
  }

private:
  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
  static const BasicBlock *handler_helper(const Value *V) {
    return cast<BasicBlock>(V);
  }

public:
  using DerefFnTy = BasicBlock *(*)(Value *);
  using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
  using handler_range = iterator_range<handler_iterator>;
  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
  using const_handler_iterator =
      mapped_iterator<const_op_iterator, ConstDerefFnTy>;
  using const_handler_range = iterator_range<const_handler_iterator>;

  /// Returns an iterator that points to the first handler in CatchSwitchInst.
  handler_iterator handler_begin() {
    // Skip the parent pad, and the unwind destination if there is one.
    op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return handler_iterator(It, DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points to the first handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_begin() const {
    const_op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
  }

  /// Returns a read-only iterator that points one past the last
  /// handler in the CatchSwitchInst.
  handler_iterator handler_end() {
    return handler_iterator(op_end(), DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points one past the last handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_end() const {
    return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
  }

  /// iteration adapter for range-for loops.
  handler_range handlers() {
    return make_range(handler_begin(), handler_end());
  }

  /// iteration adapter for range-for loops.
  const_handler_range handlers() const {
    return make_range(handler_begin(), handler_end());
  }

  /// Add an entry to the switch instruction...
  /// Note:
  /// This action invalidates handler_end(). Old handler_end() iterator will
  /// point to the added handler.
  void addHandler(BasicBlock *Dest);

  void removeHandler(handler_iterator HI);

  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    return cast<BasicBlock>(getOperand(Idx + 1));
  }
  void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    setOperand(Idx + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CatchSwitch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)

//===----------------------------------------------------------------------===//
// CleanupPadInst Class
//===----------------------------------------------------------------------===//
class CleanupPadInst : public FuncletPadInst {
private:
  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
                          unsigned Values, const Twine &NameStr,
                          InsertPosition InsertBefore)
      : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
                       NameStr, InsertBefore) {}

public:
  static CleanupPadInst *Create(Value *ParentPad,
                                ArrayRef<Value *> Args = std::nullopt,
                                const Twine &NameStr = "",
                                InsertPosition InsertBefore = nullptr) {
    // One operand for the parent pad plus one per argument.
    unsigned Values = 1 + Args.size();
    return new (Values)
        CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CleanupPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// CatchPadInst Class
//===----------------------------------------------------------------------===//
class CatchPadInst : public FuncletPadInst {
private:
  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
                        unsigned Values, const Twine &NameStr,
                        InsertPosition InsertBefore)
      : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
                       NameStr, InsertBefore) {}

public:
  static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
                              const Twine &NameStr = "",
                              InsertPosition InsertBefore = nullptr) {
    // One operand for the owning catchswitch plus one per argument.
    unsigned Values = 1 + Args.size();
    return new (Values)
        CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
  }

  /// Convenience accessors
  CatchSwitchInst *getCatchSwitch() const {
    return cast<CatchSwitchInst>(Op<-1>());
  }
  void setCatchSwitch(Value *CatchSwitch) {
    assert(CatchSwitch);
    Op<-1>() = CatchSwitch;
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CatchPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// CatchReturnInst Class
//===----------------------------------------------------------------------===//

class CatchReturnInst : public Instruction {
  CatchReturnInst(const CatchReturnInst &RI);
  CatchReturnInst(Value *CatchPad, BasicBlock *BB, InsertPosition InsertBefore);

  void init(Value *CatchPad, BasicBlock *BB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchReturnInst *cloneImpl() const;

public:
  static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
                                 InsertPosition InsertBefore = nullptr) {
    assert(CatchPad);
    assert(BB);
    return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessors.
  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
  void setCatchPad(CatchPadInst *CatchPad) {
    assert(CatchPad);
    Op<0>() = CatchPad;
  }

  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
  void setSuccessor(BasicBlock *NewSucc) {
    assert(NewSucc);
    Op<1>() = NewSucc;
  }
  unsigned getNumSuccessors() const { return 1; }

  /// Get the parentPad of this catchret's catchpad's catchswitch.
  /// The successor block is implicitly a member of this funclet.
  Value *getCatchSwitchParentPad() const {
    return getCatchPad()->getCatchSwitch()->getParentPad();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CatchRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Private indexed successor accessors; a catchret always has exactly one
  // successor, so Idx must be 0.
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
    return getSuccessor();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
    setSuccessor(B);
  }
};

// CatchReturnInst always has exactly two operands: the catchpad (Op<0>) and
// the successor block (Op<1>).
template <>
struct OperandTraits<CatchReturnInst>
    : public FixedNumOperandTraits<CatchReturnInst, 2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)

//===----------------------------------------------------------------------===//
//                               CleanupReturnInst Class
//===----------------------------------------------------------------------===//

class CleanupReturnInst : public Instruction {
  // Bit 0 of the subclass data records whether an unwind-destination operand
  // is present.
  using UnwindDestField = BoolBitfieldElementT<0>;

private:
  CleanupReturnInst(const CleanupReturnInst &RI);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    InsertPosition InsertBefore = nullptr);

  void init(Value *CleanupPad, BasicBlock *UnwindBB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CleanupReturnInst *cloneImpl() const;

public:
  /// Create a cleanupret that exits the cleanuppad \p CleanupPad, optionally
  /// unwinding to \p UnwindBB; with no unwind destination the cleanupret
  /// unwinds to the caller. Operand count is 1 (the cleanuppad) plus 1 when
  /// an unwind destination is given.
  static CleanupReturnInst *Create(Value *CleanupPad,
                                   BasicBlock *UnwindBB = nullptr,
                                   InsertPosition InsertBefore = nullptr) {
    assert(CleanupPad);
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }

  /// Convenience accessor.
  CleanupPadInst *getCleanupPad() const {
    return cast<CleanupPadInst>(Op<0>());
  }
  void setCleanupPad(CleanupPadInst *CleanupPad) {
    assert(CleanupPad);
    Op<0>() = CleanupPad;
  }

  unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }

  BasicBlock *getUnwindDest() const {
    // Op<1> only exists when an unwind destination was provided at creation.
    return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
  }
  void setUnwindDest(BasicBlock *NewDest) {
    assert(NewDest);
    assert(hasUnwindDest());
    Op<1>() = NewDest;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CleanupRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Private indexed successor accessors; the only possible successor is the
  // unwind destination (Idx 0), when present.
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx == 0);
    return getUnwindDest();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx == 0);
    setUnwindDest(B);
  }

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

// CleanupReturnInst has a variable operand count: at least the cleanuppad,
// plus an optional unwind destination.
template <>
struct OperandTraits<CleanupReturnInst>
    : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)

//===----------------------------------------------------------------------===//
//                           UnreachableInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// This function has undefined behavior. In particular, the
/// presence of this instruction indicates some higher level knowledge that the
/// end of the block cannot be reached.
///
class UnreachableInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  UnreachableInst *cloneImpl() const;

public:
  explicit UnreachableInst(LLVMContext &C,
                           InsertPosition InsertBefore = nullptr);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Unreachable;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // UnreachableInst is a terminator with no successors; these indexed
  // accessors exist only to satisfy the generic terminator interface and
  // must never be reached.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("UnreachableInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("UnreachableInst has no successors!");
  }
};

//===----------------------------------------------------------------------===//
//                                 TruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of integer types.
class TruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
4396 friend class Instruction; 4397 4398 /// Clone an identical TruncInst 4399 TruncInst *cloneImpl() const; 4400 4401 public: 4402 enum { AnyWrap = 0, NoUnsignedWrap = (1 << 0), NoSignedWrap = (1 << 1) }; 4403 4404 /// Constructor with insert-before-instruction semantics 4405 TruncInst(Value *S, ///< The value to be truncated 4406 Type *Ty, ///< The (smaller) type to truncate to 4407 const Twine &NameStr = "", ///< A name for the new instruction 4408 InsertPosition InsertBefore = 4409 nullptr ///< Where to insert the new instruction 4410 ); 4411 4412 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4413 static bool classof(const Instruction *I) { 4414 return I->getOpcode() == Trunc; 4415 } 4416 static bool classof(const Value *V) { 4417 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4418 } 4419 4420 void setHasNoUnsignedWrap(bool B) { 4421 SubclassOptionalData = 4422 (SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap); 4423 } 4424 void setHasNoSignedWrap(bool B) { 4425 SubclassOptionalData = 4426 (SubclassOptionalData & ~NoSignedWrap) | (B * NoSignedWrap); 4427 } 4428 4429 /// Test whether this operation is known to never 4430 /// undergo unsigned overflow, aka the nuw property. 4431 bool hasNoUnsignedWrap() const { 4432 return SubclassOptionalData & NoUnsignedWrap; 4433 } 4434 4435 /// Test whether this operation is known to never 4436 /// undergo signed overflow, aka the nsw property. 4437 bool hasNoSignedWrap() const { 4438 return (SubclassOptionalData & NoSignedWrap) != 0; 4439 } 4440 4441 /// Returns the no-wrap kind of the operation. 
  unsigned getNoWrapKind() const {
    // Accumulate the nuw/nsw flags into a single bitmask.
    unsigned NoWrapKind = 0;
    if (hasNoUnsignedWrap())
      NoWrapKind |= NoUnsignedWrap;

    if (hasNoSignedWrap())
      NoWrapKind |= NoSignedWrap;

    return NoWrapKind;
  }
};

//===----------------------------------------------------------------------===//
//                                 ZExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents zero extension of integer types.
class ZExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ZExtInst
  ZExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  ZExtInst(Value *S,                  ///< The value to be zero extended
           Type *Ty,                  ///< The type to zero extend to
           const Twine &NameStr = "", ///< A name for the new instruction
           InsertPosition InsertBefore =
               nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == ZExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 SExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SExtInst
  SExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SExtInst(Value *S,                  ///< The value to be sign extended
           Type *Ty,                  ///< The type to sign extend to
           const Twine &NameStr = "", ///< A name for the new instruction
           InsertPosition InsertBefore =
               nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 FPTruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPTruncInst
  FPTruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPTruncInst(Value *S,                  ///< The value to be truncated
              Type *Ty,                  ///< The type to truncate to
              const Twine &NameStr = "", ///< A name for the new instruction
              InsertPosition InsertBefore =
                  nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPTrunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 FPExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPExtInst
  FPExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPExtInst(Value *S,                  ///< The value to be extended
            Type *Ty,                  ///< The type to extend to
            const Twine &NameStr = "", ///< A name for the new instruction
            InsertPosition InsertBefore =
                nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 UIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical UIToFPInst
  UIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  UIToFPInst(Value *S,                  ///< The value to be converted
             Type *Ty,                  ///< The type to convert to
             const Twine &NameStr = "", ///< A name for the new instruction
             InsertPosition InsertBefore =
                 nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == UIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 SIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SIToFPInst
  SIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SIToFPInst(Value *S,                  ///< The value to be converted
             Type *Ty,                  ///< The type to convert to
             const Twine &NameStr = "", ///< A name for the new instruction
             InsertPosition InsertBefore =
                 nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 FPToUIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to unsigned integer
class FPToUIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToUIInst
  FPToUIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToUIInst(Value *S,                  ///< The value to be converted
             Type *Ty,                  ///< The type to convert to
             const Twine &NameStr = "", ///< A name for the new instruction
             InsertPosition InsertBefore =
                 nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToUI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 FPToSIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to signed integer.
class FPToSIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToSIInst
  FPToSIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToSIInst(Value *S,                  ///< The value to be converted
             Type *Ty,                  ///< The type to convert to
             const Twine &NameStr = "", ///< A name for the new instruction
             InsertPosition InsertBefore =
                 nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToSI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 IntToPtrInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Constructor with insert-before-instruction semantics
  IntToPtrInst(Value *S,                  ///< The value to be converted
               Type *Ty,                  ///< The type to convert to
               const Twine &NameStr = "", ///< A name for the new instruction
               InsertPosition InsertBefore =
                   nullptr ///< Where to insert the new instruction
  );

  /// Clone an identical IntToPtrInst.
  IntToPtrInst *cloneImpl() const;

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // The result type of an inttoptr is the pointer type being produced.
    return getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == IntToPtr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 PtrToIntInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from a pointer to an integer.
class PtrToIntInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical PtrToIntInst.
  PtrToIntInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  PtrToIntInst(Value *S,                  ///< The value to be converted
               Type *Ty,                  ///< The type to convert to
               const Twine &NameStr = "", ///< A name for the new instruction
               InsertPosition InsertBefore =
                   nullptr ///< Where to insert the new instruction
  );

  /// Gets the pointer operand.
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand.
  const Value *getPointerOperand() const { return getOperand(0); }
  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    // The source operand of a ptrtoint carries the address space of interest.
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == PtrToInt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                             BitCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(Value *S,                  ///< The value to be casted
              Type *Ty,                  ///< The type to casted to
              const Twine &NameStr = "", ///< A name for the new instruction
              InsertPosition InsertBefore =
                  nullptr ///< Where to insert the new instruction
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                          AddrSpaceCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
      Value *S,                  ///< The value to be casted
      Type *Ty,                  ///< The type to casted to
      const Twine &NameStr = "", ///< A name for the new instruction
      InsertPosition InsertBefore =
          nullptr ///< Where to insert the new instruction
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand.
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand.
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};

//===----------------------------------------------------------------------===//
//                             Helper functions
//===----------------------------------------------------------------------===//

/// A helper function that returns the pointer operand of a load or store
/// instruction. Returns nullptr if not load or store.
4874 inline const Value *getLoadStorePointerOperand(const Value *V) { 4875 if (auto *Load = dyn_cast<LoadInst>(V)) 4876 return Load->getPointerOperand(); 4877 if (auto *Store = dyn_cast<StoreInst>(V)) 4878 return Store->getPointerOperand(); 4879 return nullptr; 4880 } 4881 inline Value *getLoadStorePointerOperand(Value *V) { 4882 return const_cast<Value *>( 4883 getLoadStorePointerOperand(static_cast<const Value *>(V))); 4884 } 4885 4886 /// A helper function that returns the pointer operand of a load, store 4887 /// or GEP instruction. Returns nullptr if not load, store, or GEP. 4888 inline const Value *getPointerOperand(const Value *V) { 4889 if (auto *Ptr = getLoadStorePointerOperand(V)) 4890 return Ptr; 4891 if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) 4892 return Gep->getPointerOperand(); 4893 return nullptr; 4894 } 4895 inline Value *getPointerOperand(Value *V) { 4896 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V))); 4897 } 4898 4899 /// A helper function that returns the alignment of load or store instruction. 4900 inline Align getLoadStoreAlignment(Value *I) { 4901 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 4902 "Expected Load or Store instruction"); 4903 if (auto *LI = dyn_cast<LoadInst>(I)) 4904 return LI->getAlign(); 4905 return cast<StoreInst>(I)->getAlign(); 4906 } 4907 4908 /// A helper function that returns the address space of the pointer operand of 4909 /// load or store instruction. 4910 inline unsigned getLoadStoreAddressSpace(Value *I) { 4911 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 4912 "Expected Load or Store instruction"); 4913 if (auto *LI = dyn_cast<LoadInst>(I)) 4914 return LI->getPointerAddressSpace(); 4915 return cast<StoreInst>(I)->getPointerAddressSpace(); 4916 } 4917 4918 /// A helper function that returns the type of a load or store instruction. 
inline Type *getLoadStoreType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  // For a store, the accessed type is the type of the value being stored.
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns an atomic operation's sync scope; returns
/// std::nullopt if it is not an atomic operation.
inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
  if (!I->isAtomic())
    return std::nullopt;
  // isAtomic() implies one of the instruction kinds below; anything else
  // reaching the end is a programming error.
  if (auto *AI = dyn_cast<LoadInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<StoreInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<FenceInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicRMWInst>(I))
    return AI->getSyncScopeID();
  llvm_unreachable("unhandled atomic operation");
}

//===----------------------------------------------------------------------===//
//                                FreezeInst Class
//===----------------------------------------------------------------------===//

/// This class represents a freeze function that returns random concrete
/// value if an operand is either a poison value or an undef value
class FreezeInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FreezeInst
  FreezeInst *cloneImpl() const;

public:
  /// Construct a freeze of value \p S.
  explicit FreezeInst(Value *S, const Twine &NameStr = "",
                      InsertPosition InsertBefore = nullptr);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Freeze;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

} // end namespace llvm

#endif // LLVM_IR_INSTRUCTIONS_H