1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file exposes the class definitions of all of the subclasses of the 10 // Instruction class. This is meant to be an easy way to get access to all 11 // instruction subclasses. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #ifndef LLVM_IR_INSTRUCTIONS_H 16 #define LLVM_IR_INSTRUCTIONS_H 17 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/Bitfields.h" 20 #include "llvm/ADT/MapVector.h" 21 #include "llvm/ADT/None.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/ADT/StringRef.h" 25 #include "llvm/ADT/Twine.h" 26 #include "llvm/ADT/iterator.h" 27 #include "llvm/ADT/iterator_range.h" 28 #include "llvm/IR/Attributes.h" 29 #include "llvm/IR/BasicBlock.h" 30 #include "llvm/IR/CallingConv.h" 31 #include "llvm/IR/CFG.h" 32 #include "llvm/IR/Constant.h" 33 #include "llvm/IR/DerivedTypes.h" 34 #include "llvm/IR/Function.h" 35 #include "llvm/IR/InstrTypes.h" 36 #include "llvm/IR/Instruction.h" 37 #include "llvm/IR/OperandTraits.h" 38 #include "llvm/IR/Type.h" 39 #include "llvm/IR/Use.h" 40 #include "llvm/IR/User.h" 41 #include "llvm/IR/Value.h" 42 #include "llvm/Support/AtomicOrdering.h" 43 #include "llvm/Support/Casting.h" 44 #include "llvm/Support/ErrorHandling.h" 45 #include <cassert> 46 #include <cstddef> 47 #include <cstdint> 48 #include <iterator> 49 50 namespace llvm { 51 52 class APInt; 53 class ConstantInt; 54 class DataLayout; 55 class LLVMContext; 56 57 //===----------------------------------------------------------------------===// 58 // AllocaInst Class 59 
//===----------------------------------------------------------------------===//

/// An instruction to allocate memory on the stack.
class AllocaInst : public UnaryInstruction {
  // Element type this alloca reserves space for; the instruction's own type
  // is a pointer to it (see getType() below).
  Type *AllocatedType;

  // Bitfield layout packed into the instruction's SubclassData:
  // alignment (stored as log2), then the inalloca and swifterror flags.
  using AlignmentField = AlignmentBitfieldElementT<0>;
  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
  using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
                                        SwiftErrorField>(),
                "Bitfields must be contiguous");

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                      const Twine &Name, Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
             Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Get allocation size in bits. Returns None if size can't be determined,
  /// e.g. in case of a VLA.
  Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// for use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment; stored as its log2 in the bitfield.
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  // FIXME: Remove this once the transition to Align is over.
  uint64_t getAlignment() const { return getAlign().value(); }

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  bool isUsedWithInAlloca() const {
    return getSubclassData<UsedWithInAllocaField>();
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setSubclassData<UsedWithInAllocaField>(V);
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
//===----------------------------------------------------------------------===//
// LoadInst Class
//===----------------------------------------------------------------------===//

/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  // Bitfield layout packed into SubclassData: volatile flag, alignment
  // (stored as log2), atomic ordering.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return the alignment of the access that is being performed.
  /// FIXME: Remove this function once the transition to Align is over.
  /// Use getAlign() instead.
  uint64_t getAlignment() const { return getAlign().value(); }

  /// Return the alignment of the access that is being performed.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  /// Set the alignment; stored as its log2 in the bitfield.
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }
  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Return true if this load is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return true if this load is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this load instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
//===----------------------------------------------------------------------===//
// StoreInst Class
//===----------------------------------------------------------------------===//

/// An instruction for storing to memory.
class StoreInst : public Instruction {
  // Bitfield layout packed into SubclassData: volatile flag, alignment
  // (stored as log2), atomic ordering.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the alignment of the access that is being performed.
  /// FIXME: Remove this function once the transition to Align is over.
  /// Use getAlign() instead.
  uint64_t getAlignment() const { return getAlign().value(); }

  /// Return the alignment of the access that is being performed.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  /// Set the alignment; stored as its log2 in the bitfield.
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this store instruction. May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Return true if this store is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return true if this store is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this store instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
//===----------------------------------------------------------------------===//
// FenceInst Class
//===----------------------------------------------------------------------===//

/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
  // Only the atomic ordering is packed into SubclassData; a fence has no
  // operands and no volatile/alignment state.
  using OrderingField = AtomicOrderingBitfieldElementT<0>;

  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  FenceInst *cloneImpl() const;

public:
  // Ordering may only be Acquire, Release, AcquireRelease, or
  // SequentiallyConsistent.
  FenceInst(LLVMContext &C, AtomicOrdering Ordering,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Returns the ordering constraint of this fence instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this fence instruction. May only be
  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this fence instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this fence instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Fence;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this fence instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Class
//===----------------------------------------------------------------------===//

/// An instruction that atomically checks whether a
/// specified value is in a memory location, and, if it is, stores a new value
/// there. The value returned by this instruction is a pair containing the
/// original value as first element, and an i1 indicating success (true) or
/// failure (false) as second element.
///
class AtomicCmpXchgInst : public Instruction {
  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
            AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
            SyncScope::ID SSID);

  // Local alias: a 3-bit bitfield element wide enough for any AtomicOrdering.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicCmpXchgInst *cloneImpl() const;

public:
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    Instruction *InsertBefore = nullptr);
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    BasicBlock *InsertAtEnd);

  // allocate space for exactly three operands
  void *operator new(size_t S) { return User::operator new(S, 3); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Bitfield layout packed into SubclassData: volatile flag, weak flag,
  // success ordering, failure ordering, alignment (stored as log2).
  using VolatileField = BoolBitfieldElementT<0>;
  using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
  using SuccessOrderingField =
      AtomicOrderingBitfieldElementT<WeakField::NextBit>;
  using FailureOrderingField =
      AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
  using AlignmentField =
      AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
                              FailureOrderingField, AlignmentField>(),
      "Bitfields must be contiguous");

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment; stored as its log2 in the bitfield.
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a cmpxchg from a volatile memory
  /// location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile cmpxchg.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return true if this cmpxchg may spuriously fail.
  bool isWeak() const { return getSubclassData<WeakField>(); }

  /// Specify whether this cmpxchg may spuriously fail.
  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered;
  }

  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered &&
           Ordering != AtomicOrdering::AcquireRelease &&
           Ordering != AtomicOrdering::Release;
  }

  /// Returns the success ordering constraint of this cmpxchg instruction.
  AtomicOrdering getSuccessOrdering() const {
    return getSubclassData<SuccessOrderingField>();
  }

  /// Sets the success ordering constraint of this cmpxchg instruction.
  void setSuccessOrdering(AtomicOrdering Ordering) {
    assert(isValidSuccessOrdering(Ordering) &&
           "invalid CmpXchg success ordering");
    setSubclassData<SuccessOrderingField>(Ordering);
  }

  /// Returns the failure ordering constraint of this cmpxchg instruction.
  AtomicOrdering getFailureOrdering() const {
    return getSubclassData<FailureOrderingField>();
  }

  /// Sets the failure ordering constraint of this cmpxchg instruction.
  void setFailureOrdering(AtomicOrdering Ordering) {
    assert(isValidFailureOrdering(Ordering) &&
           "invalid CmpXchg failure ordering");
    setSubclassData<FailureOrderingField>(Ordering);
  }

  /// Returns a single ordering which is at least as strong as both the
  /// success and failure orderings for this cmpxchg.
  AtomicOrdering getMergedOrdering() const {
    if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
      return AtomicOrdering::SequentiallyConsistent;
    if (getFailureOrdering() == AtomicOrdering::Acquire) {
      if (getSuccessOrdering() == AtomicOrdering::Monotonic)
        return AtomicOrdering::Acquire;
      if (getSuccessOrdering() == AtomicOrdering::Release)
        return AtomicOrdering::AcquireRelease;
    }
    return getSuccessOrdering();
  }

  /// Returns the synchronization scope ID of this cmpxchg instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this cmpxchg instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getCompareOperand() { return getOperand(1); }
  const Value *getCompareOperand() const { return getOperand(1); }

  Value *getNewValOperand() { return getOperand(2); }
  const Value *getNewValOperand() const { return getOperand(2); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the strongest permitted ordering on failure, given the
  /// desired ordering on success.
  ///
  /// If the comparison in a cmpxchg operation fails, there is no atomic store
  /// so release semantics cannot be provided. So this function drops explicit
  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
  /// operation would remain SequentiallyConsistent.
  static AtomicOrdering
  getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
    switch (SuccessOrdering) {
    default:
      llvm_unreachable("invalid cmpxchg success ordering");
    case AtomicOrdering::Release:
    case AtomicOrdering::Monotonic:
      return AtomicOrdering::Monotonic;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicCmpXchg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this cmpxchg instruction. Not quite
  /// enough room in SubClassData for everything, so synchronization scope ID
  /// gets its own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<AtomicCmpXchgInst> :
    public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
//===----------------------------------------------------------------------===//
// AtomicRMWInst Class
//===----------------------------------------------------------------------===//

/// An instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back. Returns
/// the old value.
///
class AtomicRMWInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicRMWInst *cloneImpl() const;

public:
  /// This enumeration lists the possible modifications atomicrmw can make. In
  /// the descriptions, 'p' is the pointer to the instruction's memory location,
  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
  /// instruction. These instructions always return 'old'.
  enum BinOp : unsigned {
    /// *p = v
    Xchg,
    /// *p = old + v
    Add,
    /// *p = old - v
    Sub,
    /// *p = old & v
    And,
    /// *p = ~(old & v)
    Nand,
    /// *p = old | v
    Or,
    /// *p = old ^ v
    Xor,
    /// *p = old >signed v ? old : v
    Max,
    /// *p = old <signed v ? old : v
    Min,
    /// *p = old >unsigned v ? old : v
    UMax,
    /// *p = old <unsigned v ? old : v
    UMin,

    /// *p = old + v
    FAdd,

    /// *p = old - v
    FSub,

    FIRST_BINOP = Xchg,
    LAST_BINOP = FSub,
    BAD_BINOP
  };

private:
  // Local aliases for the packed bitfield elements declared below.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

  template <unsigned Offset>
  using BinOpBitfieldElement =
      typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;

public:
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                Instruction *InsertBefore = nullptr);
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Bitfield layout packed into SubclassData: volatile flag, atomic
  // ordering, the BinOp operation, alignment (stored as log2).
  using VolatileField = BoolBitfieldElementT<0>;
  using AtomicOrderingField =
      AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
  using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
  static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
                                        OperationField, AlignmentField>(),
                "Bitfields must be contiguous");

  /// Return the operation (BinOp) this atomicrmw performs.
  BinOp getOperation() const { return getSubclassData<OperationField>(); }

  static StringRef getOperationName(BinOp Op);

  /// Return true if \p Op is a floating-point operation (FAdd or FSub).
  static bool isFPOperation(BinOp Op) {
    switch (Op) {
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub:
      return true;
    default:
      return false;
    }
  }

  /// Set the operation (BinOp) this atomicrmw performs.
  void setOperation(BinOp Operation) {
    setSubclassData<OperationField>(Operation);
  }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment; stored as its log2 in the bitfield.
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a RMW on a volatile memory location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile RMW or not.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Returns the ordering constraint of this rmw instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<AtomicOrderingField>();
  }

  /// Sets the ordering constraint of this rmw instruction.
  void setOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "atomicrmw instructions can only be atomic.");
    setSubclassData<AtomicOrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this rmw instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this rmw instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Return true if this instruction's operation is FAdd or FSub.
  bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
            AtomicOrdering Ordering, SyncScope::ID SSID);

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this rmw instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<AtomicRMWInst>
    : public FixedNumOperandTraits<AtomicRMWInst,2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)

//===----------------------------------------------------------------------===//
// GetElementPtrInst Class
//===----------------------------------------------------------------------===//

// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
inline Type *checkGEPType(Type *Ty) {
  assert(Ty && "Invalid GetElementPtrInst indices for type!");
  return Ty;
}

/// An instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs
///
class GetElementPtrInst : public Instruction {
  Type *SourceElementType;
  Type *ResultElementType;

  GetElementPtrInst(const GetElementPtrInst &GEPI);

  /// Constructors - Create a getelementptr instruction with a base pointer and
  /// a list of indices. The first ctor can optionally insert before an existing
  /// instruction, the second appends the new instruction to the specified
  /// BasicBlock.
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, Instruction *InsertBefore);
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  GetElementPtrInst *cloneImpl() const;

public:
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    // One operand for the base pointer plus one per index.
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    assert(cast<PointerType>(Ptr->getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(PointeeType));
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertBefore);
  }

  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    // One operand for the base pointer plus one per index.
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    assert(cast<PointerType>(Ptr->getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(PointeeType));
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertAtEnd);
  }

  // Deprecated: forwards to the explicit-element-type overload below by
  // recovering the pointee type from the (non-opaque) pointer type.
  LLVM_ATTRIBUTE_DEPRECATED(static GetElementPtrInst *CreateInBounds(
      Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr = "",
      Instruction *InsertBefore = nullptr),
                            "Use the version with explicit element type instead") {
    return CreateInBounds(
        Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
        NameStr, InsertBefore);
  }

  /// Create an "inbounds" getelementptr. See the documentation for the
  /// "inbounds" flag in LangRef.html for details.
  static GetElementPtrInst *
  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
                 const Twine &NameStr = "",
                 Instruction *InsertBefore = nullptr) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
    GEP->setIsInBounds(true);
    return GEP;
  }

  // Deprecated: forwards to the explicit-element-type overload below by
  // recovering the pointee type from the (non-opaque) pointer type.
  LLVM_ATTRIBUTE_DEPRECATED(static GetElementPtrInst *CreateInBounds(
      Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr,
      BasicBlock *InsertAtEnd),
                            "Use the version with explicit element type instead") {
    return CreateInBounds(
        Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
        NameStr, InsertAtEnd);
  }

  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr,
                                           BasicBlock *InsertAtEnd) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
    GEP->setIsInBounds(true);
    return GEP;
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  Type *getSourceElementType() const { return SourceElementType; }

  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
  void setResultElementType(Type *Ty) { ResultElementType = Ty; }

  Type *getResultElementType() const {
    assert(cast<PointerType>(getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(ResultElementType));
    return ResultElementType;
  }

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }

  /// Returns the result type of a getelementptr with the given source
  /// element type and indexes.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// source element type.
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  /// Return the type of the element at the given index of an indexable
  /// type.  This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
  ///
  /// Returns null if the type can't be indexed, or the given index is not
  /// legal for the given type.
  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);

  // Iterators over the index operands only; operand 0 (the base pointer) is
  // skipped.
  inline op_iterator idx_begin() { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator idx_end() { return op_end(); }
  inline const_op_iterator idx_end() const { return op_end(); }

  inline iterator_range<op_iterator> indices() {
    return make_range(idx_begin(), idx_end());
  }

  inline iterator_range<const_op_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getPointerOperand() {
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }
  static unsigned getPointerOperandIndex() {
    return 0U;    // get index for modifying correct operand.
  }

  /// Method to return the pointer operand as a
  /// PointerType.
  Type *getPointerOperandType() const {
    return getPointerOperand()->getType();
  }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  /// Returns the pointer type returned by the GEP
  /// instruction, which may be a vector of pointers.
  static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
                                ArrayRef<Value *> IdxList) {
    PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
    unsigned AddrSpace = OrigPtrTy->getAddressSpace();
    Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
    // Opaque pointers carry no pointee type; otherwise point at the result
    // element type.
    Type *PtrTy = OrigPtrTy->isOpaque()
                      ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
                      : PointerType::get(ResultElemTy, AddrSpace);
    // Vector GEP: if the base pointer or any index is a vector, the result is
    // a vector of pointers with the matching element count.
    if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
      ElementCount EltCount = PtrVTy->getElementCount();
      return VectorType::get(PtrTy, EltCount);
    }
    for (Value *Index : IdxList)
      if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
        ElementCount EltCount = IndexVTy->getElementCount();
        return VectorType::get(PtrTy, EltCount);
      }
    // Scalar GEP
    return PtrTy;
  }

  unsigned getNumIndices() const {  // Note: always non-negative
    return getNumOperands() - 1;
  }

  bool hasIndices() const {
    return getNumOperands() > 1;
  }

  /// Return true if all of the indices of this GEP are
  /// zeros.  If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const;

  /// Return true if all of the indices of this GEP are
  /// constant integers.  If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const;

  /// Set or clear the inbounds flag on this GEP instruction.
  /// See LangRef.html for the meaning of inbounds on a getelementptr.
  void setIsInBounds(bool b = true);

  /// Determine whether the GEP has the inbounds flag.
  bool isInBounds() const;

  /// Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
  /// Decompose this GEP into a constant offset plus a sum of (value * APInt)
  /// variable-index contributions, collected into \p VariableOffsets.
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     MapVector<Value *, APInt> &VariableOffsets,
                     APInt &ConstantOffset) const;
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

// GEPs are variadic: one base pointer operand plus any number of indices.
template <>
struct OperandTraits<GetElementPtrInst> :
  public VariadicOperandTraits<GetElementPtrInst, 1> {
};

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)

//===----------------------------------------------------------------------===//
//                               ICmpInst Class
//===----------------------------------------------------------------------===//

/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on integers or pointers. The operands
/// must be identical types.
/// Represent an integer comparison operator.
class ICmpInst: public CmpInst {
  // Sanity-check the predicate and operand types; compiled out in NDEBUG
  // builds (callers wrap the call in #ifndef NDEBUG).
  void AssertOK() {
    assert(isIntPredicate() &&
           "Invalid ICmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
          "Both operands to ICmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
           "Invalid operand types for ICmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ICmpInst
  ICmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  ICmpInst(
    Instruction *InsertBefore,  ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with insert-at-end semantics.
  ICmpInst(
    BasicBlock &InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with no-insertion semantics
  ICmpInst(
    Predicate pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as signed.
  /// Return the signed version of the predicate
  Predicate getSignedPredicate() const {
    return getSignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the signed version of the predicate.
  static Predicate getSignedPredicate(Predicate pred);

  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as unsigned.
  /// Return the unsigned version of the predicate
  Predicate getUnsignedPredicate() const {
    return getUnsignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the unsigned version of the predicate.
  static Predicate getUnsignedPredicate(Predicate pred);

  /// Return true if this predicate is either EQ or NE.  This also
  /// tests for commutativity.
  static bool isEquality(Predicate P) {
    return P == ICMP_EQ || P == ICMP_NE;
  }

  /// Return true if this predicate is either EQ or NE.  This also
  /// tests for commutativity.
  bool isEquality() const {
    return isEquality(getPredicate());
  }

  /// @returns true if the predicate of this ICmpInst is commutative
  /// Determine if this relation is commutative.
  bool isCommutative() const { return isEquality(); }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  bool isRelational() const {
    return !isEquality();
  }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  static bool isRelational(Predicate P) {
    return !isEquality(P);
  }

  /// Return true if the predicate is SGT or UGT.
  ///
  static bool isGT(Predicate P) {
    return P == ICMP_SGT || P == ICMP_UGT;
  }

  /// Return true if the predicate is SLT or ULT.
  ///
  static bool isLT(Predicate P) {
    return P == ICMP_SLT || P == ICMP_ULT;
  }

  /// Return true if the predicate is SGE or UGE.
  ///
  static bool isGE(Predicate P) {
    return P == ICMP_SGE || P == ICMP_UGE;
  }

  /// Return true if the predicate is SLE or ULE.
  ///
  static bool isLE(Predicate P) {
    return P == ICMP_SLE || P == ICMP_ULE;
  }

  /// Returns the sequence of all ICmp predicates.
  ///
  static auto predicates() { return ICmpPredicates(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Return result of `LHS Pred RHS` comparison.
  static bool compare(const APInt &LHS, const APInt &RHS,
                      ICmpInst::Predicate Pred);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ICmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                               FCmpInst Class
//===----------------------------------------------------------------------===//

/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values. The operands must be identical types.
/// Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
  // Sanity-check the predicate and operand types (asserts only).
  void AssertOK() {
    assert(isFPPredicate() && "Invalid FCmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to FCmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
           "Invalid operand types for FCmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FCmpInst
  FCmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  FCmpInst(
    Instruction *InsertBefore, ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
    AssertOK();
  }

  /// Constructor with insert-at-end semantics.
  FCmpInst(
    BasicBlock &InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
    AssertOK();
  }

  /// Constructor with no-insertion semantics
  FCmpInst(
    Predicate Pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "", ///< Name of the instruction
    Instruction *FlagsSource = nullptr
  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
              RHS, NameStr, nullptr, FlagsSource) {
    AssertOK();
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  static bool isEquality(Predicate Pred) {
    return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
           Pred == FCMP_UNE;
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  bool isEquality() const { return isEquality(getPredicate()); }

  /// @returns true if the predicate of this instruction is commutative.
  /// Determine if this is a commutative predicate.
  bool isCommutative() const {
    return isEquality() ||
           getPredicate() == FCMP_FALSE ||
           getPredicate() == FCMP_TRUE ||
           getPredicate() == FCMP_ORD ||
           getPredicate() == FCMP_UNO;
  }

  /// @returns true if the predicate is relational (not EQ or NE).
  /// Determine if this a relational predicate.
  bool isRelational() const { return !isEquality(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Returns the sequence of all FCmp predicates.
  ///
  static auto predicates() { return FCmpPredicates(); }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::FCmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
/// This class represents a function call, abstracting a target
/// machine's calling convention.  This class uses low bit of the SubClassData
/// field to indicate whether or not this is a tail call.  The rest of the bits
/// hold the calling convention of the call.
///
class CallInst : public CallBase {
  CallInst(const CallInst &CI);

  /// Construct a CallInst given a range of arguments.
  /// Construct a CallInst from a range of arguments
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  Instruction *InsertBefore);

  // Convenience form: no operand bundles.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  const Twine &NameStr, Instruction *InsertBefore)
      : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}

  /// Construct a CallInst given a range of arguments.
  /// Construct a CallInst from a range of arguments
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
                    Instruction *InsertBefore);

  CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
           BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus the input operand
    // counts provided.
    return 1 + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallInst *cloneImpl() const;

public:
  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    // Extra trailing storage for the operand-bundle descriptors.
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    // Extra trailing storage for the operand-bundle descriptors.
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
  }

  // FunctionCallee convenience overloads: unpack the callee's function type
  // and value, then forward to the corresponding overload above.
  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical \p CI in every way except that
  /// the operand bundles for the new instruction are set to the operand bundles
  /// in \p Bundles.
  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
                          Instruction *InsertPt = nullptr);

  /// Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  /// Generate the IR for a call to the builtin free function.
  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 BasicBlock *InsertAtEnd);

  // Note that 'musttail' implies 'tail'.
  enum TailCallKind : unsigned {
    TCK_None = 0,
    TCK_Tail = 1,
    TCK_MustTail = 2,
    TCK_NoTail = 3,
    TCK_LAST = TCK_NoTail
  };

  using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
  static_assert(
      Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
      "Bitfields must be contiguous");

  TailCallKind getTailCallKind() const {
    return getSubclassData<TailCallKindField>();
  }

  // True for both 'tail' and 'musttail' (musttail implies tail).
  bool isTailCall() const {
    TailCallKind Kind = getTailCallKind();
    return Kind == TCK_Tail || Kind == TCK_MustTail;
  }

  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }

  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }

  void setTailCallKind(TailCallKind TCK) {
    setSubclassData<TailCallKindField>(TCK);
  }

  void setTailCall(bool IsTc = true) {
    setTailCallKind(IsTc ? TCK_Tail : TCK_None);
  }

  /// Return true if the call can return twice
  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
  void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Updates profile metadata by scaling it by \p S / \p T.
  void updateProfWeight(uint64_t S, uint64_t T);

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}

//===----------------------------------------------------------------------===//
//                               SelectInst Class
//===----------------------------------------------------------------------===//

/// This class represents the LLVM 'select' instruction.
///
class SelectInst : public Instruction {
  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             Instruction *InsertBefore)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertBefore) {
    init(C, S1, S2);
    setName(NameStr);
  }

  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             BasicBlock *InsertAtEnd)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertAtEnd) {
    init(C, S1, S2);
    setName(NameStr);
  }

  // Operand layout: 0 = condition, 1 = value-if-true, 2 = value-if-false.
  void init(Value *C, Value *S1, Value *S2) {
    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
    Op<0>() = C;
    Op<1>() = S1;
    Op<2>() = S2;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SelectInst *cloneImpl() const;

public:
  /// Create a select. If \p MDFrom is non-null, the new instruction also
  /// receives a copy of \p MDFrom's metadata.
  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr,
                            Instruction *MDFrom = nullptr) {
    SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
    if (MDFrom)
      Sel->copyMetadata(*MDFrom);
    return Sel;
  }

  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
  }

  const Value *getCondition() const { return Op<0>(); }
  const Value *getTrueValue() const { return Op<1>(); }
  const Value *getFalseValue() const { return Op<2>(); }
  Value *getCondition() { return Op<0>(); }
  Value *getTrueValue() { return Op<1>(); }
  Value *getFalseValue() { return Op<2>(); }

  void setCondition(Value *V) { Op<0>() = V; }
  void setTrueValue(Value *V) { Op<1>() = V; }
  void setFalseValue(Value *V) { Op<2>() = V; }

  /// Swap the true and false values of the select instruction.
  /// This doesn't swap prof metadata.
  void swapValues() { Op<1>().swap(Op<2>()); }

  /// Return a string if the specified operands are invalid
  /// for a select operation, otherwise return null.
  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  OtherOps getOpcode() const {
    return static_cast<OtherOps>(Instruction::getOpcode());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Select;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)

//===----------------------------------------------------------------------===//
// VAArgInst Class
//===----------------------------------------------------------------------===//

/// This class represents the va_arg llvm instruction, which returns
/// an argument of the specified type given a va_list and increments that list
///
class VAArgInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  VAArgInst *cloneImpl() const;

public:
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
            Instruction *InsertBefore = nullptr)
      : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
            BasicBlock *InsertAtEnd)
      : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
    setName(NameStr);
  }

  // The single operand (index 0) is the va_list pointer passed to the ctor.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// ExtractElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a single (scalar)
/// element from a VectorType value
///
class ExtractElementInst : public Instruction {
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
                     Instruction *InsertBefore = nullptr);
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
                     BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractElementInst *cloneImpl() const;

public:
  // Allocates exactly two operand slots: the vector (0) and the index (1).
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr = "",
                                    Instruction *InsertBefore = nullptr) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
  }

  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr,
                                    BasicBlock *InsertAtEnd) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an extractelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *Idx);

  Value *getVectorOperand() { return Op<0>(); }
  Value *getIndexOperand() { return Op<1>(); }
  const Value *getVectorOperand() const { return Op<0>(); }
  const Value *getIndexOperand() const { return Op<1>(); }

  VectorType *getVectorOperandType() const {
    return cast<VectorType>(getVectorOperand()->getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)

//===----------------------------------------------------------------------===//
// InsertElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a single (scalar)
/// element into a VectorType value
///
class InsertElementInst : public Instruction {
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
                    BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertElementInst *cloneImpl() const;

public:
  // Allocates exactly three operand slots: vector, new element, index.
  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
  }

  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an insertelement instruction can be
  /// formed with the specified operands.
1977 static bool isValidOperands(const Value *Vec, const Value *NewElt, 1978 const Value *Idx); 1979 1980 /// Overload to return most specific vector type. 1981 /// 1982 VectorType *getType() const { 1983 return cast<VectorType>(Instruction::getType()); 1984 } 1985 1986 /// Transparently provide more efficient getOperand methods. 1987 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1988 1989 // Methods for support type inquiry through isa, cast, and dyn_cast: 1990 static bool classof(const Instruction *I) { 1991 return I->getOpcode() == Instruction::InsertElement; 1992 } 1993 static bool classof(const Value *V) { 1994 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1995 } 1996 }; 1997 1998 template <> 1999 struct OperandTraits<InsertElementInst> : 2000 public FixedNumOperandTraits<InsertElementInst, 3> { 2001 }; 2002 2003 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value) 2004 2005 //===----------------------------------------------------------------------===// 2006 // ShuffleVectorInst Class 2007 //===----------------------------------------------------------------------===// 2008 2009 constexpr int UndefMaskElem = -1; 2010 2011 /// This instruction constructs a fixed permutation of two 2012 /// input vectors. 2013 /// 2014 /// For each element of the result vector, the shuffle mask selects an element 2015 /// from one of the input vectors to copy to the result. Non-negative elements 2016 /// in the mask represent an index into the concatenated pair of input vectors. 2017 /// UndefMaskElem (-1) specifies that the result element is undefined. 2018 /// 2019 /// For scalable vectors, all the elements of the mask must be 0 or -1. This 2020 /// requirement may be relaxed in the future. 2021 class ShuffleVectorInst : public Instruction { 2022 SmallVector<int, 4> ShuffleMask; 2023 Constant *ShuffleMaskForBitcode; 2024 2025 protected: 2026 // Note: Instruction needs to be a friend here to call cloneImpl. 
2027 friend class Instruction; 2028 2029 ShuffleVectorInst *cloneImpl() const; 2030 2031 public: 2032 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "", 2033 Instruction *InsertBefore = nullptr); 2034 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, 2035 BasicBlock *InsertAtEnd); 2036 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "", 2037 Instruction *InsertBefore = nullptr); 2038 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr, 2039 BasicBlock *InsertAtEnd); 2040 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2041 const Twine &NameStr = "", 2042 Instruction *InsertBefor = nullptr); 2043 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2044 const Twine &NameStr, BasicBlock *InsertAtEnd); 2045 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2046 const Twine &NameStr = "", 2047 Instruction *InsertBefor = nullptr); 2048 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2049 const Twine &NameStr, BasicBlock *InsertAtEnd); 2050 2051 void *operator new(size_t S) { return User::operator new(S, 2); } 2052 void operator delete(void *Ptr) { return User::operator delete(Ptr); } 2053 2054 /// Swap the operands and adjust the mask to preserve the semantics 2055 /// of the instruction. 2056 void commute(); 2057 2058 /// Return true if a shufflevector instruction can be 2059 /// formed with the specified operands. 2060 static bool isValidOperands(const Value *V1, const Value *V2, 2061 const Value *Mask); 2062 static bool isValidOperands(const Value *V1, const Value *V2, 2063 ArrayRef<int> Mask); 2064 2065 /// Overload to return most specific vector type. 2066 /// 2067 VectorType *getType() const { 2068 return cast<VectorType>(Instruction::getType()); 2069 } 2070 2071 /// Transparently provide more efficient getOperand methods. 
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the shuffle mask value of this instruction for the given element
  /// index. Return UndefMaskElem if the element is undef.
  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }

  /// Convert the input shuffle mask operand to a vector of integers. Undefined
  /// elements of the mask are returned as UndefMaskElem.
  static void getShuffleMask(const Constant *Mask,
                             SmallVectorImpl<int> &Result);

  /// Return the mask for this instruction as a vector of integers. Undefined
  /// elements of the mask are returned as UndefMaskElem.
  void getShuffleMask(SmallVectorImpl<int> &Result) const {
    Result.assign(ShuffleMask.begin(), ShuffleMask.end());
  }

  /// Return the mask for this instruction, for use in bitcode.
  ///
  /// TODO: This is temporary until we decide a new bitcode encoding for
  /// shufflevector.
  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }

  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                Type *ResultTy);

  void setShuffleMask(ArrayRef<int> Mask);

  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }

  /// Return true if this shuffle returns a vector with a different number of
  /// elements than its source vectors.
  /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
  ///           shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
  bool changesLength() const {
    // Uses the known-minimum element count so the test also works for
    // scalable vectors.
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts != NumMaskElts;
  }

  /// Return true if this shuffle returns a vector with a greater number of
  /// elements than its source vectors.
  /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
  bool increasesLength() const {
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts < NumMaskElts;
  }

  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector.
  /// Example: <7,5,undef,7>
  /// This assumes that vector operands are the same length as the mask.
  static bool isSingleSourceMask(ArrayRef<int> Mask);
  static bool isSingleSourceMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSingleSourceMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without changing the length of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
  /// TODO: Optionally allow length-changing shuffles.
  bool isSingleSource() const {
    return !changesLength() && isSingleSourceMask(ShuffleMask);
  }

  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector without lane crossings. A shuffle using this mask is not
  /// necessarily a no-op because it may change the number of elements from its
  /// input vectors or it may provide demanded bits knowledge via undef lanes.
  /// Example: <undef,undef,2,3>
  static bool isIdentityMask(ArrayRef<int> Mask);
  static bool isIdentityMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isIdentityMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without lane crossings and does not change the number of elements
  /// from its input vectors.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
  bool isIdentity() const {
    return !changesLength() && isIdentityMask(ShuffleMask);
  }

  /// Return true if this shuffle lengthens exactly one source vector with
  /// undefs in the high elements.
  bool isIdentityWithPadding() const;

  /// Return true if this shuffle extracts the first N elements of exactly one
  /// source vector.
  bool isIdentityWithExtract() const;

  /// Return true if this shuffle concatenates its 2 source vectors. This
  /// returns false if either input is undefined. In that case, the shuffle is
  /// better classified as an identity with padding operation.
  bool isConcat() const;

  /// Return true if this shuffle mask chooses elements from its source vectors
  /// without lane crossings. A shuffle using this mask would be
  /// equivalent to a vector select with a constant condition operand.
  /// Example: <4,1,6,undef>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// This assumes that vector operands are the same length as the mask
  /// (a length-changing shuffle can never be equivalent to a vector select).
  static bool isSelectMask(ArrayRef<int> Mask);
  static bool isSelectMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSelectMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from its source vectors
  /// without lane crossings and all operands have the same number of elements.
  /// In other words, this shuffle is equivalent to a vector select with a
  /// constant condition operand.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// TODO: Optionally allow length-changing shuffles.
  bool isSelect() const {
    return !changesLength() && isSelectMask(ShuffleMask);
  }

  /// Return true if this shuffle mask swaps the order of elements from exactly
  /// one source vector.
  /// Example: <7,6,undef,4>
  /// This assumes that vector operands are the same length as the mask.
  static bool isReverseMask(ArrayRef<int> Mask);
  static bool isReverseMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReverseMask(MaskAsInts);
  }

  /// Return true if this shuffle swaps the order of elements from exactly
  /// one source vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
  /// TODO: Optionally allow length-changing shuffles.
  bool isReverse() const {
    return !changesLength() && isReverseMask(ShuffleMask);
  }

  /// Return true if this shuffle mask chooses all elements with the same value
  /// as the first element of exactly one source vector.
  /// Example: <4,undef,undef,4>
  /// This assumes that vector operands are the same length as the mask.
  static bool isZeroEltSplatMask(ArrayRef<int> Mask);
  static bool isZeroEltSplatMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isZeroEltSplatMask(MaskAsInts);
  }

  /// Return true if all elements of this shuffle are the same value as the
  /// first element of exactly one source vector without changing the length
  /// of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
  /// TODO: Optionally allow length-changing shuffles.
  /// TODO: Optionally allow splats from other elements.
  bool isZeroEltSplat() const {
    return !changesLength() && isZeroEltSplatMask(ShuffleMask);
  }

  /// Return true if this shuffle mask is a transpose mask.
  /// Transpose vector masks transpose a 2xn matrix. They read corresponding
  /// even- or odd-numbered vector elements from two n-dimensional source
  /// vectors and write each result into consecutive elements of an
  /// n-dimensional destination vector. Two shuffles are necessary to complete
  /// the transpose, one for the even elements and another for the odd elements.
  /// This description closely follows how the TRN1 and TRN2 AArch64
  /// instructions operate.
  ///
  /// For example, a simple 2x2 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b >
  ///   m1 = < c, d >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
  ///   t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
  ///
  /// For matrices having greater than n columns, the resulting nx2 transposed
  /// matrix is stored in two result vectors such that one vector contains
  /// interleaved elements from all the even-numbered rows and the other vector
  /// contains interleaved elements from all the odd-numbered rows. For example,
  /// a 2x4 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b, c, d >
  ///   m1 = < e, f, g, h >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
  ///   t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
  static bool isTransposeMask(ArrayRef<int> Mask);
  static bool isTransposeMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isTransposeMask(MaskAsInts);
  }

  /// Return true if this shuffle transposes the elements of its inputs without
  /// changing the length of the vectors. This operation may also be known as a
  /// merge or interleave. See the description for isTransposeMask() for the
  /// exact specification.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
  bool isTranspose() const {
    return !changesLength() && isTransposeMask(ShuffleMask);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  /// A valid extract subvector mask returns a smaller vector from a single
  /// source operand. The base extraction index is returned as well.
  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index);
  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
                                     int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  bool isExtractSubvectorMask(int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an insert subvector mask.
  /// A valid insert subvector mask inserts the lowest elements of a second
  /// source operand into an in-place first source operand.
  /// Both the sub vector width and the insertion index are returned.
  static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index);
  static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
  }

  /// Return true if this shuffle mask is an insert subvector mask.
  bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
  }

  /// Return true if this shuffle mask replicates each of the \p VF elements
  /// in a vector \p ReplicationFactor times.
  /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
  ///   <0,0,0,1,1,1,2,2,2,3,3,3>
  static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
                                int &VF);
  static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
                                int &VF) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
  }

  /// Return true if this shuffle mask is a replication mask.
  bool isReplicationMask(int &ReplicationFactor, int &VF) const;

  /// Change values in a shuffle permute mask assuming the two vector operands
  /// of length InVecNumElts have swapped position.
  static void commuteShuffleMask(MutableArrayRef<int> Mask,
                                 unsigned InVecNumElts) {
    // Each non-undef index is flipped to point at the corresponding lane of
    // the other input vector; undef (-1) lanes are left untouched.
    for (int &Idx : Mask) {
      if (Idx == -1)
        continue;
      Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
             "shufflevector mask index out of range");
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ShuffleVector;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ShuffleVectorInst>
    : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)

//===----------------------------------------------------------------------===//
// ExtractValueInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
  SmallVector<unsigned, 4> Indices;

  ExtractValueInst(const ExtractValueInst &EVI);

  /// Constructors - Create an extractvalue instruction with a base aggregate
  /// value and a list of indices. The first ctor can optionally insert before
  /// an existing instruction, the second appends the new instruction to the
  /// specified BasicBlock.
2422 inline ExtractValueInst(Value *Agg, 2423 ArrayRef<unsigned> Idxs, 2424 const Twine &NameStr, 2425 Instruction *InsertBefore); 2426 inline ExtractValueInst(Value *Agg, 2427 ArrayRef<unsigned> Idxs, 2428 const Twine &NameStr, BasicBlock *InsertAtEnd); 2429 2430 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); 2431 2432 protected: 2433 // Note: Instruction needs to be a friend here to call cloneImpl. 2434 friend class Instruction; 2435 2436 ExtractValueInst *cloneImpl() const; 2437 2438 public: 2439 static ExtractValueInst *Create(Value *Agg, 2440 ArrayRef<unsigned> Idxs, 2441 const Twine &NameStr = "", 2442 Instruction *InsertBefore = nullptr) { 2443 return new 2444 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); 2445 } 2446 2447 static ExtractValueInst *Create(Value *Agg, 2448 ArrayRef<unsigned> Idxs, 2449 const Twine &NameStr, 2450 BasicBlock *InsertAtEnd) { 2451 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); 2452 } 2453 2454 /// Returns the type of the element that would be extracted 2455 /// with an extractvalue instruction with the specified parameters. 2456 /// 2457 /// Null is returned if the indices are invalid for the specified type. 
2458 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); 2459 2460 using idx_iterator = const unsigned*; 2461 2462 inline idx_iterator idx_begin() const { return Indices.begin(); } 2463 inline idx_iterator idx_end() const { return Indices.end(); } 2464 inline iterator_range<idx_iterator> indices() const { 2465 return make_range(idx_begin(), idx_end()); 2466 } 2467 2468 Value *getAggregateOperand() { 2469 return getOperand(0); 2470 } 2471 const Value *getAggregateOperand() const { 2472 return getOperand(0); 2473 } 2474 static unsigned getAggregateOperandIndex() { 2475 return 0U; // get index for modifying correct operand 2476 } 2477 2478 ArrayRef<unsigned> getIndices() const { 2479 return Indices; 2480 } 2481 2482 unsigned getNumIndices() const { 2483 return (unsigned)Indices.size(); 2484 } 2485 2486 bool hasIndices() const { 2487 return true; 2488 } 2489 2490 // Methods for support type inquiry through isa, cast, and dyn_cast: 2491 static bool classof(const Instruction *I) { 2492 return I->getOpcode() == Instruction::ExtractValue; 2493 } 2494 static bool classof(const Value *V) { 2495 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2496 } 2497 }; 2498 2499 ExtractValueInst::ExtractValueInst(Value *Agg, 2500 ArrayRef<unsigned> Idxs, 2501 const Twine &NameStr, 2502 Instruction *InsertBefore) 2503 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), 2504 ExtractValue, Agg, InsertBefore) { 2505 init(Idxs, NameStr); 2506 } 2507 2508 ExtractValueInst::ExtractValueInst(Value *Agg, 2509 ArrayRef<unsigned> Idxs, 2510 const Twine &NameStr, 2511 BasicBlock *InsertAtEnd) 2512 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), 2513 ExtractValue, Agg, InsertAtEnd) { 2514 init(Idxs, NameStr); 2515 } 2516 2517 //===----------------------------------------------------------------------===// 2518 // InsertValueInst Class 2519 //===----------------------------------------------------------------------===// 2520 2521 /// 
/// This instruction inserts a struct field or array element value into an
/// aggregate value.
///
class InsertValueInst : public Instruction {
  /// The constant index path. Indices are stored inline in the instruction
  /// (they are small integers, not operands).
  SmallVector<unsigned, 4> Indices;

  InsertValueInst(const InsertValueInst &IVI);

  /// Constructors - Create a insertvalue instruction with a base aggregate
  /// value, a value to insert, and a list of indices. The first ctor can
  /// optionally insert before an existing instruction, the second appends
  /// the new instruction to the specified BasicBlock.
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr,
                         Instruction *InsertBefore);
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Constructors - These two constructors are convenience methods because one
  /// and two index insertvalue instructions are so common.
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
                  const Twine &NameStr = "",
                  Instruction *InsertBefore = nullptr);
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
            const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertValueInst *cloneImpl() const;

public:
  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
  }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  Value *getInsertedValueOperand() {
    return getOperand(1);
  }
  const Value *getInsertedValueOperand() const {
    return getOperand(1);
  }
  static unsigned getInsertedValueOperandIndex() {
    return 1U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<InsertValueInst> :
  public FixedNumOperandTraits<InsertValueInst, 2> {
};

InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertBefore) {
  init(Agg, Val, Idxs, NameStr);
}

InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertAtEnd) {
  init(Agg, Val, Idxs, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)

//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

// PHINode - The PHINode class is used to represent the magical mystical PHI
// node, that can not exist in nature, but can be synthesized in a computer
// scientist's overactive imagination.
//
class PHINode : public Instruction {
  /// The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  PHINode(const PHINode &PN);

  explicit PHINode(Type *Ty, unsigned NumReservedValues,
                   const Twine &NameStr = "",
                   Instruction *InsertBefore = nullptr)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
      ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
          BasicBlock *InsertAtEnd)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
      ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  PHINode *cloneImpl() const;

  // allocHungoffUses - this is more complicated than the generic
  // User::allocHungoffUses, because we have to allocate Uses for the incoming
  // values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

public:
  /// Constructors - NumReservedValues is a hint for the number of incoming
  /// edges that this phi node will have (use 0 if you really have no idea).
  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr = "",
                         Instruction *InsertBefore = nullptr) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
  }

  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.

  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock * const *;

  // The incoming-block pointers live after the ReservedSpace operand slots in
  // the same hung-off allocation (see allocHungoffUses above), hence the
  // reinterpret_cast past op_begin() + ReservedSpace.
  block_iterator block_begin() {
    return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
  }

  const_block_iterator block_begin() const {
    return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
  }

  block_iterator block_end() {
    return block_begin() + getNumOperands();
  }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  ///
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number x
  ///
  Value *getIncomingValue(unsigned i) const {
    return getOperand(i);
  }
  void setIncomingValue(unsigned i, Value *V) {
    assert(V && "PHI node got a null value!");
    assert(getType() == V->getType() &&
           "All operands to PHI node must be the same type as the PHI node!");
    setOperand(i, V);
  }

  // Incoming values are stored one-to-one in operand order, so both mappings
  // are the identity.
  static unsigned getOperandNumForIncomingValue(unsigned i) {
    return i;
  }

  static unsigned getIncomingValueNumForOperand(unsigned i) {
    return i;
  }

  /// Return incoming basic block number @p i.
  ///
  BasicBlock *getIncomingBlock(unsigned i) const {
    return block_begin()[i];
  }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  ///
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  ///
  BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned i, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[i] = BB;
  }

  /// Replace every incoming basic block \p Old to basic block \p New.
  void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
    assert(New && Old && "PHI node got a null basic block!");
    for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == Old)
        setIncomingBlock(Op, New);
  }

  /// Add an incoming value to the end of the PHI list
  ///
  void addIncoming(Value *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Remove an incoming value.  This is useful if a
  /// predecessor basic block is deleted.  The value removed is returned.
  ///
  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
  /// is true), the PHI node is destroyed and any uses of it are replaced with
  /// dummy values.  The only time there should be zero incoming values to a PHI
  /// node is when the block is dead, so this strategy is sound.
  ///
  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);

  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument to remove!");
    return removeIncomingValue(Idx, DeletePHIIfEmpty);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI.  Returns -1 if no instance.
  ///
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (block_begin()[i] == BB)
        return i;
    return -1;
  }

  Value *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  /// Set every incoming value(s) for block \p BB to \p V.
  void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
    assert(BB && "PHI node got a null basic block!");
    bool Found = false;
    for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == BB) {
        Found = true;
        setIncomingValue(Op, V);
      }
    (void)Found; // Silence unused-variable warning in release (no-assert) builds.
    assert(Found && "Invalid basic block argument to set!");
  }

  /// If the specified PHI node always merges together the
  /// same value, return the value, otherwise return null.
  Value *hasConstantValue() const;

  /// Whether the specified PHI node always merges
  /// together the same value, assuming undefs are equal to a unique
  /// non-undef value.
  bool hasConstantOrUndefValue() const;

  /// If the PHI node is complete which means all of its parent's predecessors
  /// have incoming value in this PHI, return true, otherwise return false.
  bool isComplete() const {
    return llvm::all_of(predecessors(getParent()),
                        [this](const BasicBlock *Pred) {
                          return getBasicBlockIndex(Pred) >= 0;
                        });
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::PHI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void growOperands();
};

template <>
struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)

//===----------------------------------------------------------------------===//
//                           LandingPadInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// The landingpad instruction holds all of the information
/// necessary to generate correct exception handling. The landingpad instruction
/// cannot be moved from the top of a landing pad block, which itself is
/// accessible only from the 'unwind' edge of an invoke. This uses the
/// SubclassData field in Value to store whether or not the landingpad is a
/// cleanup.
///
class LandingPadInst : public Instruction {
  using CleanupField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated.
  /// NumOperands is the number actually in use.
  unsigned ReservedSpace;

  LandingPadInst(const LandingPadInst &LP);

public:
  enum ClauseType { Catch, Filter };

private:
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, Instruction *InsertBefore);
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Allocate space for exactly zero operands.
  void *operator new(size_t S) { return User::operator new(S); }

  void growOperands(unsigned Size);
  void init(unsigned NumReservedValues, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LandingPadInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Constructors - NumReservedClauses is a hint for the number of incoming
  /// clauses that this landingpad will have (use 0 if you really have no idea).
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr = "",
                                Instruction *InsertBefore = nullptr);
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return 'true' if this landingpad instruction is a
  /// cleanup. I.e., it should be run when unwinding even if its landing pad
  /// doesn't catch the exception.
  bool isCleanup() const { return getSubclassData<CleanupField>(); }

  /// Indicate that this landingpad instruction is a cleanup.
  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }

  /// Add a catch or filter clause to the landing pad.
  void addClause(Constant *ClauseVal);

  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
  /// determine what type of clause this is.
  Constant *getClause(unsigned Idx) const {
    return cast<Constant>(getOperandList()[Idx]);
  }

  /// Return 'true' if the clause and index Idx is a catch clause.
  // Clause kind is distinguished purely by type: filter clauses have array
  // type, catch clauses do not.
  bool isCatch(unsigned Idx) const {
    return !isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Return 'true' if the clause and index Idx is a filter clause.
  bool isFilter(unsigned Idx) const {
    return isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Get the number of clauses for this landing pad.
  unsigned getNumClauses() const { return getNumOperands(); }

  /// Grow the size of the operand list to accommodate the new
  /// number of clauses.
  void reserveClauses(unsigned Size) { growOperands(Size); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::LandingPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)

//===----------------------------------------------------------------------===//
//                               ReturnInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Return a value (possibly void), from a function. Execution
/// does not continue in this function any longer.
///
class ReturnInst : public Instruction {
  ReturnInst(const ReturnInst &RI);

private:
  // ReturnInst constructors:
  // ReturnInst()                  - 'ret void' instruction
  // ReturnInst(    null)          - 'ret void' instruction
  // ReturnInst(Value* X)          - 'ret X'    instruction
  // ReturnInst(    null, Inst *I) - 'ret void' instruction, insert before I
  // ReturnInst(Value* X, Inst *I) - 'ret X'    instruction, insert before I
  // ReturnInst(    null, BB *B)   - 'ret void' instruction, insert @ end of B
  // ReturnInst(Value* X, BB *B)   - 'ret X'    instruction, insert @ end of B
  //
  // NOTE: If the Value* passed is of type void then the constructor behaves as
  // if it was passed NULL.
  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
                      Instruction *InsertBefore = nullptr);
  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ReturnInst *cloneImpl() const;

public:
  // !!retVal selects the operand count for the placement allocation:
  // 'ret void' carries zero operands, 'ret X' carries one.
  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
                            Instruction *InsertBefore = nullptr) {
    return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
  }

  static ReturnInst* Create(LLVMContext &C, Value *retVal,
                            BasicBlock *InsertAtEnd) {
    return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
  }

  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
    return new(0) ReturnInst(C, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor. Returns null if there is no return value.
  Value *getReturnValue() const {
    return getNumOperands() != 0 ? getOperand(0) : nullptr;
  }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Ret);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ReturnInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("ReturnInst has no successors!");
  }
};

template <>
struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)

//===----------------------------------------------------------------------===//
//                               BranchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Conditional or Unconditional Branch instruction.
///
class BranchInst : public Instruction {
  /// Ops list - Branches are strange.  The operands are ordered:
  ///  [Cond, FalseDest,] TrueDest.  This makes some accessors faster because
  /// they don't have to check for cond/uncond branchness. These are mostly
  /// accessed relative from op_end().
  BranchInst(const BranchInst &BI);
  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
  // BranchInst(BB *B)                           - 'br B'
  // BranchInst(BB* T, BB *F, Value *C)          - 'br C, T, F'
  // BranchInst(BB* B, Inst *I)                  - 'br B'        insert before I
  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
  // BranchInst(BB* B, BB *I)                    - 'br B'        insert at end
  // BranchInst(BB* T, BB *F, Value *C, BB *I)   - 'br C, T, F', insert at end
  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             BasicBlock *InsertAtEnd);

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  BranchInst *cloneImpl() const;

public:
  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for branch instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  // The placement-new count is the operand count: 1 for 'br B', 3 for
  // 'br C, T, F' (see the operand layout comment at the top of the class).
  static BranchInst *Create(BasicBlock *IfTrue,
                            Instruction *InsertBefore = nullptr) {
    return new(1) BranchInst(IfTrue, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, Instruction *InsertBefore = nullptr) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
    return new(1) BranchInst(IfTrue, InsertAtEnd);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, BasicBlock *InsertAtEnd) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool isUnconditional() const { return getNumOperands() == 1; }
  bool isConditional() const { return getNumOperands() == 3; }

  // With the [Cond, FalseDest, TrueDest] layout, the condition is always
  // three slots back from op_end().
  Value *getCondition() const {
    assert(isConditional() && "Cannot get condition of an uncond branch!");
    return Op<-3>();
  }

  void setCondition(Value *V) {
    assert(isConditional() && "Cannot set condition of unconditional branch!");
    Op<-3>() = V;
  }

  unsigned getNumSuccessors() const { return 1+isConditional(); }

  // Successor 0 (TrueDest) is the last operand; successor 1 (FalseDest),
  // when present, is the slot before it.
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
    return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
    *(&Op<-1>() - idx) = NewSucc;
  }

  /// Swap the successors of this branch instruction.
  ///
  /// Swaps the successors of the branch instruction. This also swaps any
  /// branch weight metadata associated with the instruction so that it
  /// continues to map correctly to each operand.
  void swapSuccessors();

  iterator_range<succ_op_iterator> successors() {
    return make_range(
        succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
        succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(
                          std::next(value_op_begin(), isConditional() ? 1 : 0)),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Br);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)

//===----------------------------------------------------------------------===//
//                               SwitchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Multiway switch
///
class SwitchInst : public Instruction {
  unsigned ReservedSpace;

  // Operand[0]    = Value to switch on
  // Operand[1]    = Default basic block destination
  // Operand[2n  ] = Value to match
  // Operand[2n+1] = BasicBlock to go to on match
  SwitchInst(const SwitchInst &SI);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor can also
  /// auto-insert before another instruction.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor also
  /// auto-inserts at the end of the specified BasicBlock.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SwitchInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Sentinel case index (-2, i.e. 0xFFFFFFFE) that denotes the default
  // destination rather than a real case.
  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);

  template <typename CaseHandleT> class CaseIteratorImpl;

  /// A handle to a particular switch case. It exposes a convenient interface
  /// to both the case value and the successor block.
  ///
  /// We define this as a template and instantiate it to form both a const and
  /// non-const handle.
  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
  class CaseHandleImpl {
    // Directly befriend both const and non-const iterators.
    friend class SwitchInst::CaseIteratorImpl<
        CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;

  protected:
    // Expose the switch type we're parameterized with to the iterator.
    using SwitchInstType = SwitchInstT;

    SwitchInstT *SI;
    ptrdiff_t Index;

    CaseHandleImpl() = default;
    CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}

  public:
    /// Resolves case value for current case.
    // Case value for case n is operand 2n+2 (per the operand layout comment
    // at the top of SwitchInst).
    ConstantIntT *getCaseValue() const {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out the number of cases.");
      return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
    }

    /// Resolves successor for current case.
    BasicBlockT *getCaseSuccessor() const {
      assert(((unsigned)Index < SI->getNumCases() ||
              (unsigned)Index == DefaultPseudoIndex) &&
             "Index out the number of cases.");
      return SI->getSuccessor(getSuccessorIndex());
    }

    /// Returns number of current case.
    unsigned getCaseIndex() const { return Index; }

    /// Returns successor index for current case successor.
    // Successor 0 is the default destination; case n maps to successor n+1.
    unsigned getSuccessorIndex() const {
      assert(((unsigned)Index == DefaultPseudoIndex ||
              (unsigned)Index < SI->getNumCases()) &&
             "Index out the number of cases.");
      return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
    }

    bool operator==(const CaseHandleImpl &RHS) const {
      assert(SI == RHS.SI && "Incompatible operators.");
      return Index == RHS.Index;
    }
  };

  using ConstCaseHandle =
      CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;

  class CaseHandle
      : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
    friend class SwitchInst::CaseIteratorImpl<CaseHandle>;

  public:
    CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}

    /// Sets the new value for current case.
    void setValue(ConstantInt *V) const {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out the number of cases.");
      SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
    }

    /// Sets the new successor for current case.
    void setSuccessor(BasicBlock *S) const {
      SI->setSuccessor(getSuccessorIndex(), S);
    }
  };

  template <typename CaseHandleT>
  class CaseIteratorImpl
      : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
                                    std::random_access_iterator_tag,
                                    const CaseHandleT> {
    using SwitchInstT = typename CaseHandleT::SwitchInstType;

    CaseHandleT Case;

  public:
    /// Default constructed iterator is in an invalid state until assigned to
    /// a case for a particular switch.
    CaseIteratorImpl() = default;

    /// Initializes case iterator for given SwitchInst and for given
    /// case number.
    CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}

    /// Initializes case iterator for given SwitchInst and for given
    /// successor index.
    static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
                                               unsigned SuccessorIndex) {
      assert(SuccessorIndex < SI->getNumSuccessors() &&
             "Successor index # out of range!");
      return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
                                 : CaseIteratorImpl(SI, DefaultPseudoIndex);
    }

    /// Support converting to the const variant. This will be a no-op for const
    /// variant.
    operator CaseIteratorImpl<ConstCaseHandle>() const {
      return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
    }

    CaseIteratorImpl &operator+=(ptrdiff_t N) {
      // Check index correctness after addition.
      // Note: Index == getNumCases() means end().
      assert(Case.Index + N >= 0 &&
             (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index += N;
      return *this;
    }
    CaseIteratorImpl &operator-=(ptrdiff_t N) {
      // Check index correctness after subtraction.
      // Note: Case.Index == getNumCases() means end().
      assert(Case.Index - N >= 0 &&
             (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index -= N;
      return *this;
    }
    ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index - RHS.Case.Index;
    }
    bool operator==(const CaseIteratorImpl &RHS) const {
      return Case == RHS.Case;
    }
    bool operator<(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index < RHS.Case.Index;
    }
    const CaseHandleT &operator*() const { return Case; }
  };

  using CaseIt = CaseIteratorImpl<CaseHandle>;
  using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases,
                            Instruction *InsertBefore = nullptr) {
    return new SwitchInst(Value, Default, NumCases, InsertBefore);
  }

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases, BasicBlock *InsertAtEnd) {
    return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for Switch stmt
  Value *getCondition() const { return getOperand(0); }
  void setCondition(Value *V) { setOperand(0, V); }

  BasicBlock *getDefaultDest() const {
    return cast<BasicBlock>(getOperand(1));
  }

  void setDefaultDest(BasicBlock *DefaultCase) {
    setOperand(1, reinterpret_cast<Value*>(DefaultCase));
  }

  /// Return the number of 'cases' in this switch instruction, excluding the
  /// default case.
  // Operands 0/1 are the condition and default destination; each case then
  // occupies two operand slots.
  unsigned getNumCases() const {
    return getNumOperands()/2 - 1;
  }

  /// Returns a read/write iterator that points to the first case in the
  /// SwitchInst.
  CaseIt case_begin() {
    return CaseIt(this, 0);
  }

  /// Returns a read-only iterator that points to the first case in the
  /// SwitchInst.
  ConstCaseIt case_begin() const {
    return ConstCaseIt(this, 0);
  }

  /// Returns a read/write iterator that points one past the last in the
  /// SwitchInst.
  CaseIt case_end() {
    return CaseIt(this, getNumCases());
  }

  /// Returns a read-only iterator that points one past the last in the
  /// SwitchInst.
  ConstCaseIt case_end() const {
    return ConstCaseIt(this, getNumCases());
  }

  /// Iteration adapter for range-for loops.
  iterator_range<CaseIt> cases() {
    return make_range(case_begin(), case_end());
  }

  /// Constant iteration adapter for range-for loops.
  iterator_range<ConstCaseIt> cases() const {
    return make_range(case_begin(), case_end());
  }

  /// Returns an iterator that points to the default case.
  /// Note: this iterator allows to resolve successor only. Attempt
  /// to resolve case value causes an assertion.
  /// Also note, that increment and decrement also causes an assertion and
  /// makes iterator invalid.
  CaseIt case_default() {
    return CaseIt(this, DefaultPseudoIndex);
  }
  ConstCaseIt case_default() const {
    return ConstCaseIt(this, DefaultPseudoIndex);
  }

  /// Search all of the case values for the specified constant. If it is
  /// explicitly handled, return the case iterator of it, otherwise return
  /// default case iterator to indicate that it is handled by the default
  /// handler.
3516 CaseIt findCaseValue(const ConstantInt *C) { 3517 return CaseIt( 3518 this, 3519 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex()); 3520 } 3521 ConstCaseIt findCaseValue(const ConstantInt *C) const { 3522 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) { 3523 return Case.getCaseValue() == C; 3524 }); 3525 if (I != case_end()) 3526 return I; 3527 3528 return case_default(); 3529 } 3530 3531 /// Finds the unique case value for a given successor. Returns null if the 3532 /// successor is not found, not unique, or is the default case. 3533 ConstantInt *findCaseDest(BasicBlock *BB) { 3534 if (BB == getDefaultDest()) 3535 return nullptr; 3536 3537 ConstantInt *CI = nullptr; 3538 for (auto Case : cases()) { 3539 if (Case.getCaseSuccessor() != BB) 3540 continue; 3541 3542 if (CI) 3543 return nullptr; // Multiple cases lead to BB. 3544 3545 CI = Case.getCaseValue(); 3546 } 3547 3548 return CI; 3549 } 3550 3551 /// Add an entry to the switch instruction. 3552 /// Note: 3553 /// This action invalidates case_end(). Old case_end() iterator will 3554 /// point to the added case. 3555 void addCase(ConstantInt *OnVal, BasicBlock *Dest); 3556 3557 /// This method removes the specified case and its successor from the switch 3558 /// instruction. Note that this operation may reorder the remaining cases at 3559 /// index idx and above. 3560 /// Note: 3561 /// This action invalidates iterators for all cases following the one removed, 3562 /// including the case_end() iterator. It returns an iterator for the next 3563 /// case. 
  CaseIt removeCase(CaseIt I);

  // Successor k is stored at operand 2*k + 1 (k == 0 is the default
  // destination at operand 1).
  unsigned getNumSuccessors() const { return getNumOperands()/2; }
  BasicBlock *getSuccessor(unsigned idx) const {
    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
    return cast<BasicBlock>(getOperand(idx*2+1));
  }
  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
    setOperand(idx * 2 + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Switch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

/// A wrapper class to simplify modification of SwitchInst cases along with
/// their prof branch_weights metadata.
class SwitchInstProfUpdateWrapper {
  SwitchInst &SI;
  // Cached per-successor weights; None when SI carries no branch_weights.
  Optional<SmallVector<uint32_t, 8> > Weights = None;
  // Set once any weight is modified; triggers metadata write-back on destroy.
  bool Changed = false;

protected:
  static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);

  MDNode *buildProfBranchWeightsMD();

  void init();

public:
  using CaseWeightOpt = Optional<uint32_t>;
  SwitchInst *operator->() { return &SI; }
  SwitchInst &operator*() { return SI; }
  operator SwitchInst *() { return &SI; }

  SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }

  // On destruction, flush the (possibly modified) weights back into the
  // instruction's !prof metadata — but only if something actually changed.
  ~SwitchInstProfUpdateWrapper() {
    if (Changed)
      SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
  }

  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
  /// correspondent branch weight.
  SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);

  /// Delegate the call to the underlying SwitchInst::addCase() and set the
  /// specified branch weight for the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);

  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
  /// this object to not touch the underlying SwitchInst in destructor.
  SymbolTableList<Instruction>::iterator eraseFromParent();

  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
  CaseWeightOpt getSuccessorWeight(unsigned idx);

  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
};

template <>
struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)

//===----------------------------------------------------------------------===//
//                             IndirectBrInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Indirect Branch Instruction.
///
class IndirectBrInst : public Instruction {
  // Number of operand slots allocated (may exceed the number in use so that
  // destinations can be appended without reallocating).
  unsigned ReservedSpace;

  // Operand[0]   = Address to jump to
  // Operand[n+1] = n-th destination
  IndirectBrInst(const IndirectBrInst &IBI);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor can also
  /// autoinsert before another instruction.
  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor also
  /// autoinserts at the end of the specified BasicBlock.
  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *Address, unsigned NumDests);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  IndirectBrInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for indirectbr instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                Instruction *InsertBefore = nullptr) {
    return new IndirectBrInst(Address, NumDests, InsertBefore);
  }

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                BasicBlock *InsertAtEnd) {
    return new IndirectBrInst(Address, NumDests, InsertAtEnd);
  }

  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for IndirectBrInst instruction.
  // Operand 0 is the jump address; operands 1..N are the destinations.
  Value *getAddress() { return getOperand(0); }
  const Value *getAddress() const { return getOperand(0); }
  void setAddress(Value *V) { setOperand(0, V); }

  /// return the number of possible destinations in this
  /// indirectbr instruction.
  unsigned getNumDestinations() const { return getNumOperands()-1; }

  /// Return the specified destination.
  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }

  /// Add a destination.
  ///
  void addDestination(BasicBlock *Dest);

  /// This method removes the specified successor from the
  /// indirectbr instruction.
  void removeDestination(unsigned i);

  unsigned getNumSuccessors() const { return getNumOperands()-1; }
  BasicBlock *getSuccessor(unsigned i) const {
    return cast<BasicBlock>(getOperand(i+1));
  }
  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    setOperand(i + 1, NewSucc);
  }

  // Successor ranges skip operand 0 (the address) via std::next.
  iterator_range<succ_op_iterator> successors() {
    return make_range(succ_op_iterator(std::next(value_op_begin())),
                      succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(std::next(value_op_begin())),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::IndirectBr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)

//===----------------------------------------------------------------------===//
//                               InvokeInst Class
//===----------------------------------------------------------------------===//

/// Invoke instruction. The SubclassData field is used to hold the
/// calling convention of the call.
///
class InvokeInst : public CallBase {
  /// The number of operands for this call beyond the called function,
  /// arguments, and operand bundles.
  static constexpr int NumExtraOperands = 2;

  /// The index from the end of the operand array to the normal destination.
  static constexpr int NormalDestOpEndIdx = -3;

  /// The index from the end of the operand array to the unwind destination.
  static constexpr int UnwindDestOpEndIdx = -2;

  InvokeInst(const InvokeInst &BI);

  /// Construct an InvokeInst given a range of arguments.
  ///
  /// Construct an InvokeInst from a range of arguments
  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, Instruction *InsertBefore);

  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
            BasicBlock *IfException, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InvokeInst *cloneImpl() const;

public:
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
                   NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    // Operand-bundle descriptors are co-allocated after the operand list.
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
                   NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles,
                   NumOperands, NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, None, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertAtEnd);
  }

  /// Create a clone of \p II with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned invoke instruction is identical to \p II in every way except
  /// that the operand bundles for the new instruction are set to the operand
  /// bundles in \p Bundles.
  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
                            Instruction *InsertPt = nullptr);

  // get*Dest - Return the destination basic blocks...
  // Both destinations live at fixed offsets from the END of the operand list
  // (see NormalDestOpEndIdx / UnwindDestOpEndIdx above).
  BasicBlock *getNormalDest() const {
    return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
  }
  BasicBlock *getUnwindDest() const {
    return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
  }
  void setNormalDest(BasicBlock *B) {
    Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }
  void setUnwindDest(BasicBlock *B) {
    Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }

  /// Get the landingpad instruction from the landing pad
  /// block (the unwind destination).
  LandingPadInst *getLandingPadInst() const;

  // Successor 0 is the normal destination, successor 1 the unwind destination.
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < 2 && "Successor # out of range for invoke!");
    return i == 0 ? getNormalDest() : getUnwindDest();
  }

  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    assert(i < 2 && "Successor # out of range for invoke!");
    if (i == 0)
      setNormalDest(NewSucc);
    else
      setUnwindDest(NewSucc);
  }

  unsigned getNumSuccessors() const { return 2; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Invoke);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}

InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}

//===----------------------------------------------------------------------===//
//                              CallBrInst Class
//===----------------------------------------------------------------------===//

/// CallBr instruction, tracking function calls that may not return control but
/// instead transfer it to a third location. The SubclassData field is used to
/// hold the calling convention of the call.
///
class CallBrInst : public CallBase {

  unsigned NumIndirectDests;

  CallBrInst(const CallBrInst &BI);

  /// Construct a CallBrInst given a range of arguments.
  ///
  /// Construct a CallBrInst from a range of arguments
  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                    ArrayRef<BasicBlock *> IndirectDests,
                    ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, Instruction *InsertBefore);

  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                    ArrayRef<BasicBlock *> IndirectDests,
                    ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
            ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Should the Indirect Destinations change, scan + update the Arg list.
  void updateArgBlockAddresses(unsigned i, BasicBlock *B);

  /// Compute the number of operands to allocate.
  // The "2" covers the called function and the default destination; each
  // indirect destination takes one additional operand.
  static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
                                int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallBrInst *cloneImpl() const;

public:
  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
    return new (NumOperands)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
                   NumOperands, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
                                         CountBundleInputs(Bundles));
    // Operand-bundle descriptors are co-allocated after the operand list.
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
                   NumOperands, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
    return new (NumOperands)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
                   NumOperands, NameStr, InsertAtEnd);
  }

  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
                                         CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
                   NumOperands, NameStr, InsertAtEnd);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, Bundles, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, NameStr, InsertAtEnd);
  }

  static CallBrInst *Create(FunctionCallee Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CBI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned callbr instruction is identical to \p CBI in every way
  /// except that the operand bundles for the new instruction are set to the
  /// operand bundles in \p Bundles.
  static CallBrInst *Create(CallBrInst *CBI,
                            ArrayRef<OperandBundleDef> Bundles,
                            Instruction *InsertPt = nullptr);

  /// Return the number of callbr indirect dest labels.
  ///
  unsigned getNumIndirectDests() const { return NumIndirectDests; }

  /// getIndirectDestLabel - Return the i-th indirect dest label.
  ///
  Value *getIndirectDestLabel(unsigned i) const {
    assert(i < getNumIndirectDests() && "Out of bounds!");
    return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
  }

  Value *getIndirectDestLabelUse(unsigned i) const {
    assert(i < getNumIndirectDests() && "Out of bounds!");
    return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
  }

  // Return the destination basic blocks...
4146 BasicBlock *getDefaultDest() const { 4147 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); 4148 } 4149 BasicBlock *getIndirectDest(unsigned i) const { 4150 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); 4151 } 4152 SmallVector<BasicBlock *, 16> getIndirectDests() const { 4153 SmallVector<BasicBlock *, 16> IndirectDests; 4154 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) 4155 IndirectDests.push_back(getIndirectDest(i)); 4156 return IndirectDests; 4157 } 4158 void setDefaultDest(BasicBlock *B) { 4159 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); 4160 } 4161 void setIndirectDest(unsigned i, BasicBlock *B) { 4162 updateArgBlockAddresses(i, B); 4163 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); 4164 } 4165 4166 BasicBlock *getSuccessor(unsigned i) const { 4167 assert(i < getNumSuccessors() + 1 && 4168 "Successor # out of range for callbr!"); 4169 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); 4170 } 4171 4172 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4173 assert(i < getNumIndirectDests() + 1 && 4174 "Successor # out of range for callbr!"); 4175 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); 4176 } 4177 4178 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } 4179 4180 // Methods for support type inquiry through isa, cast, and dyn_cast: 4181 static bool classof(const Instruction *I) { 4182 return (I->getOpcode() == Instruction::CallBr); 4183 } 4184 static bool classof(const Value *V) { 4185 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4186 } 4187 4188 private: 4189 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4190 // method so that subclasses cannot accidentally use it. 
4191 template <typename Bitfield> 4192 void setSubclassData(typename Bitfield::Type Value) { 4193 Instruction::setSubclassData<Bitfield>(Value); 4194 } 4195 }; 4196 4197 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4198 ArrayRef<BasicBlock *> IndirectDests, 4199 ArrayRef<Value *> Args, 4200 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4201 const Twine &NameStr, Instruction *InsertBefore) 4202 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4203 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4204 InsertBefore) { 4205 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4206 } 4207 4208 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4209 ArrayRef<BasicBlock *> IndirectDests, 4210 ArrayRef<Value *> Args, 4211 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4212 const Twine &NameStr, BasicBlock *InsertAtEnd) 4213 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4214 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4215 InsertAtEnd) { 4216 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4217 } 4218 4219 //===----------------------------------------------------------------------===// 4220 // ResumeInst Class 4221 //===----------------------------------------------------------------------===// 4222 4223 //===--------------------------------------------------------------------------- 4224 /// Resume the propagation of an exception. 4225 /// 4226 class ResumeInst : public Instruction { 4227 ResumeInst(const ResumeInst &RI); 4228 4229 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); 4230 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); 4231 4232 protected: 4233 // Note: Instruction needs to be a friend here to call cloneImpl. 
  friend class Instruction;

  ResumeInst *cloneImpl() const;

public:
  // new(1): allocate space for the single operand (the exception value);
  // see FixedNumOperandTraits<ResumeInst, 1> below.
  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
    return new(1) ResumeInst(Exn, InsertBefore);
  }

  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
    return new(1) ResumeInst(Exn, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor: operand 0 is the exception value (Exn).
  Value *getValue() const { return Op<0>(); }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Resume;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Resume is a terminator with no successors; these private overrides keep
  // generic successor-indexing code from compiling against ResumeInst.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ResumeInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    llvm_unreachable("ResumeInst has no successors!");
  }
};

template <>
struct OperandTraits<ResumeInst> :
    public FixedNumOperandTraits<ResumeInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)

//===----------------------------------------------------------------------===//
// CatchSwitchInst Class
//===----------------------------------------------------------------------===//
class CatchSwitchInst : public Instruction {
  using UnwindDestField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  // Operand[0] = Outer scope
  // Operand[1] = Unwind block destination
  // Operand[n] = BasicBlock to go to on match
  CatchSwitchInst(const CatchSwitchInst &CSI);

  /// Create a new switch instruction, specifying a
  /// default destination.  The number of additional handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor can also autoinsert before another instruction.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a
  /// default destination.  The number of additional handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor also autoinserts at the end of the specified BasicBlock.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands: the operand list is "hung off"
  // (see HungoffOperandTraits below) so it can be regrown via growOperands().
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
  void growOperands(unsigned Size);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchSwitchInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { return User::operator delete(Ptr); }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertBefore);
  }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers, const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for CatchSwitch stmt
  Value *getParentPad() const { return getOperand(0); }
  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }

  // Accessor Methods for CatchSwitch stmt
  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }
  // Operand 1 is the unwind destination only when hasUnwindDest() is set;
  // otherwise operand 1 (if any) is already the first handler.
  BasicBlock *getUnwindDest() const {
    if (hasUnwindDest())
      return cast<BasicBlock>(getOperand(1));
    return nullptr;
  }
  void setUnwindDest(BasicBlock *UnwindDest) {
    assert(UnwindDest);
    assert(hasUnwindDest());
    setOperand(1, UnwindDest);
  }

  /// return the number of 'handlers' in this catchswitch
  /// instruction, except the default handler
  unsigned getNumHandlers() const {
    if (hasUnwindDest())
      return getNumOperands() - 2;
    return getNumOperands() - 1;
  }

private:
  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
  static const BasicBlock *handler_helper(const Value *V) {
    return cast<BasicBlock>(V);
  }

public:
  using DerefFnTy = BasicBlock *(*)(Value *);
  using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
  using handler_range = iterator_range<handler_iterator>;
  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
  using const_handler_iterator =
      mapped_iterator<const_op_iterator, ConstDerefFnTy>;
  using const_handler_range = iterator_range<const_handler_iterator>;

  /// Returns an iterator that points to the first handler in CatchSwitchInst.
  /// Skips the parent-pad operand and, when present, the unwind destination.
  handler_iterator handler_begin() {
    op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return handler_iterator(It, DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points to the first handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_begin() const {
    const_op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
  }

  /// Returns a read-only iterator that points one past the last
  /// handler in the CatchSwitchInst.
  handler_iterator handler_end() {
    return handler_iterator(op_end(), DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points one past the last handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_end() const {
    return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
  }

  /// iteration adapter for range-for loops.
  handler_range handlers() {
    return make_range(handler_begin(), handler_end());
  }

  /// iteration adapter for range-for loops.
  const_handler_range handlers() const {
    return make_range(handler_begin(), handler_end());
  }

  /// Add an entry to the switch instruction...
  /// Note:
  /// This action invalidates handler_end(). Old handler_end() iterator will
  /// point to the added handler.
  void addHandler(BasicBlock *Dest);

  void removeHandler(handler_iterator HI);

  // Successors are the unwind destination (if present) plus the handlers,
  // i.e. every operand except the parent pad at operand 0.
  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    return cast<BasicBlock>(getOperand(Idx + 1));
  }
  void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    setOperand(Idx + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CatchSwitch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)

//===----------------------------------------------------------------------===//
// CleanupPadInst Class
//===----------------------------------------------------------------------===//
class CleanupPadInst : public FuncletPadInst {
private:
  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
                          unsigned Values, const Twine &NameStr,
                          Instruction *InsertBefore)
      : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
                       NameStr, InsertBefore) {}
  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
                          unsigned Values, const Twine &NameStr,
                          BasicBlock *InsertAtEnd)
      : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
                       NameStr, InsertAtEnd) {}

public:
  // Operand count is the args plus one slot for the parent pad.
  static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
                                const Twine &NameStr = "",
                                Instruction *InsertBefore = nullptr) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
  }

  static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
                                const Twine &NameStr, BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CleanupPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// CatchPadInst Class
//===----------------------------------------------------------------------===//
class CatchPadInst : public FuncletPadInst {
private:
  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
                        unsigned Values, const Twine &NameStr,
                        Instruction *InsertBefore)
      : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
                       NameStr, InsertBefore) {}
  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
                        unsigned Values, const Twine &NameStr,
                        BasicBlock *InsertAtEnd)
      : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
                       NameStr, InsertAtEnd) {}

public:
  // Operand count is the args plus one slot for the owning catchswitch.
  static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
                              const Twine &NameStr = "",
                              Instruction *InsertBefore = nullptr) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
  }

  static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
                              const Twine &NameStr, BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
  }

  /// Convenience accessors; the catchswitch is stored as the last operand.
  CatchSwitchInst *getCatchSwitch() const {
    return cast<CatchSwitchInst>(Op<-1>());
  }
  void setCatchSwitch(Value *CatchSwitch) {
    assert(CatchSwitch);
    Op<-1>() = CatchSwitch;
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CatchPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// CatchReturnInst Class
//===----------------------------------------------------------------------===//

class CatchReturnInst : public Instruction {
  // Constructors are private; use the static Create() factories below.
  CatchReturnInst(const CatchReturnInst &RI);
  CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
  CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);

  void init(Value *CatchPad, BasicBlock *BB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchReturnInst *cloneImpl() const;

public:
  // new(2): allocate space for exactly two operands (catchpad + successor);
  // see FixedNumOperandTraits<CatchReturnInst, 2> below.
  static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore = nullptr) {
    assert(CatchPad);
    assert(BB);
    return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
  }

  static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd) {
    assert(CatchPad);
    assert(BB);
    return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessors.
  // Operand 0 is the catchpad being returned from; operand 1 is the successor.
  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
  void setCatchPad(CatchPadInst *CatchPad) {
    assert(CatchPad);
    Op<0>() = CatchPad;
  }

  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
  void setSuccessor(BasicBlock *NewSucc) {
    assert(NewSucc);
    Op<1>() = NewSucc;
  }
  unsigned getNumSuccessors() const { return 1; }

  /// Get the parentPad of this catchret's catchpad's catchswitch.
  /// The successor block is implicitly a member of this funclet.
  Value *getCatchSwitchParentPad() const {
    return getCatchPad()->getCatchSwitch()->getParentPad();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CatchRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Indexed successor access used by generic terminator code; there is only
  // one successor, so the index merely gets range-checked.
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
    return getSuccessor();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
    setSuccessor(B);
  }
};

template <>
struct OperandTraits<CatchReturnInst>
    : public FixedNumOperandTraits<CatchReturnInst, 2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)

//===----------------------------------------------------------------------===//
// CleanupReturnInst Class
//===----------------------------------------------------------------------===//

class CleanupReturnInst : public Instruction {
  using UnwindDestField = BoolBitfieldElementT<0>;

private:
  // Constructors are private; use the static Create() factories below.
  CleanupReturnInst(const CleanupReturnInst &RI);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    Instruction *InsertBefore = nullptr);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    BasicBlock *InsertAtEnd);

  void init(Value *CleanupPad, BasicBlock *UnwindBB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CleanupReturnInst *cloneImpl() const;

public:
  // One operand for the cleanuppad, plus one for the unwind destination when
  // it is given; see VariadicOperandTraits<CleanupReturnInst, 1> below.
  static CleanupReturnInst *Create(Value *CleanupPad,
                                   BasicBlock *UnwindBB = nullptr,
                                   Instruction *InsertBefore = nullptr) {
    assert(CleanupPad);
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
  }

  static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
                                   BasicBlock *InsertAtEnd) {
    assert(CleanupPad);
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }

  /// Convenience accessor.
  CleanupPadInst *getCleanupPad() const {
    return cast<CleanupPadInst>(Op<0>());
  }
  void setCleanupPad(CleanupPadInst *CleanupPad) {
    assert(CleanupPad);
    Op<0>() = CleanupPad;
  }

  unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }

  // Operand 1 exists only when an unwind destination was supplied at creation.
  BasicBlock *getUnwindDest() const {
    return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
  }
  void setUnwindDest(BasicBlock *NewDest) {
    assert(NewDest);
    assert(hasUnwindDest());
    Op<1>() = NewDest;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CleanupRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx == 0);
    return getUnwindDest();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx == 0);
    setUnwindDest(B);
  }

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

template <>
struct OperandTraits<CleanupReturnInst>
    : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)

//===----------------------------------------------------------------------===//
// UnreachableInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// This function has undefined behavior.  In particular, the
/// presence of this instruction indicates some higher level knowledge that the
/// end of the block cannot be reached.
///
class UnreachableInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  UnreachableInst *cloneImpl() const;

public:
  explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
  explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Unreachable;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Terminator with no successors; these private overrides keep generic
  // successor-indexing code from compiling against UnreachableInst.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("UnreachableInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("UnreachableInst has no successors!");
  }
};

//===----------------------------------------------------------------------===//
// TruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of integer types.
class TruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical TruncInst
  TruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  TruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The (smaller) type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  TruncInst(
    Value *S,                     ///< The value to be truncated
    Type *Ty,                     ///< The (smaller) type to truncate to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Trunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// ZExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents zero extension of integer types.
class ZExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ZExtInst
  ZExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  ZExtInst(
    Value *S,                           ///< The value to be zero extended
    Type *Ty,                           ///< The type to zero extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end semantics.
  ZExtInst(
    Value *S,                     ///< The value to be zero extended
    Type *Ty,                     ///< The type to zero extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == ZExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// SExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SExtInst
  SExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SExtInst(
    Value *S,                           ///< The value to be sign extended
    Type *Ty,                           ///< The type to sign extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SExtInst(
    Value *S,                     ///< The value to be sign extended
    Type *Ty,                     ///< The type to sign extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPTruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPTruncInst
  FPTruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPTruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  /// (previous comment wrongly said "insert-before-instruction")
  FPTruncInst(
    Value *S,                     ///< The value to be truncated
    Type *Ty,                     ///< The type to truncate to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPTrunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPExtInst
  FPExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPExtInst(
    Value *S,                           ///< The value to be extended
    Type *Ty,                           ///< The type to extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPExtInst(
    Value *S,                     ///< The value to be extended
    Type *Ty,                     ///< The type to extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// UIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical UIToFPInst
  UIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  UIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  UIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == UIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// SIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SIToFPInst
  SIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPToUIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to unsigned integer
class FPToUIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToUIInst
  FPToUIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToUIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToUIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToUI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// FPToSIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to signed integer.
class FPToSIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToSIInst
  FPToSIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToSIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToSIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToSI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// IntToPtrInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
  // Everything here is public, unlike the sibling cast classes where
  // cloneImpl is protected.
  // Note: Instruction needs to be a friend here to call cloneImpl.
5136 friend class Instruction; 5137 5138 /// Constructor with insert-before-instruction semantics 5139 IntToPtrInst( 5140 Value *S, ///< The value to be converted 5141 Type *Ty, ///< The type to convert to 5142 const Twine &NameStr = "", ///< A name for the new instruction 5143 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5144 ); 5145 5146 /// Constructor with insert-at-end-of-block semantics 5147 IntToPtrInst( 5148 Value *S, ///< The value to be converted 5149 Type *Ty, ///< The type to convert to 5150 const Twine &NameStr, ///< A name for the new instruction 5151 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5152 ); 5153 5154 /// Clone an identical IntToPtrInst. 5155 IntToPtrInst *cloneImpl() const; 5156 5157 /// Returns the address space of this instruction's pointer type. 5158 unsigned getAddressSpace() const { 5159 return getType()->getPointerAddressSpace(); 5160 } 5161 5162 // Methods for support type inquiry through isa, cast, and dyn_cast: 5163 static bool classof(const Instruction *I) { 5164 return I->getOpcode() == IntToPtr; 5165 } 5166 static bool classof(const Value *V) { 5167 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5168 } 5169 }; 5170 5171 //===----------------------------------------------------------------------===// 5172 // PtrToIntInst Class 5173 //===----------------------------------------------------------------------===// 5174 5175 /// This class represents a cast from a pointer to an integer. 5176 class PtrToIntInst : public CastInst { 5177 protected: 5178 // Note: Instruction needs to be a friend here to call cloneImpl. 5179 friend class Instruction; 5180 5181 /// Clone an identical PtrToIntInst. 
5182 PtrToIntInst *cloneImpl() const; 5183 5184 public: 5185 /// Constructor with insert-before-instruction semantics 5186 PtrToIntInst( 5187 Value *S, ///< The value to be converted 5188 Type *Ty, ///< The type to convert to 5189 const Twine &NameStr = "", ///< A name for the new instruction 5190 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5191 ); 5192 5193 /// Constructor with insert-at-end-of-block semantics 5194 PtrToIntInst( 5195 Value *S, ///< The value to be converted 5196 Type *Ty, ///< The type to convert to 5197 const Twine &NameStr, ///< A name for the new instruction 5198 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5199 ); 5200 5201 /// Gets the pointer operand. 5202 Value *getPointerOperand() { return getOperand(0); } 5203 /// Gets the pointer operand. 5204 const Value *getPointerOperand() const { return getOperand(0); } 5205 /// Gets the operand index of the pointer operand. 5206 static unsigned getPointerOperandIndex() { return 0U; } 5207 5208 /// Returns the address space of the pointer operand. 5209 unsigned getPointerAddressSpace() const { 5210 return getPointerOperand()->getType()->getPointerAddressSpace(); 5211 } 5212 5213 // Methods for support type inquiry through isa, cast, and dyn_cast: 5214 static bool classof(const Instruction *I) { 5215 return I->getOpcode() == PtrToInt; 5216 } 5217 static bool classof(const Value *V) { 5218 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5219 } 5220 }; 5221 5222 //===----------------------------------------------------------------------===// 5223 // BitCastInst Class 5224 //===----------------------------------------------------------------------===// 5225 5226 /// This class represents a no-op cast from one type to another. 5227 class BitCastInst : public CastInst { 5228 protected: 5229 // Note: Instruction needs to be a friend here to call cloneImpl. 5230 friend class Instruction; 5231 5232 /// Clone an identical BitCastInst. 
5233 BitCastInst *cloneImpl() const; 5234 5235 public: 5236 /// Constructor with insert-before-instruction semantics 5237 BitCastInst( 5238 Value *S, ///< The value to be casted 5239 Type *Ty, ///< The type to casted to 5240 const Twine &NameStr = "", ///< A name for the new instruction 5241 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5242 ); 5243 5244 /// Constructor with insert-at-end-of-block semantics 5245 BitCastInst( 5246 Value *S, ///< The value to be casted 5247 Type *Ty, ///< The type to casted to 5248 const Twine &NameStr, ///< A name for the new instruction 5249 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5250 ); 5251 5252 // Methods for support type inquiry through isa, cast, and dyn_cast: 5253 static bool classof(const Instruction *I) { 5254 return I->getOpcode() == BitCast; 5255 } 5256 static bool classof(const Value *V) { 5257 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5258 } 5259 }; 5260 5261 //===----------------------------------------------------------------------===// 5262 // AddrSpaceCastInst Class 5263 //===----------------------------------------------------------------------===// 5264 5265 /// This class represents a conversion between pointers from one address space 5266 /// to another. 5267 class AddrSpaceCastInst : public CastInst { 5268 protected: 5269 // Note: Instruction needs to be a friend here to call cloneImpl. 5270 friend class Instruction; 5271 5272 /// Clone an identical AddrSpaceCastInst. 
5273 AddrSpaceCastInst *cloneImpl() const; 5274 5275 public: 5276 /// Constructor with insert-before-instruction semantics 5277 AddrSpaceCastInst( 5278 Value *S, ///< The value to be casted 5279 Type *Ty, ///< The type to casted to 5280 const Twine &NameStr = "", ///< A name for the new instruction 5281 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5282 ); 5283 5284 /// Constructor with insert-at-end-of-block semantics 5285 AddrSpaceCastInst( 5286 Value *S, ///< The value to be casted 5287 Type *Ty, ///< The type to casted to 5288 const Twine &NameStr, ///< A name for the new instruction 5289 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5290 ); 5291 5292 // Methods for support type inquiry through isa, cast, and dyn_cast: 5293 static bool classof(const Instruction *I) { 5294 return I->getOpcode() == AddrSpaceCast; 5295 } 5296 static bool classof(const Value *V) { 5297 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5298 } 5299 5300 /// Gets the pointer operand. 5301 Value *getPointerOperand() { 5302 return getOperand(0); 5303 } 5304 5305 /// Gets the pointer operand. 5306 const Value *getPointerOperand() const { 5307 return getOperand(0); 5308 } 5309 5310 /// Gets the operand index of the pointer operand. 5311 static unsigned getPointerOperandIndex() { 5312 return 0U; 5313 } 5314 5315 /// Returns the address space of the pointer operand. 5316 unsigned getSrcAddressSpace() const { 5317 return getPointerOperand()->getType()->getPointerAddressSpace(); 5318 } 5319 5320 /// Returns the address space of the result. 5321 unsigned getDestAddressSpace() const { 5322 return getType()->getPointerAddressSpace(); 5323 } 5324 }; 5325 5326 /// A helper function that returns the pointer operand of a load or store 5327 /// instruction. Returns nullptr if not load or store. 
5328 inline const Value *getLoadStorePointerOperand(const Value *V) { 5329 if (auto *Load = dyn_cast<LoadInst>(V)) 5330 return Load->getPointerOperand(); 5331 if (auto *Store = dyn_cast<StoreInst>(V)) 5332 return Store->getPointerOperand(); 5333 return nullptr; 5334 } 5335 inline Value *getLoadStorePointerOperand(Value *V) { 5336 return const_cast<Value *>( 5337 getLoadStorePointerOperand(static_cast<const Value *>(V))); 5338 } 5339 5340 /// A helper function that returns the pointer operand of a load, store 5341 /// or GEP instruction. Returns nullptr if not load, store, or GEP. 5342 inline const Value *getPointerOperand(const Value *V) { 5343 if (auto *Ptr = getLoadStorePointerOperand(V)) 5344 return Ptr; 5345 if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) 5346 return Gep->getPointerOperand(); 5347 return nullptr; 5348 } 5349 inline Value *getPointerOperand(Value *V) { 5350 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V))); 5351 } 5352 5353 /// A helper function that returns the alignment of load or store instruction. 5354 inline Align getLoadStoreAlignment(Value *I) { 5355 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 5356 "Expected Load or Store instruction"); 5357 if (auto *LI = dyn_cast<LoadInst>(I)) 5358 return LI->getAlign(); 5359 return cast<StoreInst>(I)->getAlign(); 5360 } 5361 5362 /// A helper function that returns the address space of the pointer operand of 5363 /// load or store instruction. 5364 inline unsigned getLoadStoreAddressSpace(Value *I) { 5365 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 5366 "Expected Load or Store instruction"); 5367 if (auto *LI = dyn_cast<LoadInst>(I)) 5368 return LI->getPointerAddressSpace(); 5369 return cast<StoreInst>(I)->getPointerAddressSpace(); 5370 } 5371 5372 /// A helper function that returns the type of a load or store instruction. 
5373 inline Type *getLoadStoreType(Value *I) { 5374 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 5375 "Expected Load or Store instruction"); 5376 if (auto *LI = dyn_cast<LoadInst>(I)) 5377 return LI->getType(); 5378 return cast<StoreInst>(I)->getValueOperand()->getType(); 5379 } 5380 5381 //===----------------------------------------------------------------------===// 5382 // FreezeInst Class 5383 //===----------------------------------------------------------------------===// 5384 5385 /// This class represents a freeze function that returns random concrete 5386 /// value if an operand is either a poison value or an undef value 5387 class FreezeInst : public UnaryInstruction { 5388 protected: 5389 // Note: Instruction needs to be a friend here to call cloneImpl. 5390 friend class Instruction; 5391 5392 /// Clone an identical FreezeInst 5393 FreezeInst *cloneImpl() const; 5394 5395 public: 5396 explicit FreezeInst(Value *S, 5397 const Twine &NameStr = "", 5398 Instruction *InsertBefore = nullptr); 5399 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd); 5400 5401 // Methods for support type inquiry through isa, cast, and dyn_cast: 5402 static inline bool classof(const Instruction *I) { 5403 return I->getOpcode() == Freeze; 5404 } 5405 static inline bool classof(const Value *V) { 5406 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5407 } 5408 }; 5409 5410 } // end namespace llvm 5411 5412 #endif // LLVM_IR_INSTRUCTIONS_H 5413