//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instruction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
using namespace llvm;

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    BB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // Append this instruction to the end of the basic block.
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}

Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
}


void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}

const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}

iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}

/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
                                                    this);
}

/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), MovePos->getIterator());
}

void Instruction::moveAfter(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
}

void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
}

bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
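// Illustrative usage sketch for comesBefore() above (Def and UseInst are
// hypothetical names, not part of this file):
//   if (Def->comesBefore(UseInst))
//     ... Def appears earlier than UseInst in their shared block ...
// The first query after the block's ordering has been invalidated pays for a
// renumbering pass; later queries are plain integer comparisons of Order.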

bool Instruction::isOnlyUserOfAnyOperand() {
  return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
}

void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}

bool Instruction::hasPoisonGeneratingFlags() const {
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
}

void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }
  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}
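// Illustrative example for dropPoisonGeneratingFlags() above (hypothetical
// IR): after the call, instructions such as
//   %a = add nuw nsw i32 %x, %y
//   %p = getelementptr inbounds i32, i32* %base, i64 %idx
// become
//   %a = add i32 %x, %y
//   %p = getelementptr i32, i32* %base, i64 %idx
// i.e. the nuw/nsw, exact, inbounds, nnan and ninf bits are cleared while the
// computed value (when no poison would have been produced) stays the same.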

void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return attributes
  // that can cause UB if the call is moved to a location where the attribute is
  // not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttributeMask UBImplyingAttributes =
      AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}
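// Illustrative sketch for the function above (hypothetical IR; the exact
// attribute set is whatever AttributeFuncs::getUBImplyingAttributes() lists,
// e.g. noundef and dereferenceable): before hoisting a call past a guarding
// branch, a pass would turn
//   %r = call noundef i8* @f(i8* noundef dereferenceable(4) %p)
// into
//   %r = call i8* @f(i8* %p)
// so that attributes which could make the speculated call immediate UB are
// gone, while purely optimization-oriented attributes may remain.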

bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}

void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
}

void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
}
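// Illustrative sketch for copyIRFlags()/andIRFlags() above (A, B and NewAdd
// are hypothetical): a transform that merges two adds typically intersects
// their flags so the result is only as strong as both inputs justify:
//   A: add nuw nsw i32 ...        B: add nuw i32 ...
//   NewAdd->copyIRFlags(A);       // NewAdd is now nuw nsw
//   NewAdd->andIRFlags(B);        // NewAdd keeps nuw, drops nsw
// Fast-math flags and the GEP inbounds bit are intersected the same way by
// andIRFlags(), whereas copyIRFlags() ORs inbounds so a destination GEP that
// is already inbounds keeps that fact.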

const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}

/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();

  return true;
}

bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}

bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}
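// Illustrative note for the two predicates above (hypothetical IR): given
//   %a = add nsw i32 %x, %y
//   %b = add i32 %x, %y
// isIdenticalToWhenDefined() reports a match (same opcode, type and operands),
// but isIdenticalTo() does not, because the nsw bit is stored in
// SubclassOptionalData: %a is poison on signed overflow while %b is not.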

// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes  = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}

bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes use values in the corresponding predecessor block.  For other
    // instructions, just check to see whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}

bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyWritesMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}

bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}

bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}

bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}

bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}

bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}

bool Instruction::mayThrow() const {
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    return !CI->doesNotThrow();
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    return CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    return CatchSwitch->unwindsToCaller();
  return isa<ResumeInst>(this);
}

bool Instruction::mayHaveSideEffects() const {
  return mayWriteToMemory() || mayThrow() || !willReturn();
}

bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator();
}

bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    // FIXME: Temporarily assume that all side-effect free intrinsics will
    // return. Remove this workaround once all intrinsics are appropriately
    // annotated.
    return CB->hasFnAttr(Attribute::WillReturn) ||
           (isa<IntrinsicInst>(CB) && CB->onlyReadsMemory());
  return true;
}

bool Instruction::isLifetimeStartOrEnd() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}

bool Instruction::isLaunderOrStripInvariantGroup() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::launder_invariant_group ||
         ID == Intrinsic::strip_invariant_group;
}

bool Instruction::isDebugOrPseudoInst() const {
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}

const Instruction *
Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

const Instruction *
Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}

bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}

unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
       Idx != NumSuccessors; ++Idx)
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
}

Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}

void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3 ||
      !isa<MDString>(ProfileData->getOperand(0)))
    return;

  MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return;

  // The first operand is the name; fetch the two weight operands in reverse
  // order and build a new node from them.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}
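// Illustrative example for swapProfMetadata() above (hypothetical weights):
// when the successors of a conditional branch are swapped, its profile node
//   !prof !{!"branch_weights", i32 2000, i32 16}
// is rebuilt as
//   !prof !{!"branch_weights", i32 16, i32 2000}
// so each weight keeps following its successor. Anything that is not a
// three-operand "branch_weights" node is left untouched.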

void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Otherwise, enumerate and copy over metadata from the old instruction to the
  // new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}
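// Illustrative usage sketch for copyMetadata() above (NewI and OldI are
// hypothetical): an empty whitelist copies every metadata kind, including the
// debug location; passing explicit kind IDs restricts the copy:
//   NewI->copyMetadata(*OldI);                         // copy all metadata
//   NewI->copyMetadata(*OldI, {LLVMContext::MD_dbg});  // debug location only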

Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}