xref: /freebsd/contrib/llvm-project/llvm/lib/IR/Instruction.cpp (revision ebacd8013fe5f7fdf9f6a5b286f6680dd2891036)
1 //===-- Instruction.cpp - Implement the Instruction class -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the Instruction class for the IR library.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/IR/Instruction.h"
14 #include "llvm/ADT/DenseSet.h"
15 #include "llvm/IR/Constants.h"
16 #include "llvm/IR/Instructions.h"
17 #include "llvm/IR/IntrinsicInst.h"
18 #include "llvm/IR/Intrinsics.h"
19 #include "llvm/IR/Operator.h"
20 #include "llvm/IR/Type.h"
21 using namespace llvm;
22 
// Construct an instruction and, if InsertBefore is non-null, splice it into
// InsertBefore's basic block immediately ahead of InsertBefore.  The new
// instruction starts out with no parent when InsertBefore is null.
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    BB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}
34 
// Construct an instruction and append it to the end of InsertAtEnd, which
// must be non-null.
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // append this instruction into the basic block
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}
43 
// Destructor.  The instruction must already have been unlinked from its
// basic block (Parent is null) before it is destroyed.
Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
}
59 
60 
// Record the basic block that now contains this instruction.  Called by the
// intrusive list machinery; does not move the instruction itself.
void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}
64 
65 const Module *Instruction::getModule() const {
66   return getParent()->getModule();
67 }
68 
69 const Function *Instruction::getFunction() const {
70   return getParent()->getParent();
71 }
72 
73 void Instruction::removeFromParent() {
74   getParent()->getInstList().remove(getIterator());
75 }
76 
77 iplist<Instruction>::iterator Instruction::eraseFromParent() {
78   return getParent()->getInstList().erase(getIterator());
79 }
80 
81 /// Insert an unlinked instruction into a basic block immediately before the
82 /// specified instruction.
83 void Instruction::insertBefore(Instruction *InsertPos) {
84   InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
85 }
86 
87 /// Insert an unlinked instruction into a basic block immediately after the
88 /// specified instruction.
89 void Instruction::insertAfter(Instruction *InsertPos) {
90   InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
91                                                     this);
92 }
93 
94 /// Unlink this instruction from its current basic block and insert it into the
95 /// basic block that MovePos lives in, right before MovePos.
96 void Instruction::moveBefore(Instruction *MovePos) {
97   moveBefore(*MovePos->getParent(), MovePos->getIterator());
98 }
99 
100 void Instruction::moveAfter(Instruction *MovePos) {
101   moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
102 }
103 
// Move this instruction (possibly across blocks) so it sits immediately
// before iterator I inside BB.  Implemented as a list splice, so no memory
// is allocated and the instruction's address is stable.
void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
}
109 
// Return true if this instruction precedes Other in their (shared) basic
// block.  Uses the cached per-block ordering, lazily renumbering the block
// when the cache has been invalidated.
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
118 
119 bool Instruction::isOnlyUserOfAnyOperand() {
120   return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
121 }
122 
123 void Instruction::setHasNoUnsignedWrap(bool b) {
124   cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
125 }
126 
127 void Instruction::setHasNoSignedWrap(bool b) {
128   cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
129 }
130 
131 void Instruction::setIsExact(bool b) {
132   cast<PossiblyExactOperator>(this)->setIsExact(b);
133 }
134 
135 bool Instruction::hasNoUnsignedWrap() const {
136   return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
137 }
138 
139 bool Instruction::hasNoSignedWrap() const {
140   return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
141 }
142 
143 bool Instruction::hasPoisonGeneratingFlags() const {
144   return cast<Operator>(this)->hasPoisonGeneratingFlags();
145 }
146 
// Clear every flag whose presence can make this operation yield poison.
// Must be kept in sync with hasPoisonGeneratingFlags() (enforced by the
// trailing assert).
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    // nuw/nsw turn wrapping arithmetic into poison.
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    // 'exact' turns inexact division/shifts into poison.
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    // 'inbounds' turns out-of-bounds GEPs into poison.
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }
  // nnan/ninf turn NaN/Inf values into poison on FP operations.
  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}
175 
// Drop metadata not in KnownIDs, plus (for calls) any parameter/return
// attributes whose violation would be immediate UB, so the instruction can
// be safely hoisted or sunk.
void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return attributes
  // that can cause UB if the call is moved to a location where the
  // attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttributeMask UBImplyingAttributes =
      AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}
194 
195 bool Instruction::isExact() const {
196   return cast<PossiblyExactOperator>(this)->isExact();
197 }
198 
199 void Instruction::setFast(bool B) {
200   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
201   cast<FPMathOperator>(this)->setFast(B);
202 }
203 
204 void Instruction::setHasAllowReassoc(bool B) {
205   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
206   cast<FPMathOperator>(this)->setHasAllowReassoc(B);
207 }
208 
209 void Instruction::setHasNoNaNs(bool B) {
210   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
211   cast<FPMathOperator>(this)->setHasNoNaNs(B);
212 }
213 
214 void Instruction::setHasNoInfs(bool B) {
215   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
216   cast<FPMathOperator>(this)->setHasNoInfs(B);
217 }
218 
219 void Instruction::setHasNoSignedZeros(bool B) {
220   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
221   cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
222 }
223 
224 void Instruction::setHasAllowReciprocal(bool B) {
225   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
226   cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
227 }
228 
229 void Instruction::setHasAllowContract(bool B) {
230   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
231   cast<FPMathOperator>(this)->setHasAllowContract(B);
232 }
233 
234 void Instruction::setHasApproxFunc(bool B) {
235   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
236   cast<FPMathOperator>(this)->setHasApproxFunc(B);
237 }
238 
239 void Instruction::setFastMathFlags(FastMathFlags FMF) {
240   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
241   cast<FPMathOperator>(this)->setFastMathFlags(FMF);
242 }
243 
244 void Instruction::copyFastMathFlags(FastMathFlags FMF) {
245   assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
246   cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
247 }
248 
249 bool Instruction::isFast() const {
250   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
251   return cast<FPMathOperator>(this)->isFast();
252 }
253 
254 bool Instruction::hasAllowReassoc() const {
255   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
256   return cast<FPMathOperator>(this)->hasAllowReassoc();
257 }
258 
259 bool Instruction::hasNoNaNs() const {
260   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
261   return cast<FPMathOperator>(this)->hasNoNaNs();
262 }
263 
264 bool Instruction::hasNoInfs() const {
265   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
266   return cast<FPMathOperator>(this)->hasNoInfs();
267 }
268 
269 bool Instruction::hasNoSignedZeros() const {
270   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
271   return cast<FPMathOperator>(this)->hasNoSignedZeros();
272 }
273 
274 bool Instruction::hasAllowReciprocal() const {
275   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
276   return cast<FPMathOperator>(this)->hasAllowReciprocal();
277 }
278 
279 bool Instruction::hasAllowContract() const {
280   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
281   return cast<FPMathOperator>(this)->hasAllowContract();
282 }
283 
284 bool Instruction::hasApproxFunc() const {
285   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
286   return cast<FPMathOperator>(this)->hasApproxFunc();
287 }
288 
289 FastMathFlags Instruction::getFastMathFlags() const {
290   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
291   return cast<FPMathOperator>(this)->getFastMathFlags();
292 }
293 
294 void Instruction::copyFastMathFlags(const Instruction *I) {
295   copyFastMathFlags(I->getFastMathFlags());
296 }
297 
// Copy optional IR flags (nuw/nsw, exact, fast-math, inbounds) from V onto
// this instruction, where both sides support the corresponding flag class.
// Wrapping flags are only copied when IncludeWrapFlags is set.
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  // Note: inbounds is OR'ed rather than overwritten — an inbounds flag
  // already present on the destination GEP is preserved.
  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
}
321 
// Intersect optional IR flags with those of V: after this call, a flag is
// set on this instruction only if it was set on BOTH this and V.  Used when
// two instructions are merged into one.
void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      // Fast-math flags are intersected bitwise.
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
}
346 
/// Map an opcode enumerator to its textual IR mnemonic (e.g. Add -> "add").
/// Unrecognized values produce a placeholder string rather than asserting.
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}
431 
/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
///
/// "Special state" is the per-opcode payload not captured by the operand
/// list: alignment, volatility, atomic orderings, predicates, calling
/// conventions, attributes, indices, shuffle masks, etc.  Both instructions
/// must already have the same opcode.  When IgnoreAlignment is set,
/// alignment differences on alloca/load/store are tolerated.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(I2)->getSourceElementType();

  // Opcodes with no extra state compare equal by default.
  return true;
}
501 
502 bool Instruction::isIdenticalTo(const Instruction *I) const {
503   return isIdenticalToWhenDefined(I) &&
504          SubclassOptionalData == I->SubclassOptionalData;
505 }
506 
// Identity check ignoring optional poison-generating flags: same opcode,
// type, operands, special state — and for PHIs, the same incoming blocks.
bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}
531 
// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
//
// Looser comparison than isIdenticalTo: operands only need matching TYPES,
// not matching values.  The flags bitmask can relax alignment comparison
// (CompareIgnoringAlignment) or compare only scalar element types
// (CompareUsingScalarTypes).
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes  = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}
557 
558 bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
559   for (const Use &U : uses()) {
560     // PHI nodes uses values in the corresponding predecessor block.  For other
561     // instructions, just check to see whether the parent of the use matches up.
562     const Instruction *I = cast<Instruction>(U.getUser());
563     const PHINode *PN = dyn_cast<PHINode>(I);
564     if (!PN) {
565       if (I->getParent() != BB)
566         return true;
567       continue;
568     }
569 
570     if (PN->getIncomingBlock(U) != BB)
571       return true;
572   }
573   return false;
574 }
575 
// Conservatively report whether this instruction might read memory.
bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    // Calls read unless known to only write memory.
    return !cast<CallBase>(this)->onlyWritesMemory();
  case Instruction::Store:
    // Ordered/volatile stores act as reads for ordering purposes.
    return !cast<StoreInst>(this)->isUnordered();
  }
}
595 
// Conservatively report whether this instruction might modify memory.
bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    // Calls write unless known to only read memory.
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    // Ordered/volatile loads act as writes for ordering purposes.
    return !cast<LoadInst>(this)->isUnordered();
  }
}
615 
// Return true if this instruction has atomic semantics: cmpxchg, atomicrmw,
// fence, or a load/store carrying an atomic ordering.
bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}
630 
631 bool Instruction::hasAtomicLoad() const {
632   assert(isAtomic());
633   switch (getOpcode()) {
634   default:
635     return false;
636   case Instruction::AtomicCmpXchg:
637   case Instruction::AtomicRMW:
638   case Instruction::Load:
639     return true;
640   }
641 }
642 
643 bool Instruction::hasAtomicStore() const {
644   assert(isAtomic());
645   switch (getOpcode()) {
646   default:
647     return false;
648   case Instruction::AtomicCmpXchg:
649   case Instruction::AtomicRMW:
650   case Instruction::Store:
651     return true;
652   }
653 }
654 
// Return true if this instruction carries a volatile marker: volatile
// memory ops, or the handful of intrinsics that encode a volatile flag.
bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      // These matrix intrinsics pass their volatile bit as an i1 argument
      // (operand 2 for loads, operand 3 for stores).
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}
684 
// Return true if this instruction may raise an exception: a call not marked
// nounwind, an unwinding cleanupret/catchswitch, or a resume.  Note this
// checks CallInst only — invoke unwinding is modeled via its unwind edge.
bool Instruction::mayThrow() const {
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    return !CI->doesNotThrow();
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    return CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    return CatchSwitch->unwindsToCaller();
  return isa<ResumeInst>(this);
}
694 
695 bool Instruction::mayHaveSideEffects() const {
696   return mayWriteToMemory() || mayThrow() || !willReturn();
697 }
698 
699 bool Instruction::isSafeToRemove() const {
700   return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
701          !this->isTerminator() && !this->isEHPad();
702 }
703 
// Return true if this instruction is guaranteed to transfer control to its
// successor (i.e. cannot hang or trap forever).
bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    // FIXME: Temporarily assume that all side-effect free intrinsics will
    // return. Remove this workaround once all intrinsics are appropriately
    // annotated.
    return CB->hasFnAttr(Attribute::WillReturn) ||
           (isa<IntrinsicInst>(CB) && CB->onlyReadsMemory());
  return true;
}
717 
718 bool Instruction::isLifetimeStartOrEnd() const {
719   auto *II = dyn_cast<IntrinsicInst>(this);
720   if (!II)
721     return false;
722   Intrinsic::ID ID = II->getIntrinsicID();
723   return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
724 }
725 
726 bool Instruction::isLaunderOrStripInvariantGroup() const {
727   auto *II = dyn_cast<IntrinsicInst>(this);
728   if (!II)
729     return false;
730   Intrinsic::ID ID = II->getIntrinsicID();
731   return ID == Intrinsic::launder_invariant_group ||
732          ID == Intrinsic::strip_invariant_group;
733 }
734 
735 bool Instruction::isDebugOrPseudoInst() const {
736   return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
737 }
738 
739 const Instruction *
740 Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
741   for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
742     if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
743       return I;
744   return nullptr;
745 }
746 
747 const Instruction *
748 Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
749   for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
750     if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
751       return I;
752   return nullptr;
753 }
754 
// Return true if this operation is associative.  Beyond the statically
// associative opcodes, FP add/mul qualify only when the reassoc and nsz
// fast-math flags are both present.
bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}
769 
770 bool Instruction::isCommutative() const {
771   if (auto *II = dyn_cast<IntrinsicInst>(this))
772     return II->isCommutative();
773   // TODO: Should allow icmp/fcmp?
774   return isCommutative(getOpcode());
775 }
776 
// Dispatch getNumSuccessors to the concrete terminator subclass via the
// generated opcode table.  Asserts on non-terminators.
unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
788 
// Dispatch getSuccessor to the concrete terminator subclass via the
// generated opcode table.  Asserts on non-terminators.
BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
800 
// Dispatch setSuccessor to the concrete terminator subclass via the
// generated opcode table.  Asserts on non-terminators.
void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
812 
813 void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
814   for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
815        Idx != NumSuccessors; ++Idx)
816     if (getSuccessor(Idx) == OldBB)
817       setSuccessor(Idx, NewBB);
818 }
819 
// Fallback cloneImpl: every concrete subclass must override this; reaching
// here is a programming error.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
823 
// Swap the two weights of "branch_weights" profile metadata, used when a
// branch's successors are swapped.  MDNodes are immutable, so a fresh node
// with reordered operands replaces the old one.  Nodes that are not
// two-weight branch_weights (operand 0 name + two weights) are left alone.
void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3 ||
      !isa<MDString>(ProfileData->getOperand(0)))
    return;

  MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return;

  // The first operand is the name. Fetch them backwards and build a new one.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}
840 
// Copy metadata kinds from SrcInst onto this instruction.  An empty WL
// copies everything; otherwise only the listed kind IDs (including the
// debug location, keyed by MD_dbg) are transferred.
void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Otherwise, enumerate and copy over metadata from the old instruction to the
  // new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  // The debug location is stored separately from other metadata.
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}
861 
// Produce an unlinked copy of this instruction: dispatch to the subclass
// cloneImpl() via the generated opcode table, then carry over the optional
// flags and all metadata.
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}
879