//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instruction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
using namespace llvm;

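// The constructors below only initialize the base Value/User state and
// optionally link the new instruction into a basic block; concrete
// instructions are created through their subclasses. A minimal usage sketch
// (identifiers are placeholders, not part of this file):
//
//   // Builds "%sum = add i32 %X, %Y" and inserts it before InsertPt.
//   Instruction *Sum =
//       BinaryOperator::Create(Instruction::Add, X, Y, "sum", InsertPt);
//
// The subclass constructor forwards InsertPt here, which performs the
// insertion.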
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    BB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // Append this instruction to the end of the basic block.
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}

Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
}


void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}

const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}

iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}

/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
                                                    this);
}

/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), MovePos->getIterator());
}

void Instruction::moveAfter(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
}

void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
}

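// Note: the Order field used below is maintained lazily. The first query
// after the parent block has been modified may renumber the whole block;
// until the ordering is invalidated again, later queries are constant time.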
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}

bool Instruction::isOnlyUserOfAnyOperand() {
  return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
}

void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}

bool Instruction::hasPoisonGeneratingFlags() const {
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
}

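// Drops the flags that can make this instruction's result poison:
//   - nuw/nsw on add, sub, mul and shl
//   - exact on udiv, sdiv, lshr and ashr
//   - inbounds on getelementptr
// Fast-math flags are not dropped yet (see the TODO below).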
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }
  // TODO: FastMathFlags!

  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}

void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return
  // attributes that can cause UB if the call is moved to a location where the
  // attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttrBuilder UBImplyingAttributes = AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}

bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}

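// copyIRFlags() overwrites this instruction's wrap, exact and fast-math flags
// with those of V whenever both sides support the flag; the GEP inbounds flag
// is OR'ed instead, so an already-inbounds destination stays inbounds.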
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
}

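// andIRFlags() keeps a flag only if both this instruction and V carry it, the
// conservative choice when two instructions are folded into one. E.g.
// (illustrative) combining "add nsw nuw" with "add nsw" leaves only nsw set.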
void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
}

const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}

/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlignment() == cast<AllocaInst>(I2)->getAlignment() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();

  return true;
}

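// isIdenticalTo() additionally compares SubclassOptionalData, which holds the
// optional flags (nsw/nuw, exact, inbounds, fast-math). E.g. (illustrative)
// "add nsw i32 %x, %y" and "add i32 %x, %y" are identical when defined but
// not identical.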
bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}

bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}

// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes  = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}

bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes use values in the corresponding predecessor block.  For other
    // instructions, just check to see whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}

bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->doesNotReadMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}

bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}

bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}

bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}

bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}

bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}

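// Conservatively, control may leave this instruction via an exception if it
// is a call that is not marked nounwind, a cleanupret or catchswitch that
// unwinds to the caller, or a resume.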
bool Instruction::mayThrow() const {
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    return !CI->doesNotThrow();
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    return CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    return CatchSwitch->unwindsToCaller();
  return isa<ResumeInst>(this);
}

bool Instruction::mayHaveSideEffects() const {
  return mayWriteToMemory() || mayThrow() || !willReturn();
}

bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator();
}

bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    // FIXME: Temporarily assume that all side-effect free intrinsics will
    // return. Remove this workaround once all intrinsics are appropriately
    // annotated.
    return CB->hasFnAttr(Attribute::WillReturn) ||
           (isa<IntrinsicInst>(CB) && CB->onlyReadsMemory());
  return true;
}

bool Instruction::isLifetimeStartOrEnd() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}

bool Instruction::isLaunderOrStripInvariantGroup() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::launder_invariant_group ||
         ID == Intrinsic::strip_invariant_group;
}

bool Instruction::isDebugOrPseudoInst() const {
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}

const Instruction *
Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

const Instruction *
Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}

bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}

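// The successor accessors below dispatch on the concrete terminator class:
// HANDLE_TERM_INST from llvm/IR/Instruction.def expands to one case per
// terminator opcode, and non-terminators fall through to llvm_unreachable.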
unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
       Idx != NumSuccessors; ++Idx)
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
}

Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}

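// Swaps the two weight operands of a "branch_weights" !prof node, e.g.
// (illustrative) !{!"branch_weights", i32 20, i32 80} becomes
// !{!"branch_weights", i32 80, i32 20}. Any other !prof form is left
// untouched.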
void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3 ||
      !isa<MDString>(ProfileData->getOperand(0)))
    return;

  MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return;

  // The first operand is the name; keep it and swap the two weight operands.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}

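// Copies metadata, including the debug location, from SrcInst. With an empty
// whitelist WL everything is copied; otherwise only the metadata kinds whose
// IDs appear in WL are transferred.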
void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Otherwise, enumerate and copy over metadata from the old instruction to the
  // new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}

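// clone() dispatches to the subclass cloneImpl() via Instruction.def, then
// copies the optional flags and all metadata. The copy is unlinked: it has no
// parent and must be inserted into a basic block by the caller.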
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}