xref: /freebsd/contrib/llvm-project/llvm/lib/IR/Instructions.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 //===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements all of the non-inline methods for the LLVM instruction
10 // classes.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/IR/Instructions.h"
15 #include "LLVMContextImpl.h"
16 #include "llvm/ADT/SmallBitVector.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/IR/Attributes.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/IR/Constant.h"
22 #include "llvm/IR/ConstantRange.h"
23 #include "llvm/IR/Constants.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/InstrTypes.h"
28 #include "llvm/IR/Instruction.h"
29 #include "llvm/IR/Intrinsics.h"
30 #include "llvm/IR/LLVMContext.h"
31 #include "llvm/IR/MDBuilder.h"
32 #include "llvm/IR/Metadata.h"
33 #include "llvm/IR/Module.h"
34 #include "llvm/IR/Operator.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/IR/ProfDataUtils.h"
37 #include "llvm/IR/Type.h"
38 #include "llvm/IR/Value.h"
39 #include "llvm/Support/AtomicOrdering.h"
40 #include "llvm/Support/Casting.h"
41 #include "llvm/Support/CheckedArithmetic.h"
42 #include "llvm/Support/Compiler.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include "llvm/Support/KnownBits.h"
45 #include "llvm/Support/MathExtras.h"
46 #include "llvm/Support/ModRef.h"
47 #include "llvm/Support/TypeSize.h"
48 #include <algorithm>
49 #include <cassert>
50 #include <cstdint>
51 #include <optional>
52 #include <vector>
53 
54 using namespace llvm;
55 
// Command-line escape hatch: when set, the inttoptr(ptrtoint x) round-trip
// folding performed elsewhere in this file is suppressed (useful for
// debugging provenance-sensitive miscompiles).
static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
59 
60 //===----------------------------------------------------------------------===//
61 //                            AllocaInst Class
62 //===----------------------------------------------------------------------===//
63 
/// Compute the total number of bytes this alloca reserves, or std::nullopt
/// when the size cannot be determined (non-constant array length) or does
/// not fit in a TypeSize (multiplication overflow).
std::optional<TypeSize>
AllocaInst::getAllocationSize(const DataLayout &DL) const {
  TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
  if (isArrayAllocation()) {
    // A runtime array length means the allocation size is unknowable here.
    auto *C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return std::nullopt;
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
    // Checked multiply: report "unknown" instead of a wrapped-around size.
    auto CheckedProd =
        checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
    if (!CheckedProd)
      return std::nullopt;
    return TypeSize::getFixed(*CheckedProd);
  }
  return Size;
}
80 
/// Same as getAllocationSize but expressed in bits; std::nullopt when the
/// byte size is unknown or the *8 conversion would overflow.
std::optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  std::optional<TypeSize> Size = getAllocationSize(DL);
  if (!Size)
    return std::nullopt;
  // Scale bytes to bits with overflow checking; preserve scalability.
  auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
                                        static_cast<TypeSize::ScalarTy>(8));
  if (!CheckedProd)
    return std::nullopt;
  return TypeSize::get(*CheckedProd, Size->isScalable());
}
92 
93 //===----------------------------------------------------------------------===//
94 //                              SelectInst Class
95 //===----------------------------------------------------------------------===//
96 
/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  // The two selected values must agree on type, and token values cannot
  // flow through a select.
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select: condition is <n x i1>, values are vectors of the same
    // element count.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
                   "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    // Scalar select: condition must be exactly i1.
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
121 
122 //===----------------------------------------------------------------------===//
123 //                               PHINode Class
124 //===----------------------------------------------------------------------===//
125 
/// Copy constructor: clone the incoming values and incoming blocks of \p PN
/// into freshly allocated hung-off operand storage.
PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
      ReservedSpace(PN.getNumOperands()) {
  NumUserOperands = PN.getNumOperands();
  // PHIs store operands out-of-line; allocate a matching block, then copy
  // both halves of the (value, block) pairs.
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
  SubclassOptionalData = PN.SubclassOptionalData;
}
135 
// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  copyIncomingBlocks(drop_begin(blocks(), Idx + 1), Idx);

  // Nuke the last value: clear the now-duplicated trailing use and shrink
  // the operand count by one.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}
161 
/// Remove every incoming (value, block) pair whose index satisfies
/// \p Predicate; optionally erase the PHI when it becomes empty.
void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
                                    bool DeletePHIIfEmpty) {
  // Collect the doomed indices first so the predicate sees stable indices.
  SmallDenseSet<unsigned> RemoveIndices;
  for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
    if (Predicate(Idx))
      RemoveIndices.insert(Idx);

  if (RemoveIndices.empty())
    return;

  // Remove operands, then clear the uses left dangling past the new end so
  // the use-lists of the removed values are properly updated.
  auto NewOpEnd = remove_if(operands(), [&](Use &U) {
    return RemoveIndices.contains(U.getOperandNo());
  });
  for (Use &U : make_range(NewOpEnd, op_end()))
    U.set(nullptr);

  // Remove incoming blocks in lock-step (the block array is plain pointers,
  // so the compacted tail can simply be abandoned).
  (void)std::remove_if(const_cast<block_iterator>(block_begin()),
                 const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
                   return RemoveIndices.contains(&BB - block_begin());
                 });

  setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
}
194 
195 /// growOperands - grow operands - This grows the operand list in response
196 /// to a push_back style of operation.  This grows the number of ops by 1.5
197 /// times.
198 ///
growOperands()199 void PHINode::growOperands() {
200   unsigned e = getNumOperands();
201   unsigned NumOps = e + e / 2;
202   if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.
203 
204   ReservedSpace = NumOps;
205   growHungoffUses(ReservedSpace, /* IsPhi */ true);
206 }
207 
/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
       // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  // Every incoming value was the PHI itself: the value is unconstrained.
  if (ConstantValue == this)
    return PoisonValue::get(getType());
  return ConstantValue;
}
224 
225 /// hasConstantOrUndefValue - Whether the specified PHI node always merges
226 /// together the same value, assuming that undefs result in the same value as
227 /// non-undefs.
228 /// Unlike \ref hasConstantValue, this does not return a value because the
229 /// unique non-undef incoming value need not dominate the PHI node.
hasConstantOrUndefValue() const230 bool PHINode::hasConstantOrUndefValue() const {
231   Value *ConstantValue = nullptr;
232   for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
233     Value *Incoming = getIncomingValue(i);
234     if (Incoming != this && !isa<UndefValue>(Incoming)) {
235       if (ConstantValue && ConstantValue != Incoming)
236         return false;
237       ConstantValue = Incoming;
238     }
239   }
240   return true;
241 }
242 
243 //===----------------------------------------------------------------------===//
244 //                       LandingPadInst Implementation
245 //===----------------------------------------------------------------------===//
246 
/// Construct a landingpad with room reserved for \p NumReservedValues
/// clauses; actual clauses are added later via addClause().
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
  init(NumReservedValues, NameStr);
}
253 
/// Copy constructor: duplicate the clause operands and the cleanup flag of
/// \p LP into freshly allocated hung-off storage.
LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
      ReservedSpace(LP.getNumOperands()) {
  NumUserOperands = LP.getNumOperands();
  allocHungoffUses(LP.getNumOperands());
  // Clone every clause operand into the new operand list.
  const Use *Src = LP.getOperandList();
  Use *Dst = getOperandList();
  for (unsigned Idx = 0; Idx != ReservedSpace; ++Idx)
    Dst[Idx] = Src[Idx];

  setCleanup(LP.isCleanup());
}
266 
/// Factory wrapper around the reserving constructor.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       InsertPosition InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}
272 
/// Shared constructor body: reserve clause slots (initially zero operands
/// are in use), name the instruction, and default to a non-cleanup pad.
void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}
280 
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  // Fast path: existing reservation already covers the request.
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
289 
/// Append one catch/filter clause operand, growing storage if needed.
void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}
297 
298 //===----------------------------------------------------------------------===//
299 //                        CallBase Implementation
300 //===----------------------------------------------------------------------===//
301 
/// Clone \p CB (callee, arguments, attributes) as the same concrete call
/// kind, but with \p Bundles as its operand-bundle list.
CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           InsertPosition InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}
315 
Create(CallBase * CI,OperandBundleDef OpB,InsertPosition InsertPt)316 CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
317                            InsertPosition InsertPt) {
318   SmallVector<OperandBundleDef, 2> OpDefs;
319   for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
320     auto ChildOB = CI->getOperandBundleAt(i);
321     if (ChildOB.getTagName() != OpB.getTag())
322       OpDefs.emplace_back(ChildOB);
323   }
324   OpDefs.emplace_back(OpB);
325   return CallBase::Create(CI, OpDefs, InsertPt);
326 }
327 
/// The caller is the function that contains this call's parent block.
Function *CallBase::getCaller() { return getParent()->getParent(); }

/// Only CallBr has a dynamic extra-operand count: its indirect destinations
/// plus the default destination.
unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}
334 
isIndirectCall() const335 bool CallBase::isIndirectCall() const {
336   const Value *V = getCalledOperand();
337   if (isa<Function>(V) || isa<Constant>(V))
338     return false;
339   return !isInlineAsm();
340 }
341 
/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  // Invokes and callbrs can never be tail calls.
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}
356 
/// Intrinsic ID of the directly-called function, or not_intrinsic for
/// indirect calls and non-intrinsic callees.
Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = dyn_cast_or_null<Function>(getCalledOperand()))
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}
362 
/// nofpclass mask for the return value: union of the call-site attribute
/// and, for direct calls, the callee's attribute.
FPClassTest CallBase::getRetNoFPClass() const {
  FPClassTest Mask = Attrs.getRetNoFPClass();

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getRetNoFPClass();
  return Mask;
}

/// nofpclass mask for argument \p i, combined the same way.
FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
  FPClassTest Mask = Attrs.getParamNoFPClass(i);

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getParamNoFPClass(i);
  return Mask;
}
378 
/// Combined `range` attribute on the return value: the intersection when
/// both call site and callee carry one, whichever one is present otherwise,
/// or std::nullopt when neither does.
std::optional<ConstantRange> CallBase::getRange() const {
  Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
  Attribute FnAttr;
  if (const Function *F = getCalledFunction())
    FnAttr = F->getRetAttribute(Attribute::Range);

  if (CallAttr.isValid() && FnAttr.isValid())
    return CallAttr.getRange().intersectWith(FnAttr.getRange());
  if (CallAttr.isValid())
    return CallAttr.getRange();
  if (FnAttr.isValid())
    return FnAttr.getRange();
  return std::nullopt;
}
393 
isReturnNonNull() const394 bool CallBase::isReturnNonNull() const {
395   if (hasRetAttr(Attribute::NonNull))
396     return true;
397 
398   if (getRetDereferenceableBytes() > 0 &&
399       !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
400     return true;
401 
402   return false;
403 }
404 
/// Return the first argument operand carrying \p Kind, checking the
/// call-site attribute list first and then the callee's, or nullptr.
Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  unsigned Index;

  // hasAttrSomewhere reports an AttributeList index; convert it to an
  // argument number before indexing the operands.
  if (Attrs.hasAttrSomewhere(Kind, &Index))
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}
416 
/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  // A call-site attribute always wins.
  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles: a callee-level memory
  // attribute can be weakened by bundles attached at this call site.
  switch (Kind) {
  case Attribute::ReadNone:
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
  case Attribute::ReadOnly:
    return !hasClobberingOperandBundles();
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}
443 
/// Whether pointer argument \p ArgNo is known non-null at this call site.
/// When \p AllowUndefOrPoison is false, `nonnull` alone is not enough — the
/// argument must also be `noundef` (nonnull without noundef only yields
/// poison on null, it does not make the value non-null).
bool CallBase::paramHasNonNullAttr(unsigned ArgNo,
                                   bool AllowUndefOrPoison) const {
  assert(getArgOperand(ArgNo)->getType()->isPointerTy() &&
         "Argument must be a pointer");
  if (paramHasAttr(ArgNo, Attribute::NonNull) &&
      (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
    return true;

  // dereferenceable implies nonnull when null is not a defined pointer in
  // the argument's address space.
  if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
      !NullPointerIsDefined(
          getCaller(),
          getArgOperand(ArgNo)->getType()->getPointerAddressSpace()))
    return true;

  return false;
}
460 
/// Check for a function attribute on the callee of a direct call
/// (enum-kind overload). Indirect calls report false.
bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

/// String-kind overload of the above.
bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}
474 
/// Fetch a function attribute from the callee of a direct call; returns an
/// invalid Attribute for indirect calls. AK is either Attribute::AttrKind
/// or StringRef (explicitly instantiated below).
template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

template LLVM_ABI Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template LLVM_ABI Attribute
CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
493 
/// Fetch a parameter attribute from the callee of a direct call; returns an
/// invalid Attribute for indirect calls. Instantiated for both enum and
/// string attribute kinds below.
template <typename AK>
Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                                 AK Kind) const {
  Value *V = getCalledOperand();

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getParamAttr(ArgNo, Kind);

  return Attribute();
}
template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
    unsigned ArgNo, Attribute::AttrKind Kind) const;
template LLVM_ABI Attribute
CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;
508 
getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> & Defs) const509 void CallBase::getOperandBundlesAsDefs(
510     SmallVectorImpl<OperandBundleDef> &Defs) const {
511   for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
512     Defs.emplace_back(getOperandBundleAt(i));
513 }
514 
/// Copy the inputs of \p Bundles into the operand list starting at
/// \p BeginIndex, and fill in the per-bundle BundleOpInfo descriptors
/// (uniqued tag, [Begin, End) operand range). Returns the first operand
/// slot past the bundle operands.
CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  // First pass: splat all bundle inputs into the operand array.
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  // Second pass: record tag and operand range for each bundle, walking the
  // descriptor array and the source bundles in lock-step.
  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}
540 
CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Else fall back to an interpolating binary search that uses the fact that
  /// bundles usually have a similar number of arguments to converge faster.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a fractional value below, and to avoid floating point we
  /// represent it as an integral value multiplied by this scaling constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  while (Begin != End) {
    // Estimate the bundle containing OpIdx by assuming operands are spread
    // evenly across the remaining [Begin, End) descriptors.
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    // Narrow the search window to the side that must contain OpIdx.
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}
587 
/// Return a call equivalent to \p CB but carrying bundle \p OB. If a bundle
/// with tag \p ID is already present, \p CB is returned unchanged (the
/// original instruction is NOT erased when a new one is created).
CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     InsertPosition InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}
599 
/// Return a call equivalent to \p CB without any bundle tagged \p ID.
/// Returns \p CB itself when no such bundle exists, so callers can compare
/// pointers to detect whether a new instruction was created.
CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
                                        InsertPosition InsertPt) {
  SmallVector<OperandBundleDef, 1> Bundles;
  bool CreateNew = false;

  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CB->getOperandBundleAt(I);
    if (Bundle.getTagID() == ID) {
      CreateNew = true;
      continue;
    }
    Bundles.emplace_back(Bundle);
  }

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
}
616 
bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth) forces a callsite to be at least readonly.
  return hasOperandBundlesOtherThan({LLVMContext::OB_ptrauth,
                                     LLVMContext::OB_kcfi,
                                     LLVMContext::OB_convergencectrl}) &&
         getIntrinsicID() != Intrinsic::assume;
}

bool CallBase::hasClobberingOperandBundles() const {
  // Same conservative scheme, but deopt and funclet bundles are additionally
  // known not to write memory.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi,
              LLVMContext::OB_convergencectrl}) &&
         getIntrinsicID() != Intrinsic::assume;
}
634 
/// Effective memory effects of this call: call-site attributes intersected
/// with the callee's effects, after widening the callee's effects for any
/// attached operand bundles and for volatility.
MemoryEffects CallBase::getMemoryEffects() const {
  MemoryEffects ME = getAttributes().getMemoryEffects();
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
    MemoryEffects FnME = Fn->getMemoryEffects();
    if (hasOperandBundles()) {
      // TODO: Add a method to get memory effects for operand bundles instead.
      if (hasReadingOperandBundles())
        FnME |= MemoryEffects::readOnly();
      if (hasClobberingOperandBundles())
        FnME |= MemoryEffects::writeOnly();
    }
    if (isVolatile()) {
      // Volatile operations also access inaccessible memory.
      FnME |= MemoryEffects::inaccessibleMemOnly();
    }
    ME &= FnME;
  }
  return ME;
}
/// Record \p ME as a call-site `memory(...)` attribute.
void CallBase::setMemoryEffects(MemoryEffects ME) {
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
}
657 
/// Determine if the function does not access memory.
bool CallBase::doesNotAccessMemory() const {
  return getMemoryEffects().doesNotAccessMemory();
}
void CallBase::setDoesNotAccessMemory() {
  setMemoryEffects(MemoryEffects::none());
}

/// Determine if the function does not access or only reads memory.
bool CallBase::onlyReadsMemory() const {
  return getMemoryEffects().onlyReadsMemory();
}
// Setters below intersect with the current effects rather than overwrite,
// so previously-established restrictions are preserved.
void CallBase::setOnlyReadsMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
}

/// Determine if the function does not access or only writes memory.
bool CallBase::onlyWritesMemory() const {
  return getMemoryEffects().onlyWritesMemory();
}
void CallBase::setOnlyWritesMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
}
681 
/// Determine if the call can access memory only using pointers based
/// on its arguments.
bool CallBase::onlyAccessesArgMemory() const {
  return getMemoryEffects().onlyAccessesArgPointees();
}
void CallBase::setOnlyAccessesArgMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
}

/// Determine if the function may only access memory that is
///  inaccessible from the IR.
bool CallBase::onlyAccessesInaccessibleMemory() const {
  return getMemoryEffects().onlyAccessesInaccessibleMem();
}
void CallBase::setOnlyAccessesInaccessibleMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
}

/// Determine if the function may only access memory that is
///  either inaccessible from the IR or pointed to by its arguments.
bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
}
void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
  setMemoryEffects(getMemoryEffects() &
                   MemoryEffects::inaccessibleOrArgMemOnly());
}
709 
/// Capture semantics of operand \p OpNo: for arguments, the intersection of
/// call-site and (direct) callee `captures` attributes; for bundle
/// operands, deopt bundles never capture, anything else may.
CaptureInfo CallBase::getCaptureInfo(unsigned OpNo) const {
  if (OpNo < arg_size()) {
    // If the argument is passed byval, the callee does not have access to the
    // original pointer and thus cannot capture it.
    if (isByValArgument(OpNo))
      return CaptureInfo::none();

    CaptureInfo CI = getParamAttributes(OpNo).getCaptureInfo();
    if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
      CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
    return CI;
  }

  // deopt operand bundles are captures(none)
  auto &BOI = getBundleOpInfoForOperand(OpNo);
  auto OBU = operandBundleFromBundleOpInfo(BOI);
  return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
}
728 
/// True if any pointer argument may be captured through the return value in
/// ways not already covered by its "other" capture components.
bool CallBase::hasArgumentWithAdditionalReturnCaptureComponents() const {
  for (unsigned I = 0, E = arg_size(); I < E; ++I) {
    if (!getArgOperand(I)->getType()->isPointerTy())
      continue;

    // Combine call-site and callee capture info, then check whether the
    // return components add anything beyond the non-return components.
    CaptureInfo CI = getParamAttributes(I).getCaptureInfo();
    if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
      CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
    if (capturesAnything(CI.getRetComponents() & ~CI.getOtherComponents()))
      return true;
  }
  return false;
}
742 
743 //===----------------------------------------------------------------------===//
744 //                        CallInst Implementation
745 //===----------------------------------------------------------------------===//
746 
/// Shared constructor body: install the callee, arguments and operand
/// bundles into the pre-allocated operand list. The operand count must have
/// been sized as args + bundle inputs + 1 (callee) by the caller.
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  // Argument count and types must match the function type (extra arguments
  // are allowed only for varargs callees).
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}
775 
// Initialize a zero-argument CallInst; the single operand is the callee.
void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  // No argument list was supplied, so the callee must take no parameters.
  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}
785 
// Construct a zero-argument call to \p Func, inserted at \p InsertBefore.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   AllocInfo AllocInfo, InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
               InsertBefore) {
  init(Ty, Func, Name);
}
792 
// Copy constructor: duplicates operands, bundle bookkeeping, tail-call kind,
// calling convention, and optional-data flags from \p CI. The clone is not
// inserted anywhere.
CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
  assert(getNumOperands() == CI.getNumOperands() &&
         "Wrong number of operands allocated");
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  // Bundle operand info records the [begin,end) slices of bundle inputs.
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}
805 
Create(CallInst * CI,ArrayRef<OperandBundleDef> OpB,InsertPosition InsertPt)806 CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
807                            InsertPosition InsertPt) {
808   std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
809 
810   auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
811                                  Args, OpB, CI->getName(), InsertPt);
812   NewCI->setTailCallKind(CI->getTailCallKind());
813   NewCI->setCallingConv(CI->getCallingConv());
814   NewCI->SubclassOptionalData = CI->SubclassOptionalData;
815   NewCI->setAttributes(CI->getAttributes());
816   NewCI->setDebugLoc(CI->getDebugLoc());
817   return NewCI;
818 }
819 
// Update profile weight for call instruction by scaling it using the ratio
// of S/T. The meaning of "branch_weights" metadata for call instruction is
// transferred to represent call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  // A zero denominator would divide by zero; leave the profile data untouched.
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}
834 
835 //===----------------------------------------------------------------------===//
836 //                        InvokeInst Implementation
837 //===----------------------------------------------------------------------===//
838 
// Initialize a freshly-allocated InvokeInst: record the function type, copy
// the argument and bundle operands, and wire up the normal/unwind successors
// and the callee (the last three operands).
void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert(getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  // Each fixed argument must match its declared parameter type.
  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  // Bundle inputs sit between the arguments and the three trailing operands
  // (normal dest, unwind dest, callee).
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}
873 
// Copy constructor: duplicates operands, bundle bookkeeping, calling
// convention, and optional-data flags from \p II. Not inserted anywhere.
InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
  assert(getNumOperands() == II.getNumOperands() &&
         "Wrong number of operands allocated");
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}
884 
Create(InvokeInst * II,ArrayRef<OperandBundleDef> OpB,InsertPosition InsertPt)885 InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
886                                InsertPosition InsertPt) {
887   std::vector<Value *> Args(II->arg_begin(), II->arg_end());
888 
889   auto *NewII = InvokeInst::Create(
890       II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
891       II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
892   NewII->setCallingConv(II->getCallingConv());
893   NewII->SubclassOptionalData = II->SubclassOptionalData;
894   NewII->setAttributes(II->getAttributes());
895   NewII->setDebugLoc(II->getDebugLoc());
896   return NewII;
897 }
898 
// Return the landingpad of this invoke's unwind destination; the cast asserts
// that the first non-PHI instruction there is indeed a LandingPadInst.
LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
}
902 
// Scale this invoke's profile data by the ratio S/T (see the CallInst
// counterpart above for the semantics).
void InvokeInst::updateProfWeight(uint64_t S, uint64_t T) {
  // A zero denominator would divide by zero; leave the profile data untouched.
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}
914 
915 //===----------------------------------------------------------------------===//
916 //                        CallBrInst Implementation
917 //===----------------------------------------------------------------------===//
918 
// Initialize a freshly-allocated CallBrInst: record the function type, copy
// argument and bundle operands, then wire up the fallthrough destination, the
// indirect destinations, and the callee (the trailing operands).
void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert(getNumOperands() == ComputeNumOperands(Args.size(),
                                                IndirectDests.size(),
                                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  // Each fixed argument must match its declared parameter type.
  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  // Bundle inputs precede the default dest, indirect dests, and callee.
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}
957 
// Copy constructor: duplicates operands, bundle bookkeeping, calling
// convention, optional-data flags, and the indirect-destination count.
CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               AllocInfo) {
  assert(getNumOperands() == CBI.getNumOperands() &&
         "Wrong number of operands allocated");
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}
970 
Create(CallBrInst * CBI,ArrayRef<OperandBundleDef> OpB,InsertPosition InsertPt)971 CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
972                                InsertPosition InsertPt) {
973   std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
974 
975   auto *NewCBI = CallBrInst::Create(
976       CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
977       CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
978   NewCBI->setCallingConv(CBI->getCallingConv());
979   NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
980   NewCBI->setAttributes(CBI->getAttributes());
981   NewCBI->setDebugLoc(CBI->getDebugLoc());
982   NewCBI->NumIndirectDests = CBI->NumIndirectDests;
983   return NewCBI;
984 }
985 
986 //===----------------------------------------------------------------------===//
987 //                        ReturnInst Implementation
988 //===----------------------------------------------------------------------===//
989 
// Copy constructor: a ret has zero operands (void return) or one (the value).
ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  AllocInfo) {
  assert(getNumOperands() == RI.getNumOperands() &&
         "Wrong number of operands allocated");
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}
999 
// Construct a ret; \p retVal may be null for a void return, in which case no
// operand is stored.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}
1007 
1008 //===----------------------------------------------------------------------===//
1009 //                        ResumeInst Implementation
1010 //===----------------------------------------------------------------------===//
1011 
// Copy constructor: a resume has exactly one operand, the exception value.
ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  AllocMarker) {
  Op<0>() = RI.Op<0>();
}
1017 
// Construct a resume that re-raises the exception value \p Exn.
ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  AllocMarker, InsertBefore) {
  Op<0>() = Exn;
}
1023 
1024 //===----------------------------------------------------------------------===//
1025 //                        CleanupReturnInst Implementation
1026 //===----------------------------------------------------------------------===//
1027 
// Copy constructor: copies the opaque subclass-data bits (which encode
// whether an unwind destination is present), the cleanup pad operand, and
// the optional unwind-destination operand.
CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
                                     AllocInfo AllocInfo)
    : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
  assert(getNumOperands() == CRI.getNumOperands() &&
         "Wrong number of operands allocated");
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}
1039 
// Record the cleanup pad and, when present, the unwind destination; the
// UnwindDestField subclass bit marks whether operand 1 exists.
void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}
1048 
// Construct a cleanupret from \p CleanupPad, optionally unwinding to
// \p UnwindBB (null means unwind to caller).
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     AllocInfo AllocInfo,
                                     InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet, AllocInfo, InsertBefore) {
  init(CleanupPad, UnwindBB);
}
1056 
1057 //===----------------------------------------------------------------------===//
1058 //                        CatchReturnInst Implementation
1059 //===----------------------------------------------------------------------===//
// Record the catch pad (operand 0) and the successor block (operand 1).
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}
1064 
// Copy constructor: duplicates the catch pad and successor operands.
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  AllocMarker) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}
1071 
// Construct a catchret from \p CatchPad that branches to \p BB.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  AllocMarker, InsertBefore) {
  init(CatchPad, BB);
}
1078 
1079 //===----------------------------------------------------------------------===//
1080 //                       CatchSwitchInst Implementation
1081 //===----------------------------------------------------------------------===//
1082 
// Construct a catchswitch on \p ParentPad, optionally unwinding to
// \p UnwindDest, reserving space for \p NumReservedValues handlers.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
                  InsertBefore) {
  // Reserve an extra slot for the unwind destination when present; the +1
  // accounts for the parent-pad operand itself.
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
1094 
// Copy constructor: re-runs init to allocate hung-off uses, then copies every
// operand after the parent pad (handlers and, if present, the unwind dest).
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
  NumUserOperands = CSI.NumUserOperands;
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  // Operand 0 (the parent pad) was already set by init; copy the rest.
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}
1105 
// Allocate the hung-off use list and record the parent pad and (optional)
// unwind destination; the UnwindDestField bit marks the latter's presence.
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  // 2 initial operands with an unwind dest (pad + dest), 1 without.
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}
1120 
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  // Nothing to do while the reserved hung-off space still fits the request.
  if (ReservedSpace >= NumOperands + Size)
    return;
  // NOTE(review): `Size / 2` binds tighter than `+`, so this reserves roughly
  // 2 * NumOperands (plus Size) rather than 2 * (NumOperands + Size) -- this
  // matches the doubling policy described above; confirm intent if callers
  // ever pass a large Size.
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
1131 
// Append \p Handler to the handler list, growing the operand storage first
// if needed.
void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}
1139 
removeHandler(handler_iterator HI)1140 void CatchSwitchInst::removeHandler(handler_iterator HI) {
1141   // Move all subsequent handlers up one.
1142   Use *EndDst = op_end() - 1;
1143   for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1144     *CurDst = *(CurDst + 1);
1145   // Null out the last handler use.
1146   *EndDst = nullptr;
1147 
1148   setNumHungOffUseOperands(getNumOperands() - 1);
1149 }
1150 
1151 //===----------------------------------------------------------------------===//
1152 //                        FuncletPadInst Implementation
1153 //===----------------------------------------------------------------------===//
// Initialize a funclet pad: copy the argument operands, set the parent pad
// (the last operand), and name the instruction.
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}
1161 
// Copy constructor: duplicates all operands and re-sets the parent pad.
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
    : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
  assert(getNumOperands() == FPI.getNumOperands() &&
         "Wrong number of operands allocated");
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}
1169 
// Construct a funclet pad (catchpad/cleanuppad per \p Op) with the given
// parent pad and arguments.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, AllocInfo AllocInfo,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
  init(ParentPad, Args, NameStr);
}
1177 
1178 //===----------------------------------------------------------------------===//
1179 //                      UnreachableInst Implementation
1180 //===----------------------------------------------------------------------===//
1181 
// Construct an unreachable terminator; it has no operands.
UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
                  AllocMarker, InsertBefore) {}
1186 
1187 //===----------------------------------------------------------------------===//
1188 //                        BranchInst Implementation
1189 //===----------------------------------------------------------------------===//
1190 
// Debug-only sanity check: a conditional branch must branch on an i1 value.
void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}
1196 
// Construct an unconditional branch to \p IfTrue. Branch operands are laid
// out from the end, so the (single) successor lives at Op<-1>.
BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
1204 
// Construct a conditional branch. Operands, from the end: Op<-1> = true
// successor, Op<-2> = false successor, Op<-3> = condition.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       AllocInfo AllocInfo, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1217 
// Copy constructor: handles both the 1-operand (unconditional) and 3-operand
// (conditional) forms.
BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  AllocInfo) {
  assert(getNumOperands() == BI.getNumOperands() &&
         "Wrong number of operands allocated");
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}
1232 
// Exchange the true and false successors of a conditional branch, keeping
// any branch-weight profile metadata consistent with the new order.
void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
1242 
1243 //===----------------------------------------------------------------------===//
1244 //                        AllocaInst Implementation
1245 //===----------------------------------------------------------------------===//
1246 
getAISize(LLVMContext & Context,Value * Amt)1247 static Value *getAISize(LLVMContext &Context, Value *Amt) {
1248   if (!Amt)
1249     Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1250   else {
1251     assert(!isa<BasicBlock>(Amt) &&
1252            "Passed basic block into allocation size parameter! Use other ctor");
1253     assert(Amt->getType()->isIntegerTy() &&
1254            "Allocation array size is not an integer!");
1255   }
1256   return Amt;
1257 }
1258 
computeAllocaDefaultAlign(Type * Ty,InsertPosition Pos)1259 static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos) {
1260   assert(Pos.isValid() &&
1261          "Insertion position cannot be null when alignment not provided!");
1262   BasicBlock *BB = Pos.getBasicBlock();
1263   assert(BB->getParent() &&
1264          "BB must be in a Function when alignment not provided!");
1265   const DataLayout &DL = BB->getDataLayout();
1266   return DL.getPrefTypeAlign(Ty);
1267 }
1268 
// Convenience constructor: single-element alloca (no array size).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1272 
// Convenience constructor: derives the alignment from the DataLayout's
// preferred alignment for \p Ty.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}
1278 
// Primary constructor: allocates \p Ty in address space \p AddrSpace with an
// explicit alignment; the single operand is the (normalized) array size.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       InsertPosition InsertBefore)
    : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
1289 
isArrayAllocation() const1290 bool AllocaInst::isArrayAllocation() const {
1291   if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1292     return !CI->isOne();
1293   return true;
1294 }
1295 
1296 /// isStaticAlloca - Return true if this alloca is in the entry block of the
1297 /// function and is a constant size.  If so, the code generator will fold it
1298 /// into the prolog/epilog code, so it is basically free.
isStaticAlloca() const1299 bool AllocaInst::isStaticAlloca() const {
1300   // Must be constant size.
1301   if (!isa<ConstantInt>(getArraySize())) return false;
1302 
1303   // Must be in the entry block.
1304   const BasicBlock *Parent = getParent();
1305   return Parent->isEntryBlock() && !isUsedWithInAlloca();
1306 }
1307 
1308 //===----------------------------------------------------------------------===//
1309 //                           LoadInst Implementation
1310 //===----------------------------------------------------------------------===//
1311 
// Debug-only sanity check: the loaded-from operand must be a pointer.
void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
}
1316 
computeLoadStoreDefaultAlign(Type * Ty,InsertPosition Pos)1317 static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos) {
1318   assert(Pos.isValid() &&
1319          "Insertion position cannot be null when alignment not provided!");
1320   BasicBlock *BB = Pos.getBasicBlock();
1321   assert(BB->getParent() &&
1322          "BB must be in a Function when alignment not provided!");
1323   const DataLayout &DL = BB->getDataLayout();
1324   return DL.getABITypeAlign(Ty);
1325 }
1326 
// Convenience constructor: non-volatile load.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1330 
// Convenience constructor: derives the alignment from the DataLayout's ABI
// alignment for \p Ty.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1335 
// Convenience constructor: non-atomic load with explicit alignment.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}
1340 
// Primary constructor: load of type \p Ty from \p Ptr with explicit
// volatility, alignment, atomic ordering, and sync scope.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   InsertPosition InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}
1351 
1352 //===----------------------------------------------------------------------===//
1353 //                           StoreInst Implementation
1354 //===----------------------------------------------------------------------===//
1355 
// Debug-only sanity check: both operands present, and the address operand
// (operand 1) must be a pointer.
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
}
1361 
// Convenience constructor: non-volatile store.
StoreInst::StoreInst(Value *val, Value *addr, InsertPosition InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1364 
// Convenience constructor: derives the alignment from the DataLayout's ABI
// alignment for the stored value's type.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     InsertPosition InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}
1370 
// Convenience constructor: non-atomic store with explicit alignment.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     InsertPosition InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}
1375 
// Primary constructor: store \p val to \p addr with explicit volatility,
// alignment, atomic ordering, and sync scope. Operand 0 is the value,
// operand 1 the address.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
                  InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}
1388 
1389 //===----------------------------------------------------------------------===//
1390 //                       AtomicCmpXchgInst Implementation
1391 //===----------------------------------------------------------------------===//
1392 
// Initialize a cmpxchg: operand 0 = pointer, 1 = expected value, 2 = new
// value, plus the success/failure orderings, sync scope, and alignment.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}
1412 
// cmpxchg yields a { <value type>, i1 } pair: the value loaded from memory
// plus a success flag — hence the StructType result type below.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     InsertPosition InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, AllocMarker, InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1424 
1425 //===----------------------------------------------------------------------===//
1426 //                       AtomicRMWInst Implementation
1427 //===----------------------------------------------------------------------===//
1428 
// Shared initialization for atomicrmw: wires up the pointer and value
// operands plus the atomic parameters. atomicrmw requires a genuinely
// atomic ordering (neither NotAtomic nor Unordered is allowed).
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "atomicrmw instructions can only be atomic.");
  assert(Ordering != AtomicOrdering::Unordered &&
         "atomicrmw instructions cannot be unordered.");
  Op<0>() = Ptr; // Address operated on.
  Op<1>() = Val; // Right-hand operand of the read-modify-write.
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}
1449 
// atomicrmw returns the old value stored at the address, so the result type
// matches the value operand's type.
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
1456 
getOperationName(BinOp Op)1457 StringRef AtomicRMWInst::getOperationName(BinOp Op) {
1458   switch (Op) {
1459   case AtomicRMWInst::Xchg:
1460     return "xchg";
1461   case AtomicRMWInst::Add:
1462     return "add";
1463   case AtomicRMWInst::Sub:
1464     return "sub";
1465   case AtomicRMWInst::And:
1466     return "and";
1467   case AtomicRMWInst::Nand:
1468     return "nand";
1469   case AtomicRMWInst::Or:
1470     return "or";
1471   case AtomicRMWInst::Xor:
1472     return "xor";
1473   case AtomicRMWInst::Max:
1474     return "max";
1475   case AtomicRMWInst::Min:
1476     return "min";
1477   case AtomicRMWInst::UMax:
1478     return "umax";
1479   case AtomicRMWInst::UMin:
1480     return "umin";
1481   case AtomicRMWInst::FAdd:
1482     return "fadd";
1483   case AtomicRMWInst::FSub:
1484     return "fsub";
1485   case AtomicRMWInst::FMax:
1486     return "fmax";
1487   case AtomicRMWInst::FMin:
1488     return "fmin";
1489   case AtomicRMWInst::FMaximum:
1490     return "fmaximum";
1491   case AtomicRMWInst::FMinimum:
1492     return "fminimum";
1493   case AtomicRMWInst::UIncWrap:
1494     return "uinc_wrap";
1495   case AtomicRMWInst::UDecWrap:
1496     return "udec_wrap";
1497   case AtomicRMWInst::USubCond:
1498     return "usub_cond";
1499   case AtomicRMWInst::USubSat:
1500     return "usub_sat";
1501   case AtomicRMWInst::BAD_BINOP:
1502     return "<invalid operation>";
1503   }
1504 
1505   llvm_unreachable("invalid atomicrmw operation");
1506 }
1507 
1508 //===----------------------------------------------------------------------===//
1509 //                       FenceInst Implementation
1510 //===----------------------------------------------------------------------===//
1511 
// A fence has no operands and produces no value (void type); it only
// carries an atomic ordering and a synchronization scope.
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
1518 
1519 //===----------------------------------------------------------------------===//
1520 //                       GetElementPtrInst Implementation
1521 //===----------------------------------------------------------------------===//
1522 
init(Value * Ptr,ArrayRef<Value * > IdxList,const Twine & Name)1523 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1524                              const Twine &Name) {
1525   assert(getNumOperands() == 1 + IdxList.size() &&
1526          "NumOperands not initialized?");
1527   Op<0>() = Ptr;
1528   llvm::copy(IdxList, op_begin() + 1);
1529   setName(Name);
1530 }
1531 
// Copy constructor used by cloning: duplicates the operands, the source and
// result element types, and the wrap-flag bits (SubclassOptionalData).
GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
                                     AllocInfo AllocInfo)
    : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  assert(getNumOperands() == GEPI.getNumOperands() &&
         "Wrong number of operands allocated");
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  // Carries inbounds/nuw/nusw flags.
  SubclassOptionalData = GEPI.SubclassOptionalData;
}
1542 
getTypeAtIndex(Type * Ty,Value * Idx)1543 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
1544   if (auto *Struct = dyn_cast<StructType>(Ty)) {
1545     if (!Struct->indexValid(Idx))
1546       return nullptr;
1547     return Struct->getTypeAtIndex(Idx);
1548   }
1549   if (!Idx->getType()->isIntOrIntVectorTy())
1550     return nullptr;
1551   if (auto *Array = dyn_cast<ArrayType>(Ty))
1552     return Array->getElementType();
1553   if (auto *Vector = dyn_cast<VectorType>(Ty))
1554     return Vector->getElementType();
1555   return nullptr;
1556 }
1557 
getTypeAtIndex(Type * Ty,uint64_t Idx)1558 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
1559   if (auto *Struct = dyn_cast<StructType>(Ty)) {
1560     if (Idx >= Struct->getNumElements())
1561       return nullptr;
1562     return Struct->getElementType(Idx);
1563   }
1564   if (auto *Array = dyn_cast<ArrayType>(Ty))
1565     return Array->getElementType();
1566   if (auto *Vector = dyn_cast<VectorType>(Ty))
1567     return Vector->getElementType();
1568   return nullptr;
1569 }
1570 
1571 template <typename IndexTy>
getIndexedTypeInternal(Type * Ty,ArrayRef<IndexTy> IdxList)1572 static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
1573   if (IdxList.empty())
1574     return Ty;
1575   for (IndexTy V : IdxList.slice(1)) {
1576     Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
1577     if (!Ty)
1578       return Ty;
1579   }
1580   return Ty;
1581 }
1582 
// Value-index overload; forwards to the shared template implementation.
Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
1586 
// Constant-index overload; forwards to the shared template implementation.
Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
1591 
// Integer-index overload; forwards to the shared template implementation.
Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
1595 
1596 /// hasAllZeroIndices - Return true if all of the indices of this GEP are
1597 /// zeros.  If so, the result pointer and the first operand have the same
1598 /// value, just potentially different types.
hasAllZeroIndices() const1599 bool GetElementPtrInst::hasAllZeroIndices() const {
1600   for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1601     if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1602       if (!CI->isZero()) return false;
1603     } else {
1604       return false;
1605     }
1606   }
1607   return true;
1608 }
1609 
1610 /// hasAllConstantIndices - Return true if all of the indices of this GEP are
1611 /// constant integers.  If so, the result pointer and the first operand have
1612 /// a constant offset between them.
hasAllConstantIndices() const1613 bool GetElementPtrInst::hasAllConstantIndices() const {
1614   for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1615     if (!isa<ConstantInt>(getOperand(i)))
1616       return false;
1617   }
1618   return true;
1619 }
1620 
// Store the no-wrap flags (inbounds/nusw/nuw) in the subclass data bits.
void GetElementPtrInst::setNoWrapFlags(GEPNoWrapFlags NW) {
  SubclassOptionalData = NW.getRaw();
}
1624 
setIsInBounds(bool B)1625 void GetElementPtrInst::setIsInBounds(bool B) {
1626   GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
1627   if (B)
1628     NW |= GEPNoWrapFlags::inBounds();
1629   else
1630     NW = NW.withoutInBounds();
1631   setNoWrapFlags(NW);
1632 }
1633 
// Delegate to the generic GEPOperator accessor for the no-wrap flags.
GEPNoWrapFlags GetElementPtrInst::getNoWrapFlags() const {
  return cast<GEPOperator>(this)->getNoWrapFlags();
}
1637 
// Delegate to the generic GEPOperator accessor for the inbounds flag.
bool GetElementPtrInst::isInBounds() const {
  return cast<GEPOperator>(this)->isInBounds();
}
1641 
// Delegate to GEPOperator: true if the nusw flag is set.
bool GetElementPtrInst::hasNoUnsignedSignedWrap() const {
  return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
}
1645 
// Delegate to GEPOperator: true if the nuw flag is set.
bool GetElementPtrInst::hasNoUnsignedWrap() const {
  return cast<GEPOperator>(this)->hasNoUnsignedWrap();
}
1649 
// Fold this GEP's constant indices into Offset; returns false if any index
// is non-constant. Delegates to the generic GEPOperator implementation.
bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
                                                 APInt &Offset) const {
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}
1655 
// Decompose this GEP into a constant offset plus per-variable scaled
// offsets. Delegates to the generic GEPOperator implementation.
bool GetElementPtrInst::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    SmallMapVector<Value *, APInt, 4> &VariableOffsets,
    APInt &ConstantOffset) const {
  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
                                                ConstantOffset);
}
1664 
1665 //===----------------------------------------------------------------------===//
1666 //                           ExtractElementInst Implementation
1667 //===----------------------------------------------------------------------===//
1668 
// extractelement yields a scalar of the vector's element type. Operand 0 is
// the vector, operand 1 the index.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       InsertPosition InsertBef)
    : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                  ExtractElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
1680 
isValidOperands(const Value * Val,const Value * Index)1681 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1682   if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1683     return false;
1684   return true;
1685 }
1686 
1687 //===----------------------------------------------------------------------===//
1688 //                           InsertElementInst Implementation
1689 //===----------------------------------------------------------------------===//
1690 
// insertelement yields a vector of the same type as its first operand.
// Operand 0 is the vector, operand 1 the scalar, operand 2 the index.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     InsertPosition InsertBef)
    : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
1702 
isValidOperands(const Value * Vec,const Value * Elt,const Value * Index)1703 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
1704                                         const Value *Index) {
1705   if (!Vec->getType()->isVectorTy())
1706     return false;   // First operand of insertelement must be vector type.
1707 
1708   if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1709     return false;// Second operand of insertelement must be vector element type.
1710 
1711   if (!Index->getType()->isIntegerTy())
1712     return false;  // Third operand of insertelement must be i32.
1713   return true;
1714 }
1715 
1716 //===----------------------------------------------------------------------===//
1717 //                      ShuffleVectorInst Implementation
1718 //===----------------------------------------------------------------------===//
1719 
// Build a poison value of V's type, used as the missing second operand in
// the single-input shufflevector constructors below.
static Value *createPlaceholderForShuffleVector(Value *V) {
  assert(V && "Cannot create placeholder of nullptr V");
  return PoisonValue::get(V->getType());
}
1724 
// Single-input form with a constant mask: the second vector is a poison
// placeholder of V1's type.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     InsertPosition InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}
1729 
// Single-input form with an integer mask: the second vector is a poison
// placeholder of V1's type.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}
1735 
// Two-input form with a constant mask. The result vector has V1's element
// type and the mask's element count; the constant mask is converted to the
// internal integer-mask representation.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, AllocMarker, InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
1753 
// Two-input form with an integer mask. The result vector has V1's element
// type, the mask's length, and V1's scalability.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, AllocMarker, InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
1768 
commute()1769 void ShuffleVectorInst::commute() {
1770   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
1771   int NumMaskElts = ShuffleMask.size();
1772   SmallVector<int, 16> NewMask(NumMaskElts);
1773   for (int i = 0; i != NumMaskElts; ++i) {
1774     int MaskElt = getMaskValue(i);
1775     if (MaskElt == PoisonMaskElem) {
1776       NewMask[i] = PoisonMaskElem;
1777       continue;
1778     }
1779     assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
1780     MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1781     NewMask[i] = MaskElt;
1782   }
1783   setShuffleMask(NewMask);
1784   Op<0>().swap(Op<1>());
1785 }
1786 
isValidOperands(const Value * V1,const Value * V2,ArrayRef<int> Mask)1787 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
1788                                         ArrayRef<int> Mask) {
1789   // V1 and V2 must be vectors of the same type.
1790   if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
1791     return false;
1792 
1793   // Make sure the mask elements make sense.
1794   int V1Size =
1795       cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
1796   for (int Elem : Mask)
1797     if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
1798       return false;
1799 
1800   if (isa<ScalableVectorType>(V1->getType()))
1801     if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
1802       return false;
1803 
1804   return true;
1805 }
1806 
// Validate shufflevector operands against a constant-vector mask. The mask
// may be undef/zeroinitializer, a ConstantVector, or a ConstantDataSequential;
// all in-bounds checks are against twice the input vector length.
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32, and must be the same kind of vector as the
  // input vectors
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  // NOTE: Through vector ConstantInt we have the potential to support more
  // than just zero splat masks but that requires a LangRef change.
  if (isa<ScalableVectorType>(MaskTy))
    return false;

  unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();

  // A scalar ConstantInt can only appear for a 1-element mask.
  if (const auto *CI = dyn_cast<ConstantInt>(Mask))
    return !CI->uge(V1Size * 2);

  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    // Every element must be an in-range integer or undef.
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  // Any other constant kind is not a valid mask.
  return false;
}
1856 
// Expand a constant shuffle mask into a vector of ints: -1 for undef lanes,
// otherwise the lane's integer value. Scalable masks must be undef or
// zeroinitializer and expand to a splat of -1 or 0 respectively.
void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();

  if (isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) {
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    Result.append(EC.getKnownMinValue(), MaskVal);
    return;
  }

  assert(!EC.isScalable() &&
         "Scalable vector shuffle mask must be undef or zeroinitializer");

  unsigned NumElts = EC.getFixedValue();

  Result.reserve(NumElts);

  // Fast path for masks stored as packed constant data.
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  // General path: pull each aggregate element out individually.
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}
1885 
// Store the integer mask and keep the constant (bitcode) form in sync.
void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
  ShuffleMask.assign(Mask.begin(), Mask.end());
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
}
1890 
convertShuffleMaskForBitcode(ArrayRef<int> Mask,Type * ResultTy)1891 Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
1892                                                           Type *ResultTy) {
1893   Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
1894   if (isa<ScalableVectorType>(ResultTy)) {
1895     assert(all_equal(Mask) && "Unexpected shuffle");
1896     Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
1897     if (Mask[0] == 0)
1898       return Constant::getNullValue(VecTy);
1899     return PoisonValue::get(VecTy);
1900   }
1901   SmallVector<Constant *, 16> MaskConst;
1902   for (int Elem : Mask) {
1903     if (Elem == PoisonMaskElem)
1904       MaskConst.push_back(PoisonValue::get(Int32Ty));
1905     else
1906       MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
1907   }
1908   return ConstantVector::get(MaskConst);
1909 }
1910 
isSingleSourceMaskImpl(ArrayRef<int> Mask,int NumOpElts)1911 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1912   assert(!Mask.empty() && "Shuffle mask must contain elements");
1913   bool UsesLHS = false;
1914   bool UsesRHS = false;
1915   for (int I : Mask) {
1916     if (I == -1)
1917       continue;
1918     assert(I >= 0 && I < (NumOpElts * 2) &&
1919            "Out-of-bounds shuffle mask element");
1920     UsesLHS |= (I < NumOpElts);
1921     UsesRHS |= (I >= NumOpElts);
1922     if (UsesLHS && UsesRHS)
1923       return false;
1924   }
1925   // Allow for degenerate case: completely undef mask means neither source is used.
1926   return UsesLHS || UsesRHS;
1927 }
1928 
// Public wrapper: true if every defined element picks from one source.
bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, NumSrcElts);
}
1934 
isIdentityMaskImpl(ArrayRef<int> Mask,int NumOpElts)1935 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1936   if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1937     return false;
1938   for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1939     if (Mask[i] == -1)
1940       continue;
1941     if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1942       return false;
1943   }
1944   return true;
1945 }
1946 
// Public wrapper: identity requires the mask length to equal the source
// element count (no widening/narrowing).
bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, NumSrcElts);
}
1954 
isReverseMask(ArrayRef<int> Mask,int NumSrcElts)1955 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts) {
1956   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1957     return false;
1958   if (!isSingleSourceMask(Mask, NumSrcElts))
1959     return false;
1960 
1961   // The number of elements in the mask must be at least 2.
1962   if (NumSrcElts < 2)
1963     return false;
1964 
1965   for (int I = 0, E = Mask.size(); I < E; ++I) {
1966     if (Mask[I] == -1)
1967       continue;
1968     if (Mask[I] != (NumSrcElts - 1 - I) &&
1969         Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
1970       return false;
1971   }
1972   return true;
1973 }
1974 
isZeroEltSplatMask(ArrayRef<int> Mask,int NumSrcElts)1975 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts) {
1976   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1977     return false;
1978   if (!isSingleSourceMask(Mask, NumSrcElts))
1979     return false;
1980   for (int I = 0, E = Mask.size(); I < E; ++I) {
1981     if (Mask[I] == -1)
1982       continue;
1983     if (Mask[I] != 0 && Mask[I] != NumSrcElts)
1984       return false;
1985   }
1986   return true;
1987 }
1988 
isSelectMask(ArrayRef<int> Mask,int NumSrcElts)1989 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts) {
1990   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1991     return false;
1992   // Select is differentiated from identity. It requires using both sources.
1993   if (isSingleSourceMask(Mask, NumSrcElts))
1994     return false;
1995   for (int I = 0, E = Mask.size(); I < E; ++I) {
1996     if (Mask[I] == -1)
1997       continue;
1998     if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
1999       return false;
2000   }
2001   return true;
2002 }
2003 
// True if the mask interleaves matching even (or odd) lanes from the two
// sources, as produced by a vector transpose step (e.g. AArch64 trn1/trn2).
bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts) {
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int Sz = Mask.size();
  if (Sz < 2 || !isPowerOf2_32(Sz))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumSrcElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int I = 2; I < Sz; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      return false; // Undefs are not allowed in a transpose mask.
    int MaskEltPrevVal = Mask[I - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}
2039 
// True if the mask extracts a contiguous run starting at some offset into
// the concatenation of the two sources (a "splice"); on success, Index is
// set to that starting offset within the first source.
bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      continue;

    if (StartIndex == -1) {
      // Don't support a StartIndex that begins in the second input, or if the
      // first non-undef index would access below the StartIndex.
      if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
        return false;

      StartIndex = MaskEltVal - I;
      continue;
    }

    // Splice is sequential starting from StartIndex.
    if (MaskEltVal != (StartIndex + I))
      return false;
  }

  // All-undef masks are not splices.
  if (StartIndex == -1)
    return false;

  // NOTE: This accepts StartIndex == 0 (COPY).
  Index = StartIndex;
  return true;
}
2073 
// True if the mask extracts a contiguous subvector (of the mask's length)
// from one source; on success, Index is set to the extraction start lane.
bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    // Modulo folds indices into either source to a common offset; every
    // defined element must share the same offset from its position.
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  // The extracted range must lie entirely inside the source.
  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}
2102 
// True if the mask inserts a contiguous subvector of one source into the
// other, with the surrounding lanes an identity copy; on success NumSubElts
// and Index describe the inserted span.
bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
                                              int NumSrcElts, int &NumSubElts,
                                              int &Index) {
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true; // Src0 lanes all keep their own position.
  bool Src1Identity = true; // Src1 lanes all keep their own position.

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements is inplace within its own
  // span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements is inplace within its own
  // span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
2175 
isIdentityWithPadding() const2176 bool ShuffleVectorInst::isIdentityWithPadding() const {
2177   // FIXME: Not currently possible to express a shuffle mask for a scalable
2178   // vector for this case.
2179   if (isa<ScalableVectorType>(getType()))
2180     return false;
2181 
2182   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2183   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2184   if (NumMaskElts <= NumOpElts)
2185     return false;
2186 
2187   // The first part of the mask must choose elements from exactly 1 source op.
2188   ArrayRef<int> Mask = getShuffleMask();
2189   if (!isIdentityMaskImpl(Mask, NumOpElts))
2190     return false;
2191 
2192   // All extending must be with undef elements.
2193   for (int i = NumOpElts; i < NumMaskElts; ++i)
2194     if (Mask[i] != -1)
2195       return false;
2196 
2197   return true;
2198 }
2199 
isIdentityWithExtract() const2200 bool ShuffleVectorInst::isIdentityWithExtract() const {
2201   // FIXME: Not currently possible to express a shuffle mask for a scalable
2202   // vector for this case.
2203   if (isa<ScalableVectorType>(getType()))
2204     return false;
2205 
2206   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2207   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2208   if (NumMaskElts >= NumOpElts)
2209     return false;
2210 
2211   return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2212 }
2213 
isConcat() const2214 bool ShuffleVectorInst::isConcat() const {
2215   // Vector concatenation is differentiated from identity with padding.
2216   if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
2217     return false;
2218 
2219   // FIXME: Not currently possible to express a shuffle mask for a scalable
2220   // vector for this case.
2221   if (isa<ScalableVectorType>(getType()))
2222     return false;
2223 
2224   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2225   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2226   if (NumMaskElts != NumOpElts * 2)
2227     return false;
2228 
2229   // Use the mask length rather than the operands' vector lengths here. We
2230   // already know that the shuffle returns a vector twice as long as the inputs,
2231   // and neither of the inputs are undef vectors. If the mask picks consecutive
2232   // elements from both inputs, then this is a concatenation of the inputs.
2233   return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2234 }
2235 
isReplicationMaskWithParams(ArrayRef<int> Mask,int ReplicationFactor,int VF)2236 static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
2237                                         int ReplicationFactor, int VF) {
2238   assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2239          "Unexpected mask size.");
2240 
2241   for (int CurrElt : seq(VF)) {
2242     ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2243     assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2244            "Run out of mask?");
2245     Mask = Mask.drop_front(ReplicationFactor);
2246     if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2247           return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2248         }))
2249       return false;
2250   }
2251   assert(Mask.empty() && "Did not consume the whole mask?");
2252 
2253   return true;
2254 }
2255 
isReplicationMask(ArrayRef<int> Mask,int & ReplicationFactor,int & VF)2256 bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
2257                                           int &ReplicationFactor, int &VF) {
2258   // undef-less case is trivial.
2259   if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2260     ReplicationFactor =
2261         Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2262     if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2263       return false;
2264     VF = Mask.size() / ReplicationFactor;
2265     return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2266   }
2267 
2268   // However, if the mask contains undef's, we have to enumerate possible tuples
2269   // and pick one. There are bounds on replication factor: [1, mask size]
2270   // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2271   // Additionally, mask size is a replication factor multiplied by vector size,
2272   // which further significantly reduces the search space.
2273 
2274   // Before doing that, let's perform basic correctness checking first.
2275   int Largest = -1;
2276   for (int MaskElt : Mask) {
2277     if (MaskElt == PoisonMaskElem)
2278       continue;
2279     // Elements must be in non-decreasing order.
2280     if (MaskElt < Largest)
2281       return false;
2282     Largest = std::max(Largest, MaskElt);
2283   }
2284 
2285   // Prefer larger replication factor if all else equal.
2286   for (int PossibleReplicationFactor :
2287        reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2288     if (Mask.size() % PossibleReplicationFactor != 0)
2289       continue;
2290     int PossibleVF = Mask.size() / PossibleReplicationFactor;
2291     if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2292                                      PossibleVF))
2293       continue;
2294     ReplicationFactor = PossibleReplicationFactor;
2295     VF = PossibleVF;
2296     return true;
2297   }
2298 
2299   return false;
2300 }
2301 
isReplicationMask(int & ReplicationFactor,int & VF) const2302 bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2303                                           int &VF) const {
2304   // Not possible to express a shuffle mask for a scalable vector for this
2305   // case.
2306   if (isa<ScalableVectorType>(getType()))
2307     return false;
2308 
2309   VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2310   if (ShuffleMask.size() % VF != 0)
2311     return false;
2312   ReplicationFactor = ShuffleMask.size() / VF;
2313 
2314   return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2315 }
2316 
isOneUseSingleSourceMask(ArrayRef<int> Mask,int VF)2317 bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
2318   if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2319       Mask.size() % VF != 0)
2320     return false;
2321   for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2322     ArrayRef<int> SubMask = Mask.slice(K, VF);
2323     if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2324       continue;
2325     SmallBitVector Used(VF, false);
2326     for (int Idx : SubMask) {
2327       if (Idx != PoisonMaskElem && Idx < VF)
2328         Used.set(Idx);
2329     }
2330     if (!Used.all())
2331       return false;
2332   }
2333   return true;
2334 }
2335 
/// Return true if this shuffle reads from exactly one source operand and
/// every VF-wide chunk of its mask is either all-poison or uses all VF
/// elements of that source (see the static overload above).
/// NOTE: the previous comment here described a replication mask, which was a
/// copy-paste error.
bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;
  if (!isSingleSourceMask(ShuffleMask, VF))
    return false;

  return isOneUseSingleSourceMask(ShuffleMask, VF);
}
2347 
isInterleave(unsigned Factor)2348 bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2349   FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2350   // shuffle_vector can only interleave fixed length vectors - for scalable
2351   // vectors, see the @llvm.vector.interleave2 intrinsic
2352   if (!OpTy)
2353     return false;
2354   unsigned OpNumElts = OpTy->getNumElements();
2355 
2356   return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2357 }
2358 
/// Return true if Mask interleaves Factor strided sequences: conceptually,
/// mask element (J * Factor + I) equals StartIndexes[I] + J for each
/// sequence I, so each sequence walks consecutive input elements. Undef
/// (negative) mask elements are tolerated as long as the defined elements
/// stay consecutive. On success, StartIndexes holds each sequence's first
/// input index.
bool ShuffleVectorInst::isInterleaveMask(
    ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
    SmallVectorImpl<unsigned> &StartIndexes) {
  unsigned NumElts = Mask.size();
  // The mask must split evenly into Factor sequences.
  if (NumElts % Factor)
    return false;

  unsigned LaneLen = NumElts / Factor;
  // NOTE(review): only power-of-two sequence lengths are recognized here.
  if (!isPowerOf2_32(LaneLen))
    return false;

  StartIndexes.resize(Factor);

  // Check whether each element matches the general interleaved rule.
  // Ignore undef elements, as long as the defined elements match the rule.
  // Outer loop processes all factors (x, y, z in the above example)
  unsigned I = 0, J;
  for (; I < Factor; I++) {
    // SavedLaneValue is the last defined value seen before a run of undefs;
    // SavedNoUndefs counts how far back it was (0 = no pending undef run).
    // SavedLaneValue is only read when SavedNoUndefs > 0, i.e. after it has
    // been assigned.
    unsigned SavedLaneValue;
    unsigned SavedNoUndefs = 0;

    // Inner loop processes consecutive accesses (x, x+1... in the example)
    for (J = 0; J < LaneLen - 1; J++) {
      // Lane computes x's position in the Mask
      unsigned Lane = J * Factor + I;
      unsigned NextLane = Lane + Factor;
      int LaneValue = Mask[Lane];
      int NextLaneValue = Mask[NextLane];

      // If both are defined, values must be sequential
      if (LaneValue >= 0 && NextLaneValue >= 0 &&
          LaneValue + 1 != NextLaneValue)
        break;

      // If the next value is undef, save the current one as reference
      if (LaneValue >= 0 && NextLaneValue < 0) {
        SavedLaneValue = LaneValue;
        SavedNoUndefs = 1;
      }

      // Undefs are allowed, but defined elements must still be consecutive:
      // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
      // Verify this by storing the last non-undef followed by an undef
      // Check that following non-undef masks are incremented with the
      // corresponding distance.
      if (SavedNoUndefs > 0 && LaneValue < 0) {
        SavedNoUndefs++;
        if (NextLaneValue >= 0 &&
            SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
          break;
      }
    }

    // The inner loop broke out early: this sequence is not consecutive.
    if (J < LaneLen - 1)
      return false;

    int StartMask = 0;
    if (Mask[I] >= 0) {
      // Check that the start of the I range (J=0) is greater than 0
      StartMask = Mask[I];
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
      // StartMask defined by the last value in lane
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
    } else if (SavedNoUndefs > 0) {
      // StartMask defined by some non-zero value in the j loop
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
    }
    // else StartMask remains set to 0, i.e. all elements are undefs

    if (StartMask < 0)
      return false;
    // We must stay within the vectors; This case can happen with undefs.
    if (StartMask + LaneLen > NumInputElts)
      return false;

    StartIndexes[I] = StartMask;
  }

  return true;
}
2439 
2440 /// Check if the mask is a DE-interleave mask of the given factor
2441 /// \p Factor like:
2442 ///     <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,unsigned Factor,unsigned & Index)2443 bool ShuffleVectorInst::isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,
2444                                                    unsigned Factor,
2445                                                    unsigned &Index) {
2446   // Check all potential start indices from 0 to (Factor - 1).
2447   for (unsigned Idx = 0; Idx < Factor; Idx++) {
2448     unsigned I = 0;
2449 
2450     // Check that elements are in ascending order by Factor. Ignore undef
2451     // elements.
2452     for (; I < Mask.size(); I++)
2453       if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2454         break;
2455 
2456     if (I == Mask.size()) {
2457       Index = Idx;
2458       return true;
2459     }
2460   }
2461 
2462   return false;
2463 }
2464 
2465 /// Try to lower a vector shuffle as a bit rotation.
2466 ///
2467 /// Look for a repeated rotation pattern in each sub group.
2468 /// Returns an element-wise left bit rotation amount or -1 if failed.
matchShuffleAsBitRotate(ArrayRef<int> Mask,int NumSubElts)2469 static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2470   int NumElts = Mask.size();
2471   assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2472 
2473   int RotateAmt = -1;
2474   for (int i = 0; i != NumElts; i += NumSubElts) {
2475     for (int j = 0; j != NumSubElts; ++j) {
2476       int M = Mask[i + j];
2477       if (M < 0)
2478         continue;
2479       if (M < i || M >= i + NumSubElts)
2480         return -1;
2481       int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2482       if (0 <= RotateAmt && Offset != RotateAmt)
2483         return -1;
2484       RotateAmt = Offset;
2485     }
2486   }
2487   return RotateAmt;
2488 }
2489 
isBitRotateMask(ArrayRef<int> Mask,unsigned EltSizeInBits,unsigned MinSubElts,unsigned MaxSubElts,unsigned & NumSubElts,unsigned & RotateAmt)2490 bool ShuffleVectorInst::isBitRotateMask(
2491     ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2492     unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2493   for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2494     int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2495     if (EltRotateAmt < 0)
2496       continue;
2497     RotateAmt = EltRotateAmt * EltSizeInBits;
2498     return true;
2499   }
2500 
2501   return false;
2502 }
2503 
2504 //===----------------------------------------------------------------------===//
2505 //                             InsertValueInst Class
2506 //===----------------------------------------------------------------------===//
2507 
/// Shared constructor logic: record the aggregate and the value to insert as
/// the two operands, copy the index path, and set the name.
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
2526 
/// Copy constructor: duplicates both operands, the index path, and the
/// optional-flags byte (SubclassOptionalData) of the source instruction.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue, AllocMarker),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}
2534 
2535 //===----------------------------------------------------------------------===//
2536 //                             ExtractValueInst Class
2537 //===----------------------------------------------------------------------===//
2538 
/// Shared constructor logic: copy the index path and set the name. The single
/// aggregate operand is installed by the UnaryInstruction base constructor.
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
2549 
/// Copy constructor: duplicates the aggregate operand, the index path, and
/// the optional-flags byte (SubclassOptionalData).
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
                       (BasicBlock *)nullptr),
      Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}
2556 
2557 // getIndexedType - Returns the type of the element that would be extracted
2558 // with an extractvalue instruction with the specified parameters.
2559 //
2560 // A null type is returned if the indices are invalid for the specified
2561 // pointer type.
2562 //
getIndexedType(Type * Agg,ArrayRef<unsigned> Idxs)2563 Type *ExtractValueInst::getIndexedType(Type *Agg,
2564                                        ArrayRef<unsigned> Idxs) {
2565   for (unsigned Index : Idxs) {
2566     // We can't use CompositeType::indexValid(Index) here.
2567     // indexValid() always returns true for arrays because getelementptr allows
2568     // out-of-bounds indices. Since we don't allow those for extractvalue and
2569     // insertvalue we need to check array indexing manually.
2570     // Since the only other types we can index into are struct types it's just
2571     // as easy to check those manually as well.
2572     if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2573       if (Index >= AT->getNumElements())
2574         return nullptr;
2575       Agg = AT->getElementType();
2576     } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2577       if (Index >= ST->getNumElements())
2578         return nullptr;
2579       Agg = ST->getElementType(Index);
2580     } else {
2581       // Not a valid type to index into.
2582       return nullptr;
2583     }
2584   }
2585   return Agg;
2586 }
2587 
2588 //===----------------------------------------------------------------------===//
2589 //                             UnaryOperator Class
2590 //===----------------------------------------------------------------------===//
2591 
/// Construct a unary operator with the given opcode and result type, insert
/// it at InsertBefore, and run the debug-build type checks.
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
                             const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}
2599 
/// Factory: the result type is taken directly from the operand's type.
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, const Twine &Name,
                                     InsertPosition InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}
2604 
/// Debug-build sanity checks on the operand/result types; compiles to a
/// no-op when NDEBUG is defined.
void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    // FNeg requires a floating-point (or FP-vector) operand and returns the
    // same type.
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
2621 
2622 //===----------------------------------------------------------------------===//
2623 //                             BinaryOperator Class
2624 //===----------------------------------------------------------------------===//
2625 
/// Construct a binary operator with the given opcode and result type, insert
/// it at InsertBefore, and run the debug-build type checks.
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
                               const Twine &Name, InsertPosition InsertBefore)
    : Instruction(Ty, iType, AllocMarker, InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}
2634 
/// Debug-build sanity checks: operand types must match each other and the
/// result type, and the type class (integer vs floating point) must suit the
/// opcode. Compiles to a no-op when NDEBUG is defined.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  // Integer arithmetic.
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  // Floating-point arithmetic.
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  // Shifts.
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  // Bitwise logic.
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
2702 
/// Factory for a binary operator; both operands must already share a type,
/// which becomes the result type.
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       InsertPosition InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}
2710 
CreateNeg(Value * Op,const Twine & Name,InsertPosition InsertBefore)2711 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
2712                                           InsertPosition InsertBefore) {
2713   Value *Zero = ConstantInt::get(Op->getType(), 0);
2714   return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
2715                             InsertBefore);
2716 }
2717 
CreateNSWNeg(Value * Op,const Twine & Name,InsertPosition InsertBefore)2718 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2719                                              InsertPosition InsertBefore) {
2720   Value *Zero = ConstantInt::get(Op->getType(), 0);
2721   return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2722 }
2723 
CreateNot(Value * Op,const Twine & Name,InsertPosition InsertBefore)2724 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
2725                                           InsertPosition InsertBefore) {
2726   Constant *C = Constant::getAllOnesValue(Op->getType());
2727   return new BinaryOperator(Instruction::Xor, Op, C,
2728                             Op->getType(), Name, InsertBefore);
2729 }
2730 
2731 // Exchange the two operands to this instruction. This instruction is safe to
2732 // use on any binary instruction and does not modify the semantics of the
2733 // instruction.
swapOperands()2734 bool BinaryOperator::swapOperands() {
2735   if (!isCommutative())
2736     return true; // Can't commute operands
2737   Op<0>().swap(Op<1>());
2738   return false;
2739 }
2740 
2741 //===----------------------------------------------------------------------===//
2742 //                             FPMathOperator Class
2743 //===----------------------------------------------------------------------===//
2744 
getFPAccuracy() const2745 float FPMathOperator::getFPAccuracy() const {
2746   const MDNode *MD =
2747       cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2748   if (!MD)
2749     return 0.0;
2750   ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
2751   return Accuracy->getValueAPF().convertToFloat();
2752 }
2753 
2754 //===----------------------------------------------------------------------===//
2755 //                                CastInst Class
2756 //===----------------------------------------------------------------------===//
2757 
2758 // Just determine if this cast only deals with integral->integral conversion.
isIntegerCast() const2759 bool CastInst::isIntegerCast() const {
2760   switch (getOpcode()) {
2761     default: return false;
2762     case Instruction::ZExt:
2763     case Instruction::SExt:
2764     case Instruction::Trunc:
2765       return true;
2766     case Instruction::BitCast:
2767       return getOperand(0)->getType()->isIntegerTy() &&
2768         getType()->isIntegerTy();
2769   }
2770 }
2771 
2772 /// This function determines if the CastInst does not require any bits to be
2773 /// changed in order to effect the cast. Essentially, it identifies cases where
2774 /// no code gen is necessary for the cast, hence the name no-op cast.  For
2775 /// example, the following are all no-op casts:
2776 /// # bitcast i32* %x to i8*
2777 /// # bitcast <2 x i32> %x to <4 x i16>
2778 /// # ptrtoint i32* %x to i32     ; on 32-bit plaforms only
2779 /// Determine if the described cast is a no-op.
/// Return true if a cast with the given opcode and types would leave every
/// bit of the value unchanged (see the examples in the comment above).
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
    default: llvm_unreachable("Invalid CastOp");
    // All size- or representation-changing casts are conservatively treated
    // as non-noop.
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::AddrSpaceCast:
      // TODO: Target informations may give a more accurate answer here.
      return false;
    case Instruction::BitCast:
      return true;  // BitCast never modifies bits.
    // Pointer<->integer casts are no-ops only when the integer type is
    // exactly pointer-sized per the data layout.
    case Instruction::PtrToInt:
      return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
             DestTy->getScalarSizeInBits();
    case Instruction::IntToPtr:
      return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
             SrcTy->getScalarSizeInBits();
  }
}
2809 
/// Convenience wrapper: query the static overload with this cast's opcode and
/// operand/result types.
bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}
2813 
2814 /// This function determines if a pair of casts can be eliminated and what
2815 /// opcode should be used in the elimination. This assumes that there are two
2816 /// instructions like this:
2817 /// *  %F = firstOpcode SrcTy %x to MidTy
2818 /// *  %S = secondOpcode MidTy %F to DstTy
2819 /// The function returns a resultOpcode so these two casts can be replaced with:
2820 /// *  %Replacement = resultOpcode %SrcTy %x to DstTy
2821 /// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below.  The rows correspond to firstOp, the columns
  // correspond to secondOp.  In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a       FirstClass    n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc.  We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  // Entry legend: 0 = categorically disallowed, 99 = invalid combination
  // (the pair can never occur with a common MidTy), any other value selects
  // the correspondingly numbered case in the switch below.
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // matches MidTy.
      if (DstTy == MidTy)
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 7: {
      // Disable inttoptr/ptrtoint optimization if enabled.
      if (DisableI2pP2iOpt)
        return 0;

      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcTy == DstTy)
        return Instruction::BitCast;
      if (SrcSize < DstSize)
        return firstOp;
      if (SrcSize > DstSize)
        return secondOp;
      return 0;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast
      return Instruction::AddrSpaceCast;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
3034 
Create(Instruction::CastOps op,Value * S,Type * Ty,const Twine & Name,InsertPosition InsertBefore)3035 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3036                            const Twine &Name, InsertPosition InsertBefore) {
3037   assert(castIsValid(op, S, Ty) && "Invalid cast!");
3038   // Construct and return the appropriate CastInst subclass
3039   switch (op) {
3040   case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
3041   case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
3042   case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
3043   case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
3044   case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
3045   case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
3046   case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
3047   case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
3048   case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
3049   case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
3050   case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
3051   case BitCast:
3052     return new BitCastInst(S, Ty, Name, InsertBefore);
3053   case AddrSpaceCast:
3054     return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3055   default:
3056     llvm_unreachable("Invalid opcode provided");
3057   }
3058 }
3059 
CreateZExtOrBitCast(Value * S,Type * Ty,const Twine & Name,InsertPosition InsertBefore)3060 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
3061                                         InsertPosition InsertBefore) {
3062   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3063     return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3064   return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3065 }
3066 
CreateSExtOrBitCast(Value * S,Type * Ty,const Twine & Name,InsertPosition InsertBefore)3067 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
3068                                         InsertPosition InsertBefore) {
3069   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3070     return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3071   return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3072 }
3073 
CreateTruncOrBitCast(Value * S,Type * Ty,const Twine & Name,InsertPosition InsertBefore)3074 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name,
3075                                          InsertPosition InsertBefore) {
3076   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3077     return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3078   return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3079 }
3080 
3081 /// Create a BitCast or a PtrToInt cast instruction
CreatePointerCast(Value * S,Type * Ty,const Twine & Name,InsertPosition InsertBefore)3082 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, const Twine &Name,
3083                                       InsertPosition InsertBefore) {
3084   assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3085   assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3086          "Invalid cast");
3087   assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3088   assert((!Ty->isVectorTy() ||
3089           cast<VectorType>(Ty)->getElementCount() ==
3090               cast<VectorType>(S->getType())->getElementCount()) &&
3091          "Invalid cast");
3092 
3093   if (Ty->isIntOrIntVectorTy())
3094     return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3095 
3096   return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3097 }
3098 
CreatePointerBitCastOrAddrSpaceCast(Value * S,Type * Ty,const Twine & Name,InsertPosition InsertBefore)3099 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3100     Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3101   assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3102   assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3103 
3104   if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3105     return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3106 
3107   return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3108 }
3109 
CreateBitOrPointerCast(Value * S,Type * Ty,const Twine & Name,InsertPosition InsertBefore)3110 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
3111                                            const Twine &Name,
3112                                            InsertPosition InsertBefore) {
3113   if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3114     return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3115   if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3116     return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3117 
3118   return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3119 }
3120 
CreateIntegerCast(Value * C,Type * Ty,bool isSigned,const Twine & Name,InsertPosition InsertBefore)3121 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, bool isSigned,
3122                                       const Twine &Name,
3123                                       InsertPosition InsertBefore) {
3124   assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3125          "Invalid integer cast");
3126   unsigned SrcBits = C->getType()->getScalarSizeInBits();
3127   unsigned DstBits = Ty->getScalarSizeInBits();
3128   Instruction::CastOps opcode =
3129     (SrcBits == DstBits ? Instruction::BitCast :
3130      (SrcBits > DstBits ? Instruction::Trunc :
3131       (isSigned ? Instruction::SExt : Instruction::ZExt)));
3132   return Create(opcode, C, Ty, Name, InsertBefore);
3133 }
3134 
CreateFPCast(Value * C,Type * Ty,const Twine & Name,InsertPosition InsertBefore)3135 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, const Twine &Name,
3136                                  InsertPosition InsertBefore) {
3137   assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3138          "Invalid cast");
3139   unsigned SrcBits = C->getType()->getScalarSizeInBits();
3140   unsigned DstBits = Ty->getScalarSizeInBits();
3141   assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3142   Instruction::CastOps opcode =
3143     (SrcBits == DstBits ? Instruction::BitCast :
3144      (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3145   return Create(opcode, C, Ty, Name, InsertBefore);
3146 }
3147 
isBitCastable(Type * SrcTy,Type * DestTy)3148 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3149   if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3150     return false;
3151 
3152   if (SrcTy == DestTy)
3153     return true;
3154 
3155   if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3156     if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3157       if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3158         // An element by element cast. Valid if casting the elements is valid.
3159         SrcTy = SrcVecTy->getElementType();
3160         DestTy = DestVecTy->getElementType();
3161       }
3162     }
3163   }
3164 
3165   if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3166     if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3167       return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3168     }
3169   }
3170 
3171   TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
3172   TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3173 
3174   // Could still have vectors of pointers if the number of elements doesn't
3175   // match
3176   if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3177     return false;
3178 
3179   if (SrcBits != DestBits)
3180     return false;
3181 
3182   return true;
3183 }
3184 
isBitOrNoopPointerCastable(Type * SrcTy,Type * DestTy,const DataLayout & DL)3185 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
3186                                           const DataLayout &DL) {
3187   // ptrtoint and inttoptr are not allowed on non-integral pointers
3188   if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3189     if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3190       return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3191               !DL.isNonIntegralPointerType(PtrTy));
3192   if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3193     if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3194       return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3195               !DL.isNonIntegralPointerType(PtrTy));
3196 
3197   return isBitCastable(SrcTy, DestTy);
3198 }
3199 
// Provide a way to get a "cast" where the cast opcode is inferred from the
// types and size of the operand. This, basically, is a parallel of the
// logic in the castIsValid function below.  This axiom should hold:
//   castIsValid( getCastOpcode(Val, Ty), Val, Ty)
// should not assert in castIsValid. In other words, this produces a "correct"
// casting opcode for the arguments passed to it.
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  // Identical types need no conversion beyond a no-op bitcast.
  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here
  // Same-length vector pairs cast element-wise: reduce to the element types
  // and let the scalar logic below pick the opcode.
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast.  Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {                      // Casting to integral
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                               // int -> smaller int
      else if (DestBits > SrcBits) {                // its an extension
        if (SrcIsSigned)
          return SExt;                              // signed -> SEXT
        else
          return ZExt;                              // unsigned -> ZEXT
      } else {
        return BitCast;                             // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                              // FP -> sint
      else
        return FPToUI;                              // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                             // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                              // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                              // sint -> FP
      else
        return UIToFP;                              // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                             // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                               // FP -> larger FP
      } else  {
        return BitCast;                             // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                             // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    // Scalar -> vector (or vector pair of differing length that was not
    // reduced above): only a same-width bitcast is possible.
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                               // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                              // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  }
  llvm_unreachable("Casting to type that is not first-class");
}
3294 
3295 //===----------------------------------------------------------------------===//
3296 //                    CastInst SubClass Constructors
3297 //===----------------------------------------------------------------------===//
3298 
/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
  // Casts operate on first-class, non-aggregate types only.
  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits, and whether we are dealing
  // with vector types, we'll need this later.
  bool SrcIsVec = isa<VectorType>(SrcTy);
  bool DstIsVec = isa<VectorType>(DstTy);
  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks that
  // scalars are not being converted to vectors or vectors to scalars).
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
                                : ElementCount::getFixed(0);
  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
                                : ElementCount::getFixed(0);

  // Switch on the opcode provided
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    // Integer narrowing: both sides integral, same vector length.
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    // int -> fp: any widths, matching vector lengths.
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC;
  case Instruction::PtrToInt:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. No bits change.
    // However, you can't cast pointers to anything but pointers.
    if (!SrcPtrTy != !DstPtrTy)
      return false;

    // For non-pointer cases, the cast is okay if the source and destination bit
    // widths are identical.
    if (!SrcPtrTy)
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();

    // If both are pointers then the address spaces must match.
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
      return false;

    // A vector of pointers must have the same number of elements.
    // A single pointer may only be cast to/from a one-element vector.
    if (SrcIsVec && DstIsVec)
      return SrcEC == DstEC;
    if (SrcIsVec)
      return SrcEC == ElementCount::getFixed(1);
    if (DstIsVec)
      return DstEC == ElementCount::getFixed(1);

    return true;
  }
  case Instruction::AddrSpaceCast: {
    // Both sides must be pointers, in *different* address spaces, with
    // matching vector lengths.
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    if (!SrcPtrTy)
      return false;

    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
    if (!DstPtrTy)
      return false;

    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
      return false;

    return SrcEC == DstEC;
  }
  }
}
3402 
// Construct a trunc cast; castIsValid requires an integral source wider than
// the integral destination.
TruncInst::TruncInst(Value *S, Type *Ty, const Twine &Name,
                     InsertPosition InsertBefore)
    : CastInst(Ty, Trunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}
3408 
// Construct a zext cast; castIsValid requires an integral source narrower
// than the integral destination.
ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
                   InsertPosition InsertBefore)
    : CastInst(Ty, ZExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}
3414 
// Construct a sext cast; castIsValid requires an integral source narrower
// than the integral destination.
SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
                   InsertPosition InsertBefore)
    : CastInst(Ty, SExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}
3420 
// Construct an fptrunc cast; castIsValid requires a floating-point source
// wider than the floating-point destination.
FPTruncInst::FPTruncInst(Value *S, Type *Ty, const Twine &Name,
                         InsertPosition InsertBefore)
    : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}
3426 
// Construct an fpext cast; castIsValid requires a floating-point source
// narrower than the floating-point destination.
FPExtInst::FPExtInst(Value *S, Type *Ty, const Twine &Name,
                     InsertPosition InsertBefore)
    : CastInst(Ty, FPExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}
3432 
// Construct a uitofp cast; castIsValid requires an integral source and a
// floating-point destination of matching vector length.
UIToFPInst::UIToFPInst(Value *S, Type *Ty, const Twine &Name,
                       InsertPosition InsertBefore)
    : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}
3438 
// Construct a sitofp cast; castIsValid requires an integral source and a
// floating-point destination of matching vector length.
SIToFPInst::SIToFPInst(Value *S, Type *Ty, const Twine &Name,
                       InsertPosition InsertBefore)
    : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}
3444 
// Construct an fptoui cast; castIsValid requires a floating-point source and
// an integral destination of matching vector length.
FPToUIInst::FPToUIInst(Value *S, Type *Ty, const Twine &Name,
                       InsertPosition InsertBefore)
    : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}
3450 
// Construct an fptosi cast; castIsValid requires a floating-point source and
// an integral destination of matching vector length.
FPToSIInst::FPToSIInst(Value *S, Type *Ty, const Twine &Name,
                       InsertPosition InsertBefore)
    : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}
3456 
// Construct a ptrtoint cast; castIsValid requires a pointer source and an
// integral destination of matching vector length.
PtrToIntInst::PtrToIntInst(Value *S, Type *Ty, const Twine &Name,
                           InsertPosition InsertBefore)
    : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}
3462 
// Construct an inttoptr cast; castIsValid requires an integral source and a
// pointer destination of matching vector length.
IntToPtrInst::IntToPtrInst(Value *S, Type *Ty, const Twine &Name,
                           InsertPosition InsertBefore)
    : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}
3468 
// Construct a bitcast; castIsValid requires identical bit widths (and, for
// pointers, identical address spaces).
BitCastInst::BitCastInst(Value *S, Type *Ty, const Twine &Name,
                         InsertPosition InsertBefore)
    : CastInst(Ty, BitCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}
3474 
// Construct an addrspacecast; castIsValid requires pointer source and
// destination in *different* address spaces.
AddrSpaceCastInst::AddrSpaceCastInst(Value *S, Type *Ty, const Twine &Name,
                                     InsertPosition InsertBefore)
    : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}
3480 
3481 //===----------------------------------------------------------------------===//
3482 //                               CmpInst Classes
3483 //===----------------------------------------------------------------------===//
3484 
// Base CmpInst constructor: installs the two operands, records the predicate,
// names the result, and optionally copies IR flags (e.g. fast-math flags)
// from FlagsSource onto the new instruction.
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
                 Instruction *FlagsSource)
    : Instruction(ty, op, AllocMarker, InsertBefore) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate(predicate);
  setName(Name);
  if (FlagsSource)
    copyIRFlags(FlagsSource);
}
3496 
Create(OtherOps Op,Predicate predicate,Value * S1,Value * S2,const Twine & Name,InsertPosition InsertBefore)3497 CmpInst *CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
3498                          const Twine &Name, InsertPosition InsertBefore) {
3499   if (Op == Instruction::ICmp) {
3500     if (InsertBefore.isValid())
3501       return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3502                           S1, S2, Name);
3503     else
3504       return new ICmpInst(CmpInst::Predicate(predicate),
3505                           S1, S2, Name);
3506   }
3507 
3508   if (InsertBefore.isValid())
3509     return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3510                         S1, S2, Name);
3511   else
3512     return new FCmpInst(CmpInst::Predicate(predicate),
3513                         S1, S2, Name);
3514 }
3515 
CreateWithCopiedFlags(OtherOps Op,Predicate Pred,Value * S1,Value * S2,const Instruction * FlagsSource,const Twine & Name,InsertPosition InsertBefore)3516 CmpInst *CmpInst::CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1,
3517                                         Value *S2,
3518                                         const Instruction *FlagsSource,
3519                                         const Twine &Name,
3520                                         InsertPosition InsertBefore) {
3521   CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3522   Inst->copyIRFlags(FlagsSource);
3523   return Inst;
3524 }
3525 
swapOperands()3526 void CmpInst::swapOperands() {
3527   if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3528     IC->swapOperands();
3529   else
3530     cast<FCmpInst>(this)->swapOperands();
3531 }
3532 
isCommutative() const3533 bool CmpInst::isCommutative() const {
3534   if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3535     return IC->isCommutative();
3536   return cast<FCmpInst>(this)->isCommutative();
3537 }
3538 
isEquality(Predicate P)3539 bool CmpInst::isEquality(Predicate P) {
3540   if (ICmpInst::isIntPredicate(P))
3541     return ICmpInst::isEquality(P);
3542   if (FCmpInst::isFPPredicate(P))
3543     return FCmpInst::isEquality(P);
3544   llvm_unreachable("Unsupported predicate kind");
3545 }
3546 
3547 // Returns true if either operand of CmpInst is a provably non-zero
3548 // floating-point constant.
hasNonZeroFPOperands(const CmpInst * Cmp)3549 static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3550   auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3551   auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3552   if (auto *Const = LHS ? LHS : RHS) {
3553     using namespace llvm::PatternMatch;
3554     return match(Const, m_NonZeroNotDenormalFP());
3555   }
3556   return false;
3557 }
3558 
3559 // Floating-point equality is not an equivalence when comparing +0.0 with
3560 // -0.0, when comparing NaN with another value, or when flushing
3561 // denormals-to-zero.
bool CmpInst::isEquivalence(bool Invert) const {
  // Examine this predicate, or its inverse when Invert is set.
  switch (Invert ? getInversePredicate() : getPredicate()) {
  case CmpInst::Predicate::ICMP_EQ:
    // Integer equality is always an equivalence relation.
    return true;
  case CmpInst::Predicate::FCMP_UEQ:
    // ueq also matches NaN operands, so the nnan flag is required before it
    // can be treated like oeq below.
    if (!hasNoNaNs())
      return false;
    [[fallthrough]];
  case CmpInst::Predicate::FCMP_OEQ:
    // oeq is only an equivalence if +0.0/-0.0 (or denormal flushing) cannot
    // make distinct values compare equal; a provably non-zero, non-denormal
    // constant operand rules that out.
    return hasNonZeroFPOperands(this);
  default:
    return false;
  }
}
3576 
// Return the logical complement of a predicate: the predicate that is true
// exactly when the input predicate is false. For FP predicates this swaps the
// ordered and unordered families (e.g. !oeq is une, since the inverse must be
// true when the operands are unordered).
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown cmp predicate!");
    case ICMP_EQ: return ICMP_NE;
    case ICMP_NE: return ICMP_EQ;
    case ICMP_UGT: return ICMP_ULE;
    case ICMP_ULT: return ICMP_UGE;
    case ICMP_UGE: return ICMP_ULT;
    case ICMP_ULE: return ICMP_UGT;
    case ICMP_SGT: return ICMP_SLE;
    case ICMP_SLT: return ICMP_SGE;
    case ICMP_SGE: return ICMP_SLT;
    case ICMP_SLE: return ICMP_SGT;

    case FCMP_OEQ: return FCMP_UNE;
    case FCMP_ONE: return FCMP_UEQ;
    case FCMP_OGT: return FCMP_ULE;
    case FCMP_OLT: return FCMP_UGE;
    case FCMP_OGE: return FCMP_ULT;
    case FCMP_OLE: return FCMP_UGT;
    case FCMP_UEQ: return FCMP_ONE;
    case FCMP_UNE: return FCMP_OEQ;
    case FCMP_UGT: return FCMP_OLE;
    case FCMP_ULT: return FCMP_OGE;
    case FCMP_UGE: return FCMP_OLT;
    case FCMP_ULE: return FCMP_OGT;
    case FCMP_ORD: return FCMP_UNO;
    case FCMP_UNO: return FCMP_ORD;
    case FCMP_TRUE: return FCMP_FALSE;
    case FCMP_FALSE: return FCMP_TRUE;
  }
}
3609 
// Return the IR spelling of a predicate (e.g. "oeq", "slt"), or "unknown" for
// values outside the predicate enums.
StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default:                   return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ:   return "oeq";
  case FCmpInst::FCMP_OGT:   return "ogt";
  case FCmpInst::FCMP_OGE:   return "oge";
  case FCmpInst::FCMP_OLT:   return "olt";
  case FCmpInst::FCMP_OLE:   return "ole";
  case FCmpInst::FCMP_ONE:   return "one";
  case FCmpInst::FCMP_ORD:   return "ord";
  case FCmpInst::FCMP_UNO:   return "uno";
  case FCmpInst::FCMP_UEQ:   return "ueq";
  case FCmpInst::FCMP_UGT:   return "ugt";
  case FCmpInst::FCMP_UGE:   return "uge";
  case FCmpInst::FCMP_ULT:   return "ult";
  case FCmpInst::FCMP_ULE:   return "ule";
  case FCmpInst::FCMP_UNE:   return "une";
  case FCmpInst::FCMP_TRUE:  return "true";
  case ICmpInst::ICMP_EQ:    return "eq";
  case ICmpInst::ICMP_NE:    return "ne";
  case ICmpInst::ICMP_SGT:   return "sgt";
  case ICmpInst::ICMP_SGE:   return "sge";
  case ICmpInst::ICMP_SLT:   return "slt";
  case ICmpInst::ICMP_SLE:   return "sle";
  case ICmpInst::ICMP_UGT:   return "ugt";
  case ICmpInst::ICMP_UGE:   return "uge";
  case ICmpInst::ICMP_ULT:   return "ult";
  case ICmpInst::ICMP_ULE:   return "ule";
  }
}
3641 
operator <<(raw_ostream & OS,CmpInst::Predicate Pred)3642 raw_ostream &llvm::operator<<(raw_ostream &OS, CmpInst::Predicate Pred) {
3643   OS << CmpInst::getPredicateName(Pred);
3644   return OS;
3645 }
3646 
getSignedPredicate(Predicate pred)3647 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
3648   switch (pred) {
3649     default: llvm_unreachable("Unknown icmp predicate!");
3650     case ICMP_EQ: case ICMP_NE:
3651     case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3652        return pred;
3653     case ICMP_UGT: return ICMP_SGT;
3654     case ICMP_ULT: return ICMP_SLT;
3655     case ICMP_UGE: return ICMP_SGE;
3656     case ICMP_ULE: return ICMP_SLE;
3657   }
3658 }
3659 
getUnsignedPredicate(Predicate pred)3660 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
3661   switch (pred) {
3662     default: llvm_unreachable("Unknown icmp predicate!");
3663     case ICMP_EQ: case ICMP_NE:
3664     case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3665        return pred;
3666     case ICMP_SGT: return ICMP_UGT;
3667     case ICMP_SLT: return ICMP_ULT;
3668     case ICMP_SGE: return ICMP_UGE;
3669     case ICMP_SLE: return ICMP_ULE;
3670   }
3671 }
3672 
// Return the predicate that gives the same result when the two operands are
// exchanged (e.g. sgt <-> slt). Symmetric predicates (equality, ord/uno,
// true/false) are returned unchanged.
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown cmp predicate!");
    case ICMP_EQ: case ICMP_NE:
      return pred;
    case ICMP_SGT: return ICMP_SLT;
    case ICMP_SLT: return ICMP_SGT;
    case ICMP_SGE: return ICMP_SLE;
    case ICMP_SLE: return ICMP_SGE;
    case ICMP_UGT: return ICMP_ULT;
    case ICMP_ULT: return ICMP_UGT;
    case ICMP_UGE: return ICMP_ULE;
    case ICMP_ULE: return ICMP_UGE;

    case FCMP_FALSE: case FCMP_TRUE:
    case FCMP_OEQ: case FCMP_ONE:
    case FCMP_UEQ: case FCMP_UNE:
    case FCMP_ORD: case FCMP_UNO:
      return pred;
    case FCMP_OGT: return FCMP_OLT;
    case FCMP_OLT: return FCMP_OGT;
    case FCMP_OGE: return FCMP_OLE;
    case FCMP_OLE: return FCMP_OGE;
    case FCMP_UGT: return FCMP_ULT;
    case FCMP_ULT: return FCMP_UGT;
    case FCMP_UGE: return FCMP_ULE;
    case FCMP_ULE: return FCMP_UGE;
  }
}
3702 
isNonStrictPredicate(Predicate pred)3703 bool CmpInst::isNonStrictPredicate(Predicate pred) {
3704   switch (pred) {
3705   case ICMP_SGE:
3706   case ICMP_SLE:
3707   case ICMP_UGE:
3708   case ICMP_ULE:
3709   case FCMP_OGE:
3710   case FCMP_OLE:
3711   case FCMP_UGE:
3712   case FCMP_ULE:
3713     return true;
3714   default:
3715     return false;
3716   }
3717 }
3718 
isStrictPredicate(Predicate pred)3719 bool CmpInst::isStrictPredicate(Predicate pred) {
3720   switch (pred) {
3721   case ICMP_SGT:
3722   case ICMP_SLT:
3723   case ICMP_UGT:
3724   case ICMP_ULT:
3725   case FCMP_OGT:
3726   case FCMP_OLT:
3727   case FCMP_UGT:
3728   case FCMP_ULT:
3729     return true;
3730   default:
3731     return false;
3732   }
3733 }
3734 
getStrictPredicate(Predicate pred)3735 CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
3736   switch (pred) {
3737   case ICMP_SGE:
3738     return ICMP_SGT;
3739   case ICMP_SLE:
3740     return ICMP_SLT;
3741   case ICMP_UGE:
3742     return ICMP_UGT;
3743   case ICMP_ULE:
3744     return ICMP_ULT;
3745   case FCMP_OGE:
3746     return FCMP_OGT;
3747   case FCMP_OLE:
3748     return FCMP_OLT;
3749   case FCMP_UGE:
3750     return FCMP_UGT;
3751   case FCMP_ULE:
3752     return FCMP_ULT;
3753   default:
3754     return pred;
3755   }
3756 }
3757 
getNonStrictPredicate(Predicate pred)3758 CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
3759   switch (pred) {
3760   case ICMP_SGT:
3761     return ICMP_SGE;
3762   case ICMP_SLT:
3763     return ICMP_SLE;
3764   case ICMP_UGT:
3765     return ICMP_UGE;
3766   case ICMP_ULT:
3767     return ICMP_ULE;
3768   case FCMP_OGT:
3769     return FCMP_OGE;
3770   case FCMP_OLT:
3771     return FCMP_OLE;
3772   case FCMP_UGT:
3773     return FCMP_UGE;
3774   case FCMP_ULT:
3775     return FCMP_ULE;
3776   default:
3777     return pred;
3778   }
3779 }
3780 
getFlippedStrictnessPredicate(Predicate pred)3781 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
3782   assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3783 
3784   if (isStrictPredicate(pred))
3785     return getNonStrictPredicate(pred);
3786   if (isNonStrictPredicate(pred))
3787     return getStrictPredicate(pred);
3788 
3789   llvm_unreachable("Unknown predicate!");
3790 }
3791 
isUnsigned(Predicate predicate)3792 bool CmpInst::isUnsigned(Predicate predicate) {
3793   switch (predicate) {
3794     default: return false;
3795     case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
3796     case ICmpInst::ICMP_UGE: return true;
3797   }
3798 }
3799 
isSigned(Predicate predicate)3800 bool CmpInst::isSigned(Predicate predicate) {
3801   switch (predicate) {
3802     default: return false;
3803     case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
3804     case ICmpInst::ICMP_SGE: return true;
3805   }
3806 }
3807 
compare(const APInt & LHS,const APInt & RHS,ICmpInst::Predicate Pred)3808 bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3809                        ICmpInst::Predicate Pred) {
3810   assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3811   switch (Pred) {
3812   case ICmpInst::Predicate::ICMP_EQ:
3813     return LHS.eq(RHS);
3814   case ICmpInst::Predicate::ICMP_NE:
3815     return LHS.ne(RHS);
3816   case ICmpInst::Predicate::ICMP_UGT:
3817     return LHS.ugt(RHS);
3818   case ICmpInst::Predicate::ICMP_UGE:
3819     return LHS.uge(RHS);
3820   case ICmpInst::Predicate::ICMP_ULT:
3821     return LHS.ult(RHS);
3822   case ICmpInst::Predicate::ICMP_ULE:
3823     return LHS.ule(RHS);
3824   case ICmpInst::Predicate::ICMP_SGT:
3825     return LHS.sgt(RHS);
3826   case ICmpInst::Predicate::ICMP_SGE:
3827     return LHS.sge(RHS);
3828   case ICmpInst::Predicate::ICMP_SLT:
3829     return LHS.slt(RHS);
3830   case ICmpInst::Predicate::ICMP_SLE:
3831     return LHS.sle(RHS);
3832   default:
3833     llvm_unreachable("Unexpected non-integer predicate.");
3834   };
3835 }
3836 
// Evaluate an FP predicate on two constant APFloat operands. APFloat::compare
// yields one of {less, equal, greater, unordered}; each predicate is a set of
// those outcomes (unordered predicates include cmpUnordered, ordered ones
// exclude it).
bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
                       FCmpInst::Predicate Pred) {
  APFloat::cmpResult R = LHS.compare(RHS);
  switch (Pred) {
  default:
    llvm_unreachable("Invalid FCmp Predicate");
  case FCmpInst::FCMP_FALSE:
    return false;
  case FCmpInst::FCMP_TRUE:
    return true;
  case FCmpInst::FCMP_UNO:
    return R == APFloat::cmpUnordered;
  case FCmpInst::FCMP_ORD:
    return R != APFloat::cmpUnordered;
  case FCmpInst::FCMP_UEQ:
    return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_OEQ:
    return R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UNE:
    // une = {less, greater, unordered} = anything but equal.
    return R != APFloat::cmpEqual;
  case FCmpInst::FCMP_ONE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_OLT:
    return R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_UGT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OGT:
    return R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULE:
    // ule = {less, equal, unordered} = anything but greater.
    return R != APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OLE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UGE:
    // uge = {greater, equal, unordered} = anything but less.
    return R != APFloat::cmpLessThan;
  case FCmpInst::FCMP_OGE:
    return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
  }
}
3877 
compare(const KnownBits & LHS,const KnownBits & RHS,ICmpInst::Predicate Pred)3878 std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3879                                       const KnownBits &RHS,
3880                                       ICmpInst::Predicate Pred) {
3881   switch (Pred) {
3882   case ICmpInst::ICMP_EQ:
3883     return KnownBits::eq(LHS, RHS);
3884   case ICmpInst::ICMP_NE:
3885     return KnownBits::ne(LHS, RHS);
3886   case ICmpInst::ICMP_UGE:
3887     return KnownBits::uge(LHS, RHS);
3888   case ICmpInst::ICMP_UGT:
3889     return KnownBits::ugt(LHS, RHS);
3890   case ICmpInst::ICMP_ULE:
3891     return KnownBits::ule(LHS, RHS);
3892   case ICmpInst::ICMP_ULT:
3893     return KnownBits::ult(LHS, RHS);
3894   case ICmpInst::ICMP_SGE:
3895     return KnownBits::sge(LHS, RHS);
3896   case ICmpInst::ICMP_SGT:
3897     return KnownBits::sgt(LHS, RHS);
3898   case ICmpInst::ICMP_SLE:
3899     return KnownBits::sle(LHS, RHS);
3900   case ICmpInst::ICMP_SLT:
3901     return KnownBits::slt(LHS, RHS);
3902   default:
3903     llvm_unreachable("Unexpected non-integer predicate.");
3904   }
3905 }
3906 
getFlippedSignednessPredicate(Predicate pred)3907 CmpInst::Predicate ICmpInst::getFlippedSignednessPredicate(Predicate pred) {
3908   if (CmpInst::isEquality(pred))
3909     return pred;
3910   if (isSigned(pred))
3911     return getUnsignedPredicate(pred);
3912   if (isUnsigned(pred))
3913     return getSignedPredicate(pred);
3914 
3915   llvm_unreachable("Unknown predicate!");
3916 }
3917 
isOrdered(Predicate predicate)3918 bool CmpInst::isOrdered(Predicate predicate) {
3919   switch (predicate) {
3920     default: return false;
3921     case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
3922     case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
3923     case FCmpInst::FCMP_ORD: return true;
3924   }
3925 }
3926 
isUnordered(Predicate predicate)3927 bool CmpInst::isUnordered(Predicate predicate) {
3928   switch (predicate) {
3929     default: return false;
3930     case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
3931     case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
3932     case FCmpInst::FCMP_UNO: return true;
3933   }
3934 }
3935 
isTrueWhenEqual(Predicate predicate)3936 bool CmpInst::isTrueWhenEqual(Predicate predicate) {
3937   switch(predicate) {
3938     default: return false;
3939     case ICMP_EQ:   case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3940     case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3941   }
3942 }
3943 
isFalseWhenEqual(Predicate predicate)3944 bool CmpInst::isFalseWhenEqual(Predicate predicate) {
3945   switch(predicate) {
3946   case ICMP_NE:    case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3947   case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3948   default: return false;
3949   }
3950 }
3951 
// Return true if "A Pred1 B" being true implies "A Pred2 B" is true, for the
// same operand pair A, B.
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2) {
  // If the predicates match, then we know the first condition implies the
  // second is true.
  if (CmpPredicate::getMatching(Pred1, Pred2))
    return true;

  // When one side carries samesign and the other is a signed predicate,
  // normalize signedness so the implication table below can apply.
  if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
    Pred1 = ICmpInst::getFlippedSignednessPredicate(Pred1);
  else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
    Pred2 = ICmpInst::getFlippedSignednessPredicate(Pred2);

  switch (Pred1) {
  default:
    break;
  case CmpInst::ICMP_EQ:
    // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
    return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
           Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
  case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
  case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
  case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
  case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
  }
  return false;
}
3981 
isImpliedFalseByMatchingCmp(CmpPredicate Pred1,CmpPredicate Pred2)3982 static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1,
3983                                         CmpPredicate Pred2) {
3984   return isImpliedTrueByMatchingCmp(Pred1,
3985                                     ICmpInst::getInverseCmpPredicate(Pred2));
3986 }
3987 
isImpliedByMatchingCmp(CmpPredicate Pred1,CmpPredicate Pred2)3988 std::optional<bool> ICmpInst::isImpliedByMatchingCmp(CmpPredicate Pred1,
3989                                                      CmpPredicate Pred2) {
3990   if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
3991     return true;
3992   if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
3993     return false;
3994   return std::nullopt;
3995 }
3996 
3997 //===----------------------------------------------------------------------===//
3998 //                       CmpPredicate Implementation
3999 //===----------------------------------------------------------------------===//
4000 
// Compute the most specific predicate implied by both A and B, or nothing if
// they are incompatible. The samesign flag survives only when both sides
// carry it; with samesign on one side, a signed/unsigned pair of the same
// relation also matches.
std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
                                                      CmpPredicate B) {
  // Identical predicates: keep samesign only if both agree on it.
  if (A.Pred == B.Pred)
    return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
  // samesign reasoning below applies only to integer predicates.
  if (CmpInst::isFPPredicate(A) || CmpInst::isFPPredicate(B))
    return {};
  // A samesign predicate is equivalent to its signedness-flipped twin.
  if (A.HasSameSign &&
      A.Pred == ICmpInst::getFlippedSignednessPredicate(B.Pred))
    return B.Pred;
  if (B.HasSameSign &&
      B.Pred == ICmpInst::getFlippedSignednessPredicate(A.Pred))
    return A.Pred;
  return {};
}
4015 
getPreferredSignedPredicate() const4016 CmpInst::Predicate CmpPredicate::getPreferredSignedPredicate() const {
4017   return HasSameSign ? ICmpInst::getSignedPredicate(Pred) : Pred;
4018 }
4019 
get(const CmpInst * Cmp)4020 CmpPredicate CmpPredicate::get(const CmpInst *Cmp) {
4021   if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
4022     return ICI->getCmpPredicate();
4023   return Cmp->getPredicate();
4024 }
4025 
getSwapped(CmpPredicate P)4026 CmpPredicate CmpPredicate::getSwapped(CmpPredicate P) {
4027   return {CmpInst::getSwappedPredicate(P), P.hasSameSign()};
4028 }
4029 
getSwapped(const CmpInst * Cmp)4030 CmpPredicate CmpPredicate::getSwapped(const CmpInst *Cmp) {
4031   return getSwapped(get(Cmp));
4032 }
4033 
4034 //===----------------------------------------------------------------------===//
4035 //                        SwitchInst Implementation
4036 //===----------------------------------------------------------------------===//
4037 
// Shared constructor body: reserve hung-off operand storage and install the
// two fixed operands. Layout: slot 0 is the switched-on value, slot 1 the
// default destination; case (value, successor) pairs follow from slot 2.
void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  assert(Value && Default && NumReserved);
  ReservedSpace = NumReserved;
  setNumHungOffUseOperands(2);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Value;
  Op<1>() = Default;
}
4047 
4048 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
4049 /// switch on and a default destination.  The number of additional cases can
4050 /// be specified here to make memory allocation more efficient.  This
4051 /// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  AllocMarker, InsertBefore) {
  // Reserve 2 fixed operands (condition + default) plus two per case.
  init(Value, Default, 2+NumCases*2);
}
4058 
// Copy constructor: replicate SI's operand list (condition, default, and all
// case pairs) into freshly allocated hung-off uses.
SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  // Case pairs start at operand 2; slots 0/1 were set by init().
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];
    OL[i+1] = InOL[i+1];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}
4071 
4072 /// addCase - Add an entry to the switch instruction...
4073 ///
// Append a new (OnVal -> Dest) case, growing the hung-off operand storage if
// the two extra slots do not fit in the reserved space.
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  if (OpNo+2 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  // Write the case value/successor through the CaseHandle accessors.
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}
4086 
4087 /// removeCase - This method removes the specified case and its successor
4088 /// from the switch instruction.
// Remove the case at I by moving the last case pair into its slots and
// shrinking the operand list by two. Note this does not preserve case order.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  // Iterator now refers to whatever case was moved into slot idx.
  return CaseIt(this, idx);
}
4110 
4111 /// growOperands - grow operands - This grows the operand list in response
4112 /// to a push_back style of operation.  This grows the number of ops by 3 times.
4113 ///
// Grow the hung-off operand storage to 3x the current operand count so that
// repeated addCase() calls amortize the reallocations.
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}
4121 
// Materialize the tracked weights back into !prof branch_weights metadata.
// Returns nullptr when the metadata should be dropped instead: no weights
// are tracked, all weights are zero, or fewer than two successors remain.
MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
  assert(Changed && "called only if metadata has changed");

  if (!Weights)
    return nullptr;

  assert(SI.getNumSuccessors() == Weights->size() &&
         "num of prof branch_weights must accord with num of successors");

  bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });

  if (AllZeroes || Weights->size() < 2)
    return nullptr;

  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
}
4138 
init()4139 void SwitchInstProfUpdateWrapper::init() {
4140   MDNode *ProfileData = getBranchWeightMDNode(SI);
4141   if (!ProfileData)
4142     return;
4143 
4144   if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4145     llvm_unreachable("number of prof branch_weights metadata operands does "
4146                      "not correspond to number of succesors");
4147   }
4148 
4149   SmallVector<uint32_t, 8> Weights;
4150   if (!extractBranchWeights(ProfileData, Weights))
4151     return;
4152   this->Weights = std::move(Weights);
4153 }
4154 
SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase() removes
    // the cases in SwitchInst::removeCase(CaseIt).
    // The +1 skips the weight at index 0 (the default successor's slot),
    // mirroring the case-operand offset inside SwitchInst.
    (*Weights)[I->getCaseIndex() + 1] = Weights->back();
    Weights->pop_back();
  }
  return SI.removeCase(I);
}
4169 
// Add a case and keep the weight vector aligned with the successor list. A
// weight vector is created lazily the first time a non-zero weight W is
// supplied; thereafter every added case appends a weight (0 when W is unset).
void SwitchInstProfUpdateWrapper::addCase(
    ConstantInt *OnVal, BasicBlock *Dest,
    SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  SI.addCase(OnVal, Dest);

  if (!Weights && W && *W) {
    Changed = true;
    // Start tracking: all pre-existing successors get weight 0.
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
    (*Weights)[SI.getNumSuccessors() - 1] = *W;
  } else if (Weights) {
    Changed = true;
    Weights->push_back(W.value_or(0));
  }
  if (Weights)
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
}
4187 
Instruction::InstListType::iterator
SwitchInstProfUpdateWrapper::eraseFromParent() {
  // Instruction is erased. Mark as unchanged to not touch it in the destructor.
  Changed = false;
  if (Weights)
    Weights->resize(0);
  return SI.eraseFromParent();
}
4196 
// Return the tracked weight for successor idx, or nothing when no profile
// weights are being tracked.
SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
  if (!Weights)
    return std::nullopt;
  return (*Weights)[idx];
}
4203 
// Set the weight of successor idx. An unset W is a no-op; a non-zero W starts
// weight tracking lazily (all other successors get 0); Changed is only set
// when the stored value actually differs.
void SwitchInstProfUpdateWrapper::setSuccessorWeight(
    unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  if (!W)
    return;

  if (!Weights && *W)
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);

  if (Weights) {
    auto &OldW = (*Weights)[idx];
    if (*W != OldW) {
      Changed = true;
      OldW = *W;
    }
  }
}
4220 
// Static helper: read successor idx's weight straight from SI's !prof
// metadata without constructing a wrapper. The +1 skips metadata operand 0
// (the "branch_weights" kind string).
SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
                                                unsigned idx) {
  if (MDNode *ProfileData = getBranchWeightMDNode(SI))
    if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
      return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
          ->getValue()
          .getZExtValue();

  return std::nullopt;
}
4232 
4233 //===----------------------------------------------------------------------===//
4234 //                        IndirectBrInst Implementation
4235 //===----------------------------------------------------------------------===//
4236 
// Shared constructor body: reserve hung-off operand storage and install the
// address operand. Layout: slot 0 is the address; destination blocks follow.
void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  ReservedSpace = 1+NumDests;
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}
4246 
4247 
4248 /// growOperands - grow operands - This grows the operand list in response
4249 /// to a push_back style of operation.  This grows the number of ops by 2 times.
4250 ///
// Grow the hung-off operand storage to 2x the current operand count so that
// repeated addDestination() calls amortize the reallocations.
void IndirectBrInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}
4258 
// Construct an indirectbr on Address, reserving space for NumCases
// destinations.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, AllocMarker, InsertBefore) {
  init(Address, NumCases);
}
4265 
// Copy constructor: replicate IBI's operand list (address plus destinations)
// into freshly allocated hung-off uses.
IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                  AllocMarker) {
  NumUserOperands = IBI.NumUserOperands;
  allocHungoffUses(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}
4277 
4278 /// addDestination - Add a destination.
4279 ///
addDestination(BasicBlock * DestBB)4280 void IndirectBrInst::addDestination(BasicBlock *DestBB) {
4281   unsigned OpNo = getNumOperands();
4282   if (OpNo+1 > ReservedSpace)
4283     growOperands();  // Get more space!
4284   // Initialize some new operands.
4285   assert(OpNo < ReservedSpace && "Growing didn't work!");
4286   setNumHungOffUseOperands(OpNo+1);
4287   getOperandList()[OpNo] = DestBB;
4288 }
4289 
4290 /// removeDestination - This method removes the specified successor from the
4291 /// indirectbr instruction.
removeDestination(unsigned idx)4292 void IndirectBrInst::removeDestination(unsigned idx) {
4293   assert(idx < getNumOperands()-1 && "Successor index out of range!");
4294 
4295   unsigned NumOps = getNumOperands();
4296   Use *OL = getOperandList();
4297 
4298   // Replace this value with the last one.
4299   OL[idx+1] = OL[NumOps-1];
4300 
4301   // Nuke the last value.
4302   OL[NumOps-1].set(nullptr);
4303   setNumHungOffUseOperands(NumOps-1);
4304 }
4305 
4306 //===----------------------------------------------------------------------===//
4307 //                            FreezeInst Implementation
4308 //===----------------------------------------------------------------------===//
4309 
// freeze produces a value of the same type as its single operand.
FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}
4314 
4315 //===----------------------------------------------------------------------===//
4316 //                           cloneImpl() implementations
4317 //===----------------------------------------------------------------------===//
4318 
4319 // Define these methods here so vtables don't get emitted into every translation
4320 // unit that uses these classes.
4321 
// The marker tells placement new how much operand storage to co-allocate
// with the instruction itself.
GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
}
4326 
cloneImpl() const4327 UnaryOperator *UnaryOperator::cloneImpl() const {
4328   return Create(getOpcode(), Op<0>());
4329 }
4330 
cloneImpl() const4331 BinaryOperator *BinaryOperator::cloneImpl() const {
4332   return Create(getOpcode(), Op<0>(), Op<1>());
4333 }
4334 
cloneImpl() const4335 FCmpInst *FCmpInst::cloneImpl() const {
4336   return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4337 }
4338 
cloneImpl() const4339 ICmpInst *ICmpInst::cloneImpl() const {
4340   return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4341 }
4342 
// The copy constructor carries over the aggregate operand and index list.
ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}
4346 
// The copy constructor carries over both operands and the index list.
InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}
4350 
cloneImpl() const4351 AllocaInst *AllocaInst::cloneImpl() const {
4352   AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),
4353                                       getOperand(0), getAlign());
4354   Result->setUsedWithInAlloca(isUsedWithInAlloca());
4355   Result->setSwiftError(isSwiftError());
4356   return Result;
4357 }
4358 
cloneImpl() const4359 LoadInst *LoadInst::cloneImpl() const {
4360   return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4361                       getAlign(), getOrdering(), getSyncScopeID());
4362 }
4363 
cloneImpl() const4364 StoreInst *StoreInst::cloneImpl() const {
4365   return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
4366                        getOrdering(), getSyncScopeID());
4367 }
4368 
cloneImpl() const4369 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
4370   AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
4371       getOperand(0), getOperand(1), getOperand(2), getAlign(),
4372       getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
4373   Result->setVolatile(isVolatile());
4374   Result->setWeak(isWeak());
4375   return Result;
4376 }
4377 
cloneImpl() const4378 AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
4379   AtomicRMWInst *Result =
4380       new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
4381                         getAlign(), getOrdering(), getSyncScopeID());
4382   Result->setVolatile(isVolatile());
4383   return Result;
4384 }
4385 
// A fence has no operands; only context, ordering and sync scope are needed.
FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}
4389 
cloneImpl() const4390 TruncInst *TruncInst::cloneImpl() const {
4391   return new TruncInst(getOperand(0), getType());
4392 }
4393 
cloneImpl() const4394 ZExtInst *ZExtInst::cloneImpl() const {
4395   return new ZExtInst(getOperand(0), getType());
4396 }
4397 
cloneImpl() const4398 SExtInst *SExtInst::cloneImpl() const {
4399   return new SExtInst(getOperand(0), getType());
4400 }
4401 
cloneImpl() const4402 FPTruncInst *FPTruncInst::cloneImpl() const {
4403   return new FPTruncInst(getOperand(0), getType());
4404 }
4405 
cloneImpl() const4406 FPExtInst *FPExtInst::cloneImpl() const {
4407   return new FPExtInst(getOperand(0), getType());
4408 }
4409 
cloneImpl() const4410 UIToFPInst *UIToFPInst::cloneImpl() const {
4411   return new UIToFPInst(getOperand(0), getType());
4412 }
4413 
cloneImpl() const4414 SIToFPInst *SIToFPInst::cloneImpl() const {
4415   return new SIToFPInst(getOperand(0), getType());
4416 }
4417 
cloneImpl() const4418 FPToUIInst *FPToUIInst::cloneImpl() const {
4419   return new FPToUIInst(getOperand(0), getType());
4420 }
4421 
cloneImpl() const4422 FPToSIInst *FPToSIInst::cloneImpl() const {
4423   return new FPToSIInst(getOperand(0), getType());
4424 }
4425 
cloneImpl() const4426 PtrToIntInst *PtrToIntInst::cloneImpl() const {
4427   return new PtrToIntInst(getOperand(0), getType());
4428 }
4429 
cloneImpl() const4430 IntToPtrInst *IntToPtrInst::cloneImpl() const {
4431   return new IntToPtrInst(getOperand(0), getType());
4432 }
4433 
cloneImpl() const4434 BitCastInst *BitCastInst::cloneImpl() const {
4435   return new BitCastInst(getOperand(0), getType());
4436 }
4437 
cloneImpl() const4438 AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
4439   return new AddrSpaceCastInst(getOperand(0), getType());
4440 }
4441 
CallInst *CallInst::cloneImpl() const {
  // Calls carrying operand bundles need extra trailing storage for the
  // per-bundle BundleOpInfo descriptors, so size the allocation accordingly.
  if (hasOperandBundles()) {
    IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
        getNumOperands(),
        getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
    return new (AllocMarker) CallInst(*this, AllocMarker);
  }
  // No bundles: only the operand slots are co-allocated.
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) CallInst(*this, AllocMarker);
}
4452 
cloneImpl() const4453 SelectInst *SelectInst::cloneImpl() const {
4454   return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
4455 }
4456 
cloneImpl() const4457 VAArgInst *VAArgInst::cloneImpl() const {
4458   return new VAArgInst(getOperand(0), getType());
4459 }
4460 
cloneImpl() const4461 ExtractElementInst *ExtractElementInst::cloneImpl() const {
4462   return ExtractElementInst::Create(getOperand(0), getOperand(1));
4463 }
4464 
cloneImpl() const4465 InsertElementInst *InsertElementInst::cloneImpl() const {
4466   return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
4467 }
4468 
cloneImpl() const4469 ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
4470   return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
4471 }
4472 
cloneImpl() const4473 PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4474 
// The copy constructor carries over the clauses and the cleanup flag.
LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}
4478 
// Size the allocation for this instance's operand count (0 or 1 operand,
// depending on whether a value is returned).
ReturnInst *ReturnInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) ReturnInst(*this, AllocMarker);
}
4483 
// Size the allocation for this instance's operand count (unconditional and
// conditional branches have different operand counts).
BranchInst *BranchInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) BranchInst(*this, AllocMarker);
}
4488 
cloneImpl() const4489 SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4490 
// The copy constructor replicates the address and all destinations.
IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}
4494 
InvokeInst *InvokeInst::cloneImpl() const {
  // Invokes carrying operand bundles need extra trailing storage for the
  // per-bundle BundleOpInfo descriptors, so size the allocation accordingly.
  if (hasOperandBundles()) {
    IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
        getNumOperands(),
        getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
    return new (AllocMarker) InvokeInst(*this, AllocMarker);
  }
  // No bundles: only the operand slots are co-allocated.
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) InvokeInst(*this, AllocMarker);
}
4505 
CallBrInst *CallBrInst::cloneImpl() const {
  // callbr instructions carrying operand bundles need extra trailing storage
  // for the per-bundle BundleOpInfo descriptors.
  if (hasOperandBundles()) {
    IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
        getNumOperands(),
        getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
    return new (AllocMarker) CallBrInst(*this, AllocMarker);
  }
  // No bundles: only the operand slots are co-allocated.
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) CallBrInst(*this, AllocMarker);
}
4516 
// Copy-construct via placement new with the class's AllocMarker.
ResumeInst *ResumeInst::cloneImpl() const {
  return new (AllocMarker) ResumeInst(*this);
}
4520 
// Size the allocation for this instance's operand count (the unwind
// destination is optional, so the count varies).
CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
}
4525 
// Copy-construct via placement new with the class's AllocMarker.
CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (AllocMarker) CatchReturnInst(*this);
}
4529 
// The copy constructor replicates the parent pad, handlers and unwind dest.
CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}
4533 
// Size the allocation for this instance's operand count (funclet pads take
// a variable number of arguments).
FuncletPadInst *FuncletPadInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
}
4538 
cloneImpl() const4539 UnreachableInst *UnreachableInst::cloneImpl() const {
4540   LLVMContext &Context = getContext();
4541   return new UnreachableInst(Context);
4542 }
4543 
shouldLowerToTrap(bool TrapUnreachable,bool NoTrapAfterNoreturn) const4544 bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
4545                                         bool NoTrapAfterNoreturn) const {
4546   if (!TrapUnreachable)
4547     return false;
4548 
4549   // We may be able to ignore unreachable behind a noreturn call.
4550   if (const CallInst *Call = dyn_cast_or_null<CallInst>(getPrevNode());
4551       Call && Call->doesNotReturn()) {
4552     if (NoTrapAfterNoreturn)
4553       return false;
4554     // Do not emit an additional trap instruction.
4555     if (Call->isNonContinuableTrap())
4556       return false;
4557   }
4558 
4559   if (getFunction()->hasFnAttribute(Attribute::Naked))
4560     return false;
4561 
4562   return true;
4563 }
4564 
cloneImpl() const4565 FreezeInst *FreezeInst::cloneImpl() const {
4566   return new FreezeInst(getOperand(0));
4567 }
4568