//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CheckedArithmetic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

// The LLVM_DEBUG() uses below require a debug type; "instructions" is an
// assumed name for it.
#define DEBUG_TYPE "instructions"

using namespace llvm;

static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));

//===----------------------------------------------------------------------===//
//                            AllocaInst Class
//===----------------------------------------------------------------------===//

std::optional<TypeSize>
AllocaInst::getAllocationSize(const DataLayout &DL) const {
  TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
  if (isArrayAllocation()) {
    auto *C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return std::nullopt;
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
    auto CheckedProd =
        checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
    if (!CheckedProd)
      return std::nullopt;
    return TypeSize::getFixed(*CheckedProd);
  }
  return Size;
}

std::optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  std::optional<TypeSize> Size = getAllocationSize(DL);
  if (!Size)
    return std::nullopt;
  auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
                                        static_cast<TypeSize::ScalarTy>(8));
  if (!CheckedProd)
    return std::nullopt;
  return TypeSize::get(*CheckedProd, Size->isScalable());
}
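
// Example: for "%a = alloca i32, i64 4" under a DataLayout where i32
// occupies 4 bytes, getAllocationSize() returns TypeSize::getFixed(16) and
// getAllocationSizeInBits() returns TypeSize::getFixed(128); a non-constant
// array size yields std::nullopt in both.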

//===----------------------------------------------------------------------===//
//                             SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
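
// Illustrative IR: "select i1 %c, i32 %a, i32 %b" and
// "select <4 x i1> %m, <4 x i32> %a, <4 x i32> %b" are both valid; pairing a
// <4 x i1> condition with <8 x i32> (or scalar) values is rejected above.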

//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  copyIncomingBlocks(drop_begin(blocks(), Idx + 1), Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
                                    bool DeletePHIIfEmpty) {
  SmallDenseSet<unsigned> RemoveIndices;
  for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
    if (Predicate(Idx))
      RemoveIndices.insert(Idx);

  if (RemoveIndices.empty())
    return;

  // Remove operands.
  auto NewOpEnd = remove_if(operands(), [&](Use &U) {
    return RemoveIndices.contains(U.getOperandNo());
  });
  for (Use &U : make_range(NewOpEnd, op_end()))
    U.set(nullptr);

  // Remove incoming blocks.
  (void)std::remove_if(const_cast<block_iterator>(block_begin()),
                       const_cast<block_iterator>(block_end()),
                       [&](BasicBlock *&BB) {
                         return RemoveIndices.contains(&BB - block_begin());
                       });

  setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 1.5 times.
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the
/// same value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return PoisonValue::get(getType());
  return ConstantValue;
}
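
// Illustrative IR: "%p = phi i32 [ %v, %bb1 ], [ %v, %bb2 ], [ %p, %bb3 ]"
// always merges %v (self-references are skipped), so hasConstantValue()
// returns %v; if every incoming value is the PHI itself, it returns poison.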

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}
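
// Illustrative IR: for "%p = phi i32 [ %v, %bb1 ], [ undef, %bb2 ]",
// hasConstantValue() returns null because undef is a distinct incoming
// value, but hasConstantOrUndefValue() returns true: ignoring undefs and
// self-references, every incoming value is %v.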

//===----------------------------------------------------------------------===//
//                       LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       InsertPosition InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
//                        CallBase Implementation
//===----------------------------------------------------------------------===//

CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           InsertPosition InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
                           InsertPosition InsertPt) {
  SmallVector<OperandBundleDef, 2> OpDefs;
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = CI->getOperandBundleAt(i);
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return CallBase::Create(CI, OpDefs, InsertPt);
}

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

FPClassTest CallBase::getRetNoFPClass() const {
  FPClassTest Mask = Attrs.getRetNoFPClass();

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getRetNoFPClass();
  return Mask;
}

FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
  FPClassTest Mask = Attrs.getParamNoFPClass(i);

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getParamNoFPClass(i);
  return Mask;
}

std::optional<ConstantRange> CallBase::getRange() const {
  const Attribute RangeAttr = getRetAttr(llvm::Attribute::Range);
  if (RangeAttr.isValid())
    return RangeAttr.getRange();
  return std::nullopt;
}

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getRetDereferenceableBytes() > 0 &&
      !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Kind, &Index))
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.
  switch (Kind) {
  case Attribute::ReadNone:
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
  case Attribute::ReadOnly:
    return !hasClobberingOperandBundles();
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}
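
// Note the asymmetry above: an attribute on the call site itself is trusted
// as-is, but one inherited from the callee declaration is weakened when
// operand bundles may read or clobber memory. For example, a "deopt" bundle
// keeps a callee-declared readnone parameter from being treated as readnone
// at this call site, while readonly still holds.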

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

template Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;

template <typename AK>
Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                                 AK Kind) const {
  Value *V = getCalledOperand();

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getParamAttr(ArgNo, Kind);

  return Attribute();
}
template Attribute
CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                       Attribute::AttrKind Kind) const;
template Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                                          StringRef Kind) const;

void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}

CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Otherwise fall back to a binary search that uses the fact that bundles
  /// usually have a similar number of arguments to get faster convergence:
  /// the probe position is interpolated rather than taken at the midpoint.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// The interpolation below needs a fractional value; to avoid floating
  /// point arithmetic, we use an integral value scaled by this constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}
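
// Worked example of the interpolation step (shown with three bundles purely
// to keep the arithmetic small; the fast path only runs with eight or more):
// for bundles covering operand ranges [4,7), [7,10), and [10,13), a query
// for OpIdx 11 computes ScaledOperandPerBundle = 1024 * (13 - 4) / 3 = 3072
// and probes Begin + ((11 - 4) * 1024) / 3072 = Begin + 2, landing directly
// on the [10,13) bundle with no bisection step needed.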

CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     InsertPosition InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}
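
// Note that when a bundle is actually added (or removed below), a fresh call
// instruction is created and returned; the original instruction is not
// erased, so callers must RAUW and erase it themselves for the replacement
// to take effect.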

CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
                                        InsertPosition InsertPt) {
  SmallVector<OperandBundleDef, 1> Bundles;
  bool CreateNew = false;

  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CB->getOperandBundleAt(I);
    if (Bundle.getTagID() == ID) {
      CreateNew = true;
      continue;
    }
    Bundles.emplace_back(Bundle);
  }

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
}

bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth and kcfi) forces a callsite to be at least readonly.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

bool CallBase::hasClobberingOperandBundles() const {
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}
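
// In other words: "deopt" and "funclet" bundles make a call site count as
// reading memory but not clobbering it, an unrecognized bundle tag
// conservatively counts as both, and "ptrauth"/"kcfi" bundles carry no
// memory semantics at all.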

MemoryEffects CallBase::getMemoryEffects() const {
  MemoryEffects ME = getAttributes().getMemoryEffects();
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
    MemoryEffects FnME = Fn->getMemoryEffects();
    if (hasOperandBundles()) {
      // TODO: Add a method to get memory effects for operand bundles instead.
      if (hasReadingOperandBundles())
        FnME |= MemoryEffects::readOnly();
      if (hasClobberingOperandBundles())
        FnME |= MemoryEffects::writeOnly();
    }
    ME &= FnME;
  }
  return ME;
}
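
// Note on getMemoryEffects() above: the call-site effects and the
// (bundle-adjusted) callee effects are intersected. For example, a readonly
// call site invoking a function that only writes argument memory intersects
// to "no accesses": read-only and argument-write-only have no modref
// behavior in common.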
void CallBase::setMemoryEffects(MemoryEffects ME) {
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
}

/// Determine if the function does not access memory.
bool CallBase::doesNotAccessMemory() const {
  return getMemoryEffects().doesNotAccessMemory();
}
void CallBase::setDoesNotAccessMemory() {
  setMemoryEffects(MemoryEffects::none());
}

/// Determine if the function does not access or only reads memory.
bool CallBase::onlyReadsMemory() const {
  return getMemoryEffects().onlyReadsMemory();
}
void CallBase::setOnlyReadsMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
}

/// Determine if the function does not access or only writes memory.
bool CallBase::onlyWritesMemory() const {
  return getMemoryEffects().onlyWritesMemory();
}
void CallBase::setOnlyWritesMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
}

/// Determine if the call can access memory only using pointers based
/// on its arguments.
bool CallBase::onlyAccessesArgMemory() const {
  return getMemoryEffects().onlyAccessesArgPointees();
}
void CallBase::setOnlyAccessesArgMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
}

/// Determine if the function may only access memory that is
/// inaccessible from the IR.
bool CallBase::onlyAccessesInaccessibleMemory() const {
  return getMemoryEffects().onlyAccessesInaccessibleMem();
}
void CallBase::setOnlyAccessesInaccessibleMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
}

/// Determine if the function may only access memory that is
/// either inaccessible from the IR or pointed to by its arguments.
bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
}
void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
  setMemoryEffects(getMemoryEffects() &
                   MemoryEffects::inaccessibleOrArgMemOnly());
}

//===----------------------------------------------------------------------===//
//                        CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           InsertPosition InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update the profile weight for this call instruction by scaling it by the
// ratio S/T. For call instructions, "branch_weights" metadata is interpreted
// as a call count rather than branch probabilities.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}
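
// For example, updateProfWeight(1, 4) (say, after attributing a quarter of
// the original entry count to a cloned path) turns a call count of 200 into
// 50; an S greater than T scales counts up the same way.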

//===----------------------------------------------------------------------===//
//                        InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               InsertPosition InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

void InvokeInst::updateProfWeight(uint64_t S, uint64_t T) {
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}

//===----------------------------------------------------------------------===//
//                        CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               InsertPosition InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
//                        ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

//===----------------------------------------------------------------------===//
//                        ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
//                     CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values,
                                     InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
//                      CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
//                       CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
//                       FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
//                      UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}

//===----------------------------------------------------------------------===//
//                        BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

//===----------------------------------------------------------------------===//
//                        AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos) {
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       InsertPosition InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}
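
// Note the conservative default in isArrayAllocation() above: a non-constant
// array size is treated as an array allocation even though it might be 1 at
// run time; only the literal constant 1 (the implicit size of a plain
// "alloca i32") returns false.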

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}
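
// Illustrative IR: "%buf = alloca [16 x i8]" in the entry block is static
// and becomes a fixed slot in the stack frame, whereas the same alloca in a
// loop body, or "alloca i8, i64 %n", is dynamic and moves the stack pointer
// at run time.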
1244
1245 //===----------------------------------------------------------------------===//
1246 // LoadInst Implementation
1247 //===----------------------------------------------------------------------===//
1248
AssertOK()1249 void LoadInst::AssertOK() {
1250 assert(getOperand(0)->getType()->isPointerTy() &&
1251 "Ptr must have pointer type.");
1252 }
1253
computeLoadStoreDefaultAlign(Type * Ty,InsertPosition Pos)1254 static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos) {
1255 assert(Pos.isValid() &&
1256 "Insertion position cannot be null when alignment not provided!");
1257 BasicBlock *BB = Pos.getBasicBlock();
1258 assert(BB->getParent() &&
1259 "BB must be in a Function when alignment not provided!");
1260 const DataLayout &DL = BB->getDataLayout();
1261 return DL.getABITypeAlign(Ty);
1262 }
1263
LoadInst(Type * Ty,Value * Ptr,const Twine & Name,InsertPosition InsertBef)1264 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1265 InsertPosition InsertBef)
1266 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1267
LoadInst(Type * Ty,Value * Ptr,const Twine & Name,bool isVolatile,InsertPosition InsertBef)1268 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1269 InsertPosition InsertBef)
1270 : LoadInst(Ty, Ptr, Name, isVolatile,
1271 computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1272
LoadInst(Type * Ty,Value * Ptr,const Twine & Name,bool isVolatile,Align Align,InsertPosition InsertBef)1273 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1274 Align Align, InsertPosition InsertBef)
1275 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1276 SyncScope::System, InsertBef) {}
1277
LoadInst(Type * Ty,Value * Ptr,const Twine & Name,bool isVolatile,Align Align,AtomicOrdering Order,SyncScope::ID SSID,InsertPosition InsertBef)1278 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1279 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
1280 InsertPosition InsertBef)
1281 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1282 setVolatile(isVolatile);
1283 setAlignment(Align);
1284 setAtomic(Order, SSID);
1285 AssertOK();
1286 setName(Name);
1287 }
1288
1289 //===----------------------------------------------------------------------===//
1290 // StoreInst Implementation
1291 //===----------------------------------------------------------------------===//
1292
AssertOK()1293 void StoreInst::AssertOK() {
1294 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1295 assert(getOperand(1)->getType()->isPointerTy() &&
1296 "Ptr must have pointer type!");
1297 }
1298
StoreInst(Value * val,Value * addr,InsertPosition InsertBefore)1299 StoreInst::StoreInst(Value *val, Value *addr, InsertPosition InsertBefore)
1300 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1301
StoreInst(Value * val,Value * addr,bool isVolatile,InsertPosition InsertBefore)1302 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1303 InsertPosition InsertBefore)
1304 : StoreInst(val, addr, isVolatile,
1305 computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1306 InsertBefore) {}
1307
StoreInst(Value * val,Value * addr,bool isVolatile,Align Align,InsertPosition InsertBefore)1308 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1309 InsertPosition InsertBefore)
1310 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1311 SyncScope::System, InsertBefore) {}
1312
StoreInst(Value * val,Value * addr,bool isVolatile,Align Align,AtomicOrdering Order,SyncScope::ID SSID,InsertPosition InsertBefore)1313 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1314 AtomicOrdering Order, SyncScope::ID SSID,
1315 InsertPosition InsertBefore)
1316 : Instruction(Type::getVoidTy(val->getContext()), Store,
1317 OperandTraits<StoreInst>::op_begin(this),
1318 OperandTraits<StoreInst>::operands(this), InsertBefore) {
1319 Op<0>() = val;
1320 Op<1>() = addr;
1321 setVolatile(isVolatile);
1322 setAlignment(Align);
1323 setAtomic(Order, SSID);
1324 AssertOK();
1325 }

//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     InsertPosition InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
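// Illustrative note (assumption, not original source text): the result type
// built above is the two-element struct { <cmp type>, i1 }, matching IR such
// as
//   %res = cmpxchg ptr %p, i32 %cmp, i32 %new seq_cst seq_cst
//   %old = extractvalue { i32, i1 } %res, 0
//   %ok  = extractvalue { i32, i1 } %res, 1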

//===----------------------------------------------------------------------===//
// AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "atomicrmw instructions can only be atomic.");
  assert(Ordering != AtomicOrdering::Unordered &&
         "atomicrmw instructions cannot be unordered.");
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
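// Illustrative note (assumption, not original source text): the result type
// is Val->getType() because atomicrmw yields the value memory held *before*
// the operation, e.g.
//   %old = atomicrmw add ptr %p, i32 1 seq_cst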

StringRef AtomicRMWInst::getOperationName(BinOp Op) {
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
  case AtomicRMWInst::Nand:
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
  case AtomicRMWInst::UMax:
    return "umax";
  case AtomicRMWInst::UMin:
    return "umin";
  case AtomicRMWInst::FAdd:
    return "fadd";
  case AtomicRMWInst::FSub:
    return "fsub";
  case AtomicRMWInst::FMax:
    return "fmax";
  case AtomicRMWInst::FMin:
    return "fmin";
  case AtomicRMWInst::UIncWrap:
    return "uinc_wrap";
  case AtomicRMWInst::UDecWrap:
    return "udec_wrap";
  case AtomicRMWInst::BAD_BINOP:
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}

//===----------------------------------------------------------------------===//
// FenceInst Implementation
//===----------------------------------------------------------------------===//

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
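// Illustrative note (assumption, not original source text): this constructor
// covers both plain and scoped fences, e.g.
//   fence seq_cst
//   fence syncscope("singlethread") acquire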

//===----------------------------------------------------------------------===//
// GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}

Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
    if (!Ty)
      return Ty;
  }
  return Ty;
}
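// Illustrative note (assumption, not original source text): the first index
// is skipped via slice(1) because it steps over the pointer operand itself
// and never changes the indexed type. E.g. for Ty = {i32, [4 x float]} and
// IdxList = {0, 1, 2}: index 1 selects [4 x float], index 2 selects float,
// so the function returns float.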

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
bool GetElementPtrInst::hasAllZeroIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero())
        return false;
    } else {
      return false;
    }
  }
  return true;
}

/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
bool GetElementPtrInst::hasAllConstantIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (!isa<ConstantInt>(getOperand(i)))
      return false;
  }
  return true;
}

void GetElementPtrInst::setNoWrapFlags(GEPNoWrapFlags NW) {
  SubclassOptionalData = NW.getRaw();
}

void GetElementPtrInst::setIsInBounds(bool B) {
  GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
  if (B)
    NW |= GEPNoWrapFlags::inBounds();
  else
    NW = NW.withoutInBounds();
  setNoWrapFlags(NW);
}

GEPNoWrapFlags GetElementPtrInst::getNoWrapFlags() const {
  return cast<GEPOperator>(this)->getNoWrapFlags();
}

bool GetElementPtrInst::isInBounds() const {
  return cast<GEPOperator>(this)->isInBounds();
}

bool GetElementPtrInst::hasNoUnsignedSignedWrap() const {
  return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
}

bool GetElementPtrInst::hasNoUnsignedWrap() const {
  return cast<GEPOperator>(this)->hasNoUnsignedWrap();
}

bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

bool GetElementPtrInst::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
                                                ConstantOffset);
}

//===----------------------------------------------------------------------===//
// ExtractElementInst Implementation
//===----------------------------------------------------------------------===//

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       InsertPosition InsertBef)
    : Instruction(
          cast<VectorType>(Val->getType())->getElementType(), ExtractElement,
          OperandTraits<ExtractElementInst>::op_begin(this), 2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
// InsertElementInst Implementation
//===----------------------------------------------------------------------===//

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     InsertPosition InsertBef)
    : Instruction(Vec->getType(), InsertElement,
                  OperandTraits<InsertElementInst>::op_begin(this), 3,
                  InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false; // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false; // Second operand must be the vector's element type.

  if (!Index->getType()->isIntegerTy())
    return false; // Third operand of insertelement must be an integer.
  return true;
}

//===----------------------------------------------------------------------===//
// ShuffleVectorInst Implementation
//===----------------------------------------------------------------------===//

static Value *createPlaceholderForShuffleVector(Value *V) {
  assert(V && "Cannot create placeholder of nullptr V");
  return PoisonValue::get(V->getType());
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     InsertPosition InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}

void ShuffleVectorInst::commute() {
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    if (MaskElt == PoisonMaskElem) {
      NewMask[i] = PoisonMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  setShuffleMask(NewMask);
  Op<0>().swap(Op<1>());
}
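// Illustrative note (assumption, not original source text): commuting
//   shufflevector <4 x i32> %a, <4 x i32> %b, mask <0, 5, 2, 7>
// swaps the operands and remaps the mask to <4, 1, 6, 3>, selecting exactly
// the same elements as before.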

bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
    return false;

  // Make sure the mask elements make sense.
  int V1Size =
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
      return false;

  if (isa<ScalableVectorType>(V1->getType()))
    if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
      return false;

  return true;
}

bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be a vector of i32, and must be the same kind of vector as the
  // input vectors.
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size * 2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size * 2)
        return false;
    return true;
  }

  return false;
}

void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();

  if (isa<ConstantAggregateZero>(Mask)) {
    Result.resize(EC.getKnownMinValue(), 0);
    return;
  }

  Result.reserve(EC.getKnownMinValue());

  if (EC.isScalable()) {
    assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
           "Scalable vector shuffle mask must be undef or zeroinitializer");
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
      Result.emplace_back(MaskVal);
    return;
  }

  unsigned NumElts = EC.getKnownMinValue();

  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1
                                        : cast<ConstantInt>(C)->getZExtValue());
  }
}

void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
  ShuffleMask.assign(Mask.begin(), Mask.end());
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
}

Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                          Type *ResultTy) {
  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
  if (isa<ScalableVectorType>(ResultTy)) {
    assert(all_equal(Mask) && "Unexpected shuffle");
    Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
    if (Mask[0] == 0)
      return Constant::getNullValue(VecTy);
    return PoisonValue::get(VecTy);
  }
  SmallVector<Constant *, 16> MaskConst;
  for (int Elem : Mask) {
    if (Elem == PoisonMaskElem)
      MaskConst.push_back(PoisonValue::get(Int32Ty));
    else
      MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
  }
  return ConstantVector::get(MaskConst);
}

static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  assert(!Mask.empty() && "Shuffle mask must contain elements");
  bool UsesLHS = false;
  bool UsesRHS = false;
  for (int I : Mask) {
    if (I == -1)
      continue;
    assert(I >= 0 && I < (NumOpElts * 2) &&
           "Out-of-bounds shuffle mask element");
    UsesLHS |= (I < NumOpElts);
    UsesRHS |= (I >= NumOpElts);
    if (UsesLHS && UsesRHS)
      return false;
  }
  // Allow for degenerate case: completely undef mask means neither source is
  // used.
  return UsesLHS || UsesRHS;
}
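// Illustrative note (assumption, not original source text): with
// NumOpElts = 4, masks <0, 1, 2, 3> (LHS only) and <4, 5, 6, 7> (RHS only)
// are single-source, while <0, 5, 2, 7> mixes both sources and is not.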

bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, NumSrcElts);
}

static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
    return false;
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != i && Mask[i] != (NumOpElts + i))
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, NumSrcElts);
}

bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;

  // The number of elements in the mask must be at least 2.
  if (NumSrcElts < 2)
    return false;

  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != (NumSrcElts - 1 - I) &&
        Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
      return false;
  }
  return true;
}
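// Illustrative note (assumption, not original source text): with
// NumSrcElts = 4, both <3, 2, 1, 0> (first source) and <7, 6, 5, 4> (second
// source) are reverse masks; undef (-1) elements are tolerated in between.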

bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != 0 && Mask[I] != NumSrcElts)
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Select is differentiated from identity. It requires using both sources.
  if (isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts) {
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int Sz = Mask.size();
  if (Sz < 2 || !isPowerOf2_32(Sz))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumSrcElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int I = 2; I < Sz; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[I - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Example: shufflevector <4 x n> A, <4 x n> B, <1, 2, 3, 4>
  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      continue;

    if (StartIndex == -1) {
      // Don't support a StartIndex that begins in the second input, or if the
      // first non-undef index would access below the StartIndex.
      if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
        return false;

      StartIndex = MaskEltVal - I;
      continue;
    }

    // Splice is sequential starting from StartIndex.
    if (MaskEltVal != (StartIndex + I))
      return false;
  }

  if (StartIndex == -1)
    return false;

  // NOTE: This accepts StartIndex == 0 (COPY).
  Index = StartIndex;
  return true;
}

bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}
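// Illustrative note (assumption, not original source text): with
// NumSrcElts = 8, the mask <2, 3, 4, 5> extracts a 4-element subvector
// starting at Index = 2; a leading undef, e.g. <-1, 3, 4, 5>, still matches.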

bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
                                              int NumSrcElts, int &NumSubElts,
                                              int &Index) {
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements are in place within their
  // own span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements are in place within their
  // own span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
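// Illustrative note (assumption, not original source text): with
// NumSrcElts = 4, the mask <0, 4, 5, 3> keeps src0 in place and inserts the
// first two elements of src1 at offset 1, so NumSubElts = 2 and Index = 1.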

bool ShuffleVectorInst::isIdentityWithPadding() const {
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  ArrayRef<int> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}

bool ShuffleVectorInst::isIdentityWithExtract() const {
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}

bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the
  // inputs, and neither of the inputs is an undef vector. If the mask picks
  // consecutive elements from both inputs, then this is a concatenation of
  // the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}

static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
                                        int ReplicationFactor, int VF) {
  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
         "Unexpected mask size.");

  for (int CurrElt : seq(VF)) {
    ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
    assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
           "Run out of mask?");
    Mask = Mask.drop_front(ReplicationFactor);
    if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
          return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
        }))
      return false;
  }
  assert(Mask.empty() && "Did not consume the whole mask?");

  return true;
}

bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
                                          int &ReplicationFactor, int &VF) {
  // The undef-less case is trivial.
  if (!llvm::is_contained(Mask, PoisonMaskElem)) {
    ReplicationFactor =
        Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
    if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
      return false;
    VF = Mask.size() / ReplicationFactor;
    return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
  }

  // However, if the mask contains undefs, we have to enumerate possible tuples
  // and pick one. There are bounds on replication factor: [1, mask size]
  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle).
  // Additionally, mask size is a replication factor multiplied by vector size,
  // which further significantly reduces the search space.

  // Before doing that, let's perform basic correctness checking first.
  int Largest = -1;
  for (int MaskElt : Mask) {
    if (MaskElt == PoisonMaskElem)
      continue;
    // Elements must be in non-decreasing order.
    if (MaskElt < Largest)
      return false;
    Largest = std::max(Largest, MaskElt);
  }

  // Prefer a larger replication factor if all else is equal.
  for (int PossibleReplicationFactor :
       reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
    if (Mask.size() % PossibleReplicationFactor != 0)
      continue;
    int PossibleVF = Mask.size() / PossibleReplicationFactor;
    if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
                                     PossibleVF))
      continue;
    ReplicationFactor = PossibleReplicationFactor;
    VF = PossibleVF;
    return true;
  }

  return false;
}
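// Illustrative note (assumption, not original source text): the mask
// <0, 0, 1, 1, 2, 2> replicates each of VF = 3 source elements
// ReplicationFactor = 2 times; when undefs are present, the largest factor
// that still fits is preferred by the loop above.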

bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
                                          int &VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  if (ShuffleMask.size() % VF != 0)
    return false;
  ReplicationFactor = ShuffleMask.size() / VF;

  return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
}

bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
  if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
      Mask.size() % VF != 0)
    return false;
  for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
    ArrayRef<int> SubMask = Mask.slice(K, VF);
    if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
      continue;
    SmallBitVector Used(VF, false);
    for (int Idx : SubMask) {
      if (Idx != PoisonMaskElem && Idx < VF)
        Used.set(Idx);
    }
    if (!Used.all())
      return false;
  }
  return true;
}
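// Illustrative note (assumption, not original source text): with VF = 2, the
// mask <0, 1, 0, 1> qualifies (every 2-element group covers all elements of
// the first source), while <0, 0, 1, 1> does not (each group repeats a
// single element).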

/// Return true if every VF-sized group of this shuffle's mask elements is
/// either entirely poison or uses all VF elements of the first source.
bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;
  if (!isSingleSourceMask(ShuffleMask, VF))
    return false;

  return isOneUseSingleSourceMask(ShuffleMask, VF);
}

bool ShuffleVectorInst::isInterleave(unsigned Factor) {
  FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
  // shufflevector can only interleave fixed-length vectors; for scalable
  // vectors, see the @llvm.vector.interleave2 intrinsic.
  if (!OpTy)
    return false;
  unsigned OpNumElts = OpTy->getNumElements();

  return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
}

bool ShuffleVectorInst::isInterleaveMask(
    ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
    SmallVectorImpl<unsigned> &StartIndexes) {
  unsigned NumElts = Mask.size();
  if (NumElts % Factor)
    return false;

  unsigned LaneLen = NumElts / Factor;
  if (!isPowerOf2_32(LaneLen))
    return false;

  StartIndexes.resize(Factor);

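  // Illustrative example (assumption, not original source text): a factor-3
  // interleave mask has the shape <x, y, z, x+1, y+1, z+1, ...>. E.g. with
  // Factor = 3 and LaneLen = 4 over two 6-element inputs:
  //   <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>   (x = 0, y = 4, z = 8)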
  // Check whether each element matches the general interleaved rule.
  // Ignore undef elements, as long as the defined elements match the rule.
  // Outer loop processes all factors (x, y, z in the above example).
  unsigned I = 0, J;
  for (; I < Factor; I++) {
    unsigned SavedLaneValue;
    unsigned SavedNoUndefs = 0;

    // Inner loop processes consecutive accesses (x, x+1, ... in the example).
    for (J = 0; J < LaneLen - 1; J++) {
      // Lane computes x's position in the Mask.
      unsigned Lane = J * Factor + I;
      unsigned NextLane = Lane + Factor;
      int LaneValue = Mask[Lane];
      int NextLaneValue = Mask[NextLane];

      // If both are defined, values must be sequential.
      if (LaneValue >= 0 && NextLaneValue >= 0 &&
          LaneValue + 1 != NextLaneValue)
        break;

      // If the next value is undef, save the current one as reference.
      if (LaneValue >= 0 && NextLaneValue < 0) {
        SavedLaneValue = LaneValue;
        SavedNoUndefs = 1;
      }

      // Undefs are allowed, but defined elements must still be consecutive:
      // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
      // Verify this by storing the last non-undef followed by an undef, and
      // checking that following non-undef mask elements are incremented by
      // the corresponding distance.
      if (SavedNoUndefs > 0 && LaneValue < 0) {
        SavedNoUndefs++;
        if (NextLaneValue >= 0 &&
            SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
          break;
      }
    }

    if (J < LaneLen - 1)
      return false;

    int StartMask = 0;
    if (Mask[I] >= 0) {
      // StartMask defined by the first element in the lane (J == 0).
      StartMask = Mask[I];
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
      // StartMask defined by the last value in the lane.
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
    } else if (SavedNoUndefs > 0) {
      // StartMask defined by some non-undef value seen in the J loop.
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
    }
    // else StartMask remains set to 0, i.e. all elements are undefs.

    if (StartMask < 0)
      return false;
    // We must stay within the vectors; this case can happen with undefs.
    if (StartMask + LaneLen > NumInputElts)
      return false;

    StartIndexes[I] = StartMask;
  }

  return true;
}

/// Check if the mask is a DE-interleave mask of the given factor
/// \p Factor like:
///     <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
bool ShuffleVectorInst::isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,
                                                   unsigned Factor,
                                                   unsigned &Index) {
  // Check all potential start indices from 0 to (Factor - 1).
  for (unsigned Idx = 0; Idx < Factor; Idx++) {
    unsigned I = 0;

    // Check that elements are in ascending order by Factor. Ignore undef
    // elements.
    for (; I < Mask.size(); I++)
      if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
        break;

    if (I == Mask.size()) {
      Index = Idx;
      return true;
    }
  }

  return false;
}
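// Illustrative note (assumption, not original source text): for Factor = 4,
// the mask <1, 5, 9, 13> de-interleaves the lane starting at Index = 1, i.e.
// it picks every fourth element beginning with element 1.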

/// Try to lower a vector shuffle as a bit rotation.
///
/// Look for a repeated rotation pattern in each sub group.
/// Returns an element-wise left bit rotation amount or -1 if failed.
static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
  int NumElts = Mask.size();
  assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");

  int RotateAmt = -1;
  for (int i = 0; i != NumElts; i += NumSubElts) {
    for (int j = 0; j != NumSubElts; ++j) {
      int M = Mask[i + j];
      if (M < 0)
        continue;
      if (M < i || M >= i + NumSubElts)
        return -1;
      int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
      if (0 <= RotateAmt && Offset != RotateAmt)
        return -1;
      RotateAmt = Offset;
    }
  }
  return RotateAmt;
}

bool ShuffleVectorInst::isBitRotateMask(
    ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
    unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
  for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
    int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
    if (EltRotateAmt < 0)
      continue;
    RotateAmt = EltRotateAmt * EltSizeInBits;
    return true;
  }

  return false;
}
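// Illustrative note (assumption, not original source text): for the mask
// <1, 0, 3, 2> with EltSizeInBits = 8, each 2-element subgroup is rotated by
// one element, so (assuming MinSubElts <= 2 <= MaxSubElts) the match reports
// NumSubElts = 2 and RotateAmt = 8 bits.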

//===----------------------------------------------------------------------===//
// InsertValueInst Class
//===----------------------------------------------------------------------===//

void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
             Val->getType() &&
         "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue,
                  OperandTraits<InsertValueInst>::op_begin(this), 2),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}

//===----------------------------------------------------------------------===//
// ExtractValueInst Class
//===----------------------------------------------------------------------===//

void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
      Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}

// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// aggregate type.
//
Type *ExtractValueInst::getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
      Agg = ST->getElementType(Index);
    } else {
      // Not a valid type to index into.
      return nullptr;
    }
  }
  return const_cast<Type *>(Agg);
}
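// Illustrative note (assumption, not original source text): for
// Agg = {i32, [4 x i8]} and Idxs = {1, 2}, index 1 selects [4 x i8] and
// index 2 selects i8, so getIndexedType returns i8; an out-of-range array
// index such as {1, 7} yields nullptr.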

//===----------------------------------------------------------------------===//
// UnaryOperator Class
//===----------------------------------------------------------------------===//

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
                             const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, const Twine &Name,
                                     InsertPosition InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

//===----------------------------------------------------------------------===//
// BinaryOperator Class
//===----------------------------------------------------------------------===//

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
                               const Twine &Name, InsertPosition InsertBefore)
    : Instruction(Ty, iType, OperandTraits<BinaryOperator>::op_begin(this),
                  OperandTraits<BinaryOperator>::operands(this), InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       InsertPosition InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          InsertPosition InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
                            InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             InsertPosition InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          InsertPosition InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

// Exchange the two operands to this instruction. This is safe to use on any
// binary instruction and does not modify the semantics of the instruction.
// Returns true (failure) if the instruction is not commutative, and false
// after successfully swapping the operands.
bool BinaryOperator::swapOperands() {
  if (!isCommutative())
    return true; // Can't commute operands.
  Op<0>().swap(Op<1>());
  return false;
}

//===----------------------------------------------------------------------===//
// FPMathOperator Class
//===----------------------------------------------------------------------===//

float FPMathOperator::getFPAccuracy() const {
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0;
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}
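// Illustrative note (assumption, not original source text): an instruction
// annotated with
//   %r = fdiv float %x, %y, !fpmath !0
//   !0 = !{float 2.500000e+00}
// reports an accuracy of 2.5 ULPs here; without !fpmath metadata the result
// is 0.0, which callers treat as the default (maximally precise) requirement.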

//===----------------------------------------------------------------------===//
// CastInst Class
//===----------------------------------------------------------------------===//

// Just determine if this cast only deals with integral->integral conversion.
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target information may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true; // BitCast never modifies bits.
  case Instruction::PtrToInt:
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}

bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// * %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
    Instruction::CastOps firstOp, Instruction::CastOps secondOp,
    Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
    Type *DstIntPtrTy) {
  // Define the 169 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below. The rows correspond to firstOp, the columns
  // correspond to secondOp. In looking at the table below, keep in mind
  // the following cast properties:
  //
  //           Size Compare     Source              Destination
  // Operator   Src ? Size      Type       Sign     Type       Sign
  // --------  ------------  --------------------  ---------------------
  // TRUNC          >        Integer      Any      Integral   Any
  // ZEXT           <        Integral     Unsigned Integer    Any
  // SEXT           <        Integral     Signed   Integer    Any
  // FPTOUI        n/a       FloatPt      n/a      Integral   Unsigned
  // FPTOSI        n/a       FloatPt      n/a      Integral   Signed
  // UITOFP        n/a       Integral     Unsigned FloatPt    n/a
  // SITOFP        n/a       Integral     Signed   FloatPt    n/a
  // FPTRUNC        >        FloatPt      n/a      FloatPt    n/a
  // FPEXT          <        FloatPt      n/a      FloatPt    n/a
  // PTRTOINT      n/a       Pointer      n/a      Integral   Unsigned
  // INTTOPTR      n/a       Integral     Unsigned Pointer    n/a
  // BITCAST        =        FirstClass   n/a      FirstClass n/a
  // ADDRSPCST     n/a       Pointer      n/a      Pointer    n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc. We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
      Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
      // T        F  F  U  S  F  F  P  I  B  A  -+
      // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
      // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
      // N  X  X  U  S  F  F  N  X  N  2  V  V   |
      // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
      {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc          -+
      {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt            |
      {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt            |
      {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI          |
      {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI          |
      { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
      { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP          |
      { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc         |
      { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt           |
      {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt        |
      { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr        |
      {  5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast         |
      {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast  -+
  };
2815
2816 // TODO: This logic could be encoded into the table above and handled in the
2817 // switch below.
2818 // If either of the casts are a bitcast from scalar to vector, disallow the
2819 // merging. However, any pair of bitcasts are allowed.
2820 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2821 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2822 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2823
2824 // Check if any of the casts convert scalars <-> vectors.
2825 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2826 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2827 if (!AreBothBitcasts)
2828 return 0;
2829
2830 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2831 [secondOp-Instruction::CastOpsBegin];
2832 switch (ElimCase) {
2833 case 0:
2834 // Categorically disallowed.
2835 return 0;
2836 case 1:
2837 // Allowed, use first cast's opcode.
2838 return firstOp;
2839 case 2:
2840 // Allowed, use second cast's opcode.
2841 return secondOp;
2842 case 3:
2843 // No-op cast in second op implies firstOp as long as the DestTy
2844 // is integer and we are not converting between a vector and a
2845 // non-vector type.
2846 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2847 return firstOp;
2848 return 0;
2849 case 4:
2850 // No-op cast in second op implies firstOp as long as the DestTy
2851 // matches MidTy.
2852 if (DstTy == MidTy)
2853 return firstOp;
2854 return 0;
2855 case 5:
2856 // No-op cast in first op implies secondOp as long as the SrcTy
2857 // is an integer.
2858 if (SrcTy->isIntegerTy())
2859 return secondOp;
2860 return 0;
2861 case 7: {
2862 // Disable inttoptr/ptrtoint optimization if enabled.
2863 if (DisableI2pP2iOpt)
2864 return 0;
2865
2866 // Cannot simplify if address spaces are different!
2867 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2868 return 0;
2869
2870 unsigned MidSize = MidTy->getScalarSizeInBits();
2871 // We can still fold this without knowing the actual sizes as long we
2872 // know that the intermediate pointer is the largest possible
2873 // pointer size.
2874 // FIXME: Is this always true?
2875 if (MidSize == 64)
2876 return Instruction::BitCast;
2877
2878 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
2879 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
2880 return 0;
2881 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
2882 if (MidSize >= PtrSize)
2883 return Instruction::BitCast;
2884 return 0;
2885 }
2886 case 8: {
2887 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
2888 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2889 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2890 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2891 unsigned DstSize = DstTy->getScalarSizeInBits();
2892 if (SrcTy == DstTy)
2893 return Instruction::BitCast;
2894 if (SrcSize < DstSize)
2895 return firstOp;
2896 if (SrcSize > DstSize)
2897 return secondOp;
2898 return 0;
2899 }
2900 case 9:
2901 // zext, sext -> zext, because sext can't sign extend after zext
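      // For illustration: %F = zext i8 %x to i16; %S = sext i16 %F to i32
      // folds to %Replacement = zext i8 %x to i32, since the high bits of %F
      // are already known to be zero.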
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful for checking the correctness of the sequence due to the
      // semantic change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode.
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast
      return Instruction::AddrSpaceCast;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful for checking the correctness of the sequence due to the
      // semantic change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode.
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful for checking the correctness of the sequence due to the
      // semantic change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode.
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}

CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
                           const Twine &Name, InsertPosition InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass.
  switch (op) {
  case Trunc:    return new TruncInst    (S, Ty, Name, InsertBefore);
  case ZExt:     return new ZExtInst     (S, Ty, Name, InsertBefore);
  case SExt:     return new SExtInst     (S, Ty, Name, InsertBefore);
  case FPTrunc:  return new FPTruncInst  (S, Ty, Name, InsertBefore);
  case FPExt:    return new FPExtInst    (S, Ty, Name, InsertBefore);
  case UIToFP:   return new UIToFPInst   (S, Ty, Name, InsertBefore);
  case SIToFP:   return new SIToFPInst   (S, Ty, Name, InsertBefore);
  case FPToUI:   return new FPToUIInst   (S, Ty, Name, InsertBefore);
  case FPToSI:   return new FPToSIInst   (S, Ty, Name, InsertBefore);
  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
  case BitCast:
    return new BitCastInst(S, Ty, Name, InsertBefore);
  case AddrSpaceCast:
    return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
  default:
    llvm_unreachable("Invalid opcode provided");
  }
}

CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
                                        InsertPosition InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
                                        InsertPosition InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name,
                                         InsertPosition InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
}
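
// A minimal usage sketch for the helpers above (the names V, Ctx, and It are
// illustrative, not from this file): given an i8 value V and an insertion
// point It,
//   CastInst::CreateZExtOrBitCast(V, Type::getInt32Ty(Ctx), "wide", It)
// creates a zext, while passing an i8 destination type of the same width
// would create a bitcast instead.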

/// Create a BitCast or a PtrToInt cast instruction.
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, const Twine &Name,
                                      InsertPosition InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
    Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
                                           const Twine &Name,
                                           InsertPosition InsertBefore) {
  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
    return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, bool isSigned,
                                      const Twine &Name,
                                      InsertPosition InsertBefore) {
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
         "Invalid integer cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::Trunc :
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
  return Create(opcode, C, Ty, Name, InsertBefore);
}
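
// For example (an illustrative sketch of the opcode selection above): an i64
// source with an i32 destination selects Trunc; i32 -> i64 selects SExt when
// isSigned is true and ZExt otherwise; equal widths select BitCast.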

CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, const Twine &Name,
                                 InsertPosition InsertBefore) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertBefore);
}

bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }
    }
  }

  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
    if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
      return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
    }
  }

  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Could still have vectors of pointers if the number of elements doesn't
  // match.
  if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
    return false;

  if (SrcBits != DestBits)
    return false;

  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
    return false;

  return true;
}

bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
                                          const DataLayout &DL) {
  // ptrtoint and inttoptr are not allowed on non-integral pointers.
  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
    if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));
  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
    if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));

  return isBitCastable(SrcTy, DestTy);
}

// Provide a way to get a "cast" where the cast opcode is inferred from the
// types and size of the operand. This essentially parallels the logic in the
// castIsValid function below. The following axiom should hold:
//   castIsValid(getCastOpcode(Val, Ty), Val, Ty)
// must not assert. In other words, this produces a "correct" casting opcode
// for the arguments passed to it.
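// For example, calling getCastOpcode on an i8 source with SrcIsSigned=false
// and an i32 destination returns ZExt, and castIsValid(ZExt, i8, i32) holds.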
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here.
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes; we'll need these.
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {                   // Casting to integral
    if (SrcTy->isIntegerTy()) {                  // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                            // int -> smaller int
      else if (DestBits > SrcBits) {             // it's an extension
        if (SrcIsSigned)
          return SExt;                           // signed -> SEXT
        else
          return ZExt;                           // unsigned -> ZEXT
      } else {
        return BitCast;                          // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {     // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                           // FP -> sint
      else
        return FPToUI;                           // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                            // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                           // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {      // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                  // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                           // sint -> FP
      else
        return UIToFP;                           // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {     // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                          // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                            // FP -> larger FP
      } else {
        return BitCast;                          // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                            // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                            // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                           // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                            // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}

//===----------------------------------------------------------------------===//
// CastInst SubClass Constructors
//===----------------------------------------------------------------------===//

/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits and whether we are dealing
  // with vector types; we'll need this later.
  bool SrcIsVec = isa<VectorType>(SrcTy);
  bool DstIsVec = isa<VectorType>(DstTy);
  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks
  // that scalars are not being converted to vectors or vectors to scalars).
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
                                : ElementCount::getFixed(0);
  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
                                : ElementCount::getFixed(0);

  // Switch on the opcode provided.
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC;
  case Instruction::PtrToInt:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. No bits change.
    // However, you can't cast pointers to anything but pointers.
    if (!SrcPtrTy != !DstPtrTy)
      return false;

    // For non-pointer cases, the cast is okay if the source and destination
    // bit widths are identical.
    if (!SrcPtrTy)
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();

    // If both are pointers then the address spaces must match.
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
      return false;

    // A vector of pointers must have the same number of elements.
    if (SrcIsVec && DstIsVec)
      return SrcEC == DstEC;
    if (SrcIsVec)
      return SrcEC == ElementCount::getFixed(1);
    if (DstIsVec)
      return DstEC == ElementCount::getFixed(1);

    return true;
  }
  case Instruction::AddrSpaceCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    if (!SrcPtrTy)
      return false;

    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
    if (!DstPtrTy)
      return false;

    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
      return false;

    return SrcEC == DstEC;
  }
  }
}

TruncInst::TruncInst(Value *S, Type *Ty, const Twine &Name,
                     InsertPosition InsertBefore)
    : CastInst(Ty, Trunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}

ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
                   InsertPosition InsertBefore)
    : CastInst(Ty, ZExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}

SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
                   InsertPosition InsertBefore)
    : CastInst(Ty, SExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}

FPTruncInst::FPTruncInst(Value *S, Type *Ty, const Twine &Name,
                         InsertPosition InsertBefore)
    : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}

FPExtInst::FPExtInst(Value *S, Type *Ty, const Twine &Name,
                     InsertPosition InsertBefore)
    : CastInst(Ty, FPExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}

UIToFPInst::UIToFPInst(Value *S, Type *Ty, const Twine &Name,
                       InsertPosition InsertBefore)
    : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}

SIToFPInst::SIToFPInst(Value *S, Type *Ty, const Twine &Name,
                       InsertPosition InsertBefore)
    : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}

FPToUIInst::FPToUIInst(Value *S, Type *Ty, const Twine &Name,
                       InsertPosition InsertBefore)
    : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}

FPToSIInst::FPToSIInst(Value *S, Type *Ty, const Twine &Name,
                       InsertPosition InsertBefore)
    : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}

PtrToIntInst::PtrToIntInst(Value *S, Type *Ty, const Twine &Name,
                           InsertPosition InsertBefore)
    : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}

IntToPtrInst::IntToPtrInst(Value *S, Type *Ty, const Twine &Name,
                           InsertPosition InsertBefore)
    : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}

BitCastInst::BitCastInst(Value *S, Type *Ty, const Twine &Name,
                         InsertPosition InsertBefore)
    : CastInst(Ty, BitCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}

AddrSpaceCastInst::AddrSpaceCastInst(Value *S, Type *Ty, const Twine &Name,
                                     InsertPosition InsertBefore)
    : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}

//===----------------------------------------------------------------------===//
// CmpInst Classes
//===----------------------------------------------------------------------===//

CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
                 Instruction *FlagsSource)
    : Instruction(ty, op, OperandTraits<CmpInst>::op_begin(this),
                  OperandTraits<CmpInst>::operands(this), InsertBefore) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
  if (FlagsSource)
    copyIRFlags(FlagsSource);
}

CmpInst *CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                         const Twine &Name, InsertPosition InsertBefore) {
  if (Op == Instruction::ICmp) {
    if (InsertBefore.isValid())
      return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
                          S1, S2, Name);
    else
      return new ICmpInst(CmpInst::Predicate(predicate),
                          S1, S2, Name);
  }

  if (InsertBefore.isValid())
    return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  else
    return new FCmpInst(CmpInst::Predicate(predicate),
                        S1, S2, Name);
}

CmpInst *CmpInst::CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1,
                                        Value *S2,
                                        const Instruction *FlagsSource,
                                        const Twine &Name,
                                        InsertPosition InsertBefore) {
  CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
  Inst->copyIRFlags(FlagsSource);
  return Inst;
}

void CmpInst::swapOperands() {
  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
    IC->swapOperands();
  else
    cast<FCmpInst>(this)->swapOperands();
}

bool CmpInst::isCommutative() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isCommutative();
  return cast<FCmpInst>(this)->isCommutative();
}

bool CmpInst::isEquality(Predicate P) {
  if (ICmpInst::isIntPredicate(P))
    return ICmpInst::isEquality(P);
  if (FCmpInst::isFPPredicate(P))
    return FCmpInst::isEquality(P);
  llvm_unreachable("Unsupported predicate kind");
}

CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown cmp predicate!");
    case ICMP_EQ: return ICMP_NE;
    case ICMP_NE: return ICMP_EQ;
    case ICMP_UGT: return ICMP_ULE;
    case ICMP_ULT: return ICMP_UGE;
    case ICMP_UGE: return ICMP_ULT;
    case ICMP_ULE: return ICMP_UGT;
    case ICMP_SGT: return ICMP_SLE;
    case ICMP_SLT: return ICMP_SGE;
    case ICMP_SGE: return ICMP_SLT;
    case ICMP_SLE: return ICMP_SGT;

    case FCMP_OEQ: return FCMP_UNE;
    case FCMP_ONE: return FCMP_UEQ;
    case FCMP_OGT: return FCMP_ULE;
    case FCMP_OLT: return FCMP_UGE;
    case FCMP_OGE: return FCMP_ULT;
    case FCMP_OLE: return FCMP_UGT;
    case FCMP_UEQ: return FCMP_ONE;
    case FCMP_UNE: return FCMP_OEQ;
    case FCMP_UGT: return FCMP_OLE;
    case FCMP_ULT: return FCMP_OGE;
    case FCMP_UGE: return FCMP_OLT;
    case FCMP_ULE: return FCMP_OGT;
    case FCMP_ORD: return FCMP_UNO;
    case FCMP_UNO: return FCMP_ORD;
    case FCMP_TRUE: return FCMP_FALSE;
    case FCMP_FALSE: return FCMP_TRUE;
  }
}
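
// Note the contrast with getSwappedPredicate() below: the inverse of
// (icmp sgt %a, %b) is (icmp sle %a, %b), i.e. logical negation, while the
// swapped predicate (icmp slt) yields the same result with the operands
// exchanged.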

StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default:                   return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ:   return "oeq";
  case FCmpInst::FCMP_OGT:   return "ogt";
  case FCmpInst::FCMP_OGE:   return "oge";
  case FCmpInst::FCMP_OLT:   return "olt";
  case FCmpInst::FCMP_OLE:   return "ole";
  case FCmpInst::FCMP_ONE:   return "one";
  case FCmpInst::FCMP_ORD:   return "ord";
  case FCmpInst::FCMP_UNO:   return "uno";
  case FCmpInst::FCMP_UEQ:   return "ueq";
  case FCmpInst::FCMP_UGT:   return "ugt";
  case FCmpInst::FCMP_UGE:   return "uge";
  case FCmpInst::FCMP_ULT:   return "ult";
  case FCmpInst::FCMP_ULE:   return "ule";
  case FCmpInst::FCMP_UNE:   return "une";
  case FCmpInst::FCMP_TRUE:  return "true";
  case ICmpInst::ICMP_EQ:    return "eq";
  case ICmpInst::ICMP_NE:    return "ne";
  case ICmpInst::ICMP_SGT:   return "sgt";
  case ICmpInst::ICMP_SGE:   return "sge";
  case ICmpInst::ICMP_SLT:   return "slt";
  case ICmpInst::ICMP_SLE:   return "sle";
  case ICmpInst::ICMP_UGT:   return "ugt";
  case ICmpInst::ICMP_UGE:   return "uge";
  case ICmpInst::ICMP_ULT:   return "ult";
  case ICmpInst::ICMP_ULE:   return "ule";
  }
}

raw_ostream &llvm::operator<<(raw_ostream &OS, CmpInst::Predicate Pred) {
  OS << CmpInst::getPredicateName(Pred);
  return OS;
}

ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown icmp predicate!");
    case ICMP_EQ: case ICMP_NE:
    case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
      return pred;
    case ICMP_UGT: return ICMP_SGT;
    case ICMP_ULT: return ICMP_SLT;
    case ICMP_UGE: return ICMP_SGE;
    case ICMP_ULE: return ICMP_SLE;
  }
}

ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown icmp predicate!");
    case ICMP_EQ: case ICMP_NE:
    case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
      return pred;
    case ICMP_SGT: return ICMP_UGT;
    case ICMP_SLT: return ICMP_ULT;
    case ICMP_SGE: return ICMP_UGE;
    case ICMP_SLE: return ICMP_ULE;
  }
}

CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown cmp predicate!");
    case ICMP_EQ: case ICMP_NE:
      return pred;
    case ICMP_SGT: return ICMP_SLT;
    case ICMP_SLT: return ICMP_SGT;
    case ICMP_SGE: return ICMP_SLE;
    case ICMP_SLE: return ICMP_SGE;
    case ICMP_UGT: return ICMP_ULT;
    case ICMP_ULT: return ICMP_UGT;
    case ICMP_UGE: return ICMP_ULE;
    case ICMP_ULE: return ICMP_UGE;

    case FCMP_FALSE: case FCMP_TRUE:
    case FCMP_OEQ: case FCMP_ONE:
    case FCMP_UEQ: case FCMP_UNE:
    case FCMP_ORD: case FCMP_UNO:
      return pred;
    case FCMP_OGT: return FCMP_OLT;
    case FCMP_OLT: return FCMP_OGT;
    case FCMP_OGE: return FCMP_OLE;
    case FCMP_OLE: return FCMP_OGE;
    case FCMP_UGT: return FCMP_ULT;
    case FCMP_ULT: return FCMP_UGT;
    case FCMP_UGE: return FCMP_ULE;
    case FCMP_ULE: return FCMP_UGE;
  }
}

bool CmpInst::isNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGE:
  case ICMP_SLE:
  case ICMP_UGE:
  case ICMP_ULE:
  case FCMP_OGE:
  case FCMP_OLE:
  case FCMP_UGE:
  case FCMP_ULE:
    return true;
  default:
    return false;
  }
}

bool CmpInst::isStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT:
  case ICMP_SLT:
  case ICMP_UGT:
  case ICMP_ULT:
  case FCMP_OGT:
  case FCMP_OLT:
  case FCMP_UGT:
  case FCMP_ULT:
    return true;
  default:
    return false;
  }
}

CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGE:
    return ICMP_SGT;
  case ICMP_SLE:
    return ICMP_SLT;
  case ICMP_UGE:
    return ICMP_UGT;
  case ICMP_ULE:
    return ICMP_ULT;
  case FCMP_OGE:
    return FCMP_OGT;
  case FCMP_OLE:
    return FCMP_OLT;
  case FCMP_UGE:
    return FCMP_UGT;
  case FCMP_ULE:
    return FCMP_ULT;
  default:
    return pred;
  }
}

CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT:
    return ICMP_SGE;
  case ICMP_SLT:
    return ICMP_SLE;
  case ICMP_UGT:
    return ICMP_UGE;
  case ICMP_ULT:
    return ICMP_ULE;
  case FCMP_OGT:
    return FCMP_OGE;
  case FCMP_OLT:
    return FCMP_OLE;
  case FCMP_UGT:
    return FCMP_UGE;
  case FCMP_ULT:
    return FCMP_ULE;
  default:
    return pred;
  }
}

CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
  assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");

  if (isStrictPredicate(pred))
    return getNonStrictPredicate(pred);
  if (isNonStrictPredicate(pred))
    return getStrictPredicate(pred);

  llvm_unreachable("Unknown predicate!");
}

CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_ULT:
    return CmpInst::ICMP_SLT;
  case CmpInst::ICMP_ULE:
    return CmpInst::ICMP_SLE;
  case CmpInst::ICMP_UGT:
    return CmpInst::ICMP_SGT;
  case CmpInst::ICMP_UGE:
    return CmpInst::ICMP_SGE;
  }
}

CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {
  assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_SLT:
    return CmpInst::ICMP_ULT;
  case CmpInst::ICMP_SLE:
    return CmpInst::ICMP_ULE;
  case CmpInst::ICMP_SGT:
    return CmpInst::ICMP_UGT;
  case CmpInst::ICMP_SGE:
    return CmpInst::ICMP_UGE;
  }
}

bool CmpInst::isUnsigned(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE: return true;
  }
}

bool CmpInst::isSigned(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE: return true;
  }
}

bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
                       ICmpInst::Predicate Pred) {
  assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
  switch (Pred) {
  case ICmpInst::Predicate::ICMP_EQ:
    return LHS.eq(RHS);
  case ICmpInst::Predicate::ICMP_NE:
    return LHS.ne(RHS);
  case ICmpInst::Predicate::ICMP_UGT:
    return LHS.ugt(RHS);
  case ICmpInst::Predicate::ICMP_UGE:
    return LHS.uge(RHS);
  case ICmpInst::Predicate::ICMP_ULT:
    return LHS.ult(RHS);
  case ICmpInst::Predicate::ICMP_ULE:
    return LHS.ule(RHS);
  case ICmpInst::Predicate::ICMP_SGT:
    return LHS.sgt(RHS);
  case ICmpInst::Predicate::ICMP_SGE:
    return LHS.sge(RHS);
  case ICmpInst::Predicate::ICMP_SLT:
    return LHS.slt(RHS);
  case ICmpInst::Predicate::ICMP_SLE:
    return LHS.sle(RHS);
  default:
    llvm_unreachable("Unexpected non-integer predicate.");
  }
}
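
// A worked example (illustrative): compare(APInt(8, 200), APInt(8, 100),
// ICMP_SLT) returns true, because the 8-bit pattern 200 is -56 when
// interpreted as signed, while ICMP_ULT on the same operands returns false.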

bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
                       FCmpInst::Predicate Pred) {
  APFloat::cmpResult R = LHS.compare(RHS);
  switch (Pred) {
  default:
    llvm_unreachable("Invalid FCmp Predicate");
  case FCmpInst::FCMP_FALSE:
    return false;
  case FCmpInst::FCMP_TRUE:
    return true;
  case FCmpInst::FCMP_UNO:
    return R == APFloat::cmpUnordered;
  case FCmpInst::FCMP_ORD:
    return R != APFloat::cmpUnordered;
  case FCmpInst::FCMP_UEQ:
    return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_OEQ:
    return R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UNE:
    return R != APFloat::cmpEqual;
  case FCmpInst::FCMP_ONE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_OLT:
    return R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_UGT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OGT:
    return R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULE:
    return R != APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OLE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UGE:
    return R != APFloat::cmpLessThan;
  case FCmpInst::FCMP_OGE:
    return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
  }
}
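
// Note: when either operand is NaN, R is cmpUnordered, so every unordered
// (FCMP_U*) predicate above evaluates to true and every ordered (FCMP_O*)
// predicate evaluates to false.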

CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) {
  assert(CmpInst::isRelational(pred) &&
         "Call only with non-equality predicates!");

  if (isSigned(pred))
    return getUnsignedPredicate(pred);
  if (isUnsigned(pred))
    return getSignedPredicate(pred);

  llvm_unreachable("Unknown predicate!");
}

bool CmpInst::isOrdered(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
  case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
  case FCmpInst::FCMP_ORD: return true;
  }
}

bool CmpInst::isUnordered(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_UNO: return true;
  }
}

bool CmpInst::isTrueWhenEqual(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
  case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
  }
}

bool CmpInst::isFalseWhenEqual(Predicate predicate) {
  switch (predicate) {
  case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
  default: return false;
  }
}

bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  // If the predicates match, then we know the first condition implies the
  // second is true.
  if (Pred1 == Pred2)
    return true;

  switch (Pred1) {
  default:
    break;
  case ICMP_EQ:
    // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
    return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
           Pred2 == ICMP_SLE;
  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
  }
  return false;
}

bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
}

//===----------------------------------------------------------------------===//
// SwitchInst Implementation
//===----------------------------------------------------------------------===//

void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  assert(Value && Default && NumReserved);
  ReservedSpace = NumReserved;
  setNumHungOffUseOperands(2);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Value;
  Op<1>() = Default;
}
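
// Resulting operand layout (for reference): Op<0> holds the condition,
// Op<1> the default destination, and each case added later occupies two
// consecutive operands (case value, case successor).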

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertBefore) {
  init(Value, Default, 2+NumCases*2);
}

SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];
    OL[i+1] = InOL[i+1];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}

/// addCase - Add an entry to the switch instruction.
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  if (OpNo+2 > ReservedSpace)
    growOperands(); // Get more space!
  // Initialize the new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}

/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  return CaseIt(this, idx);
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. It grows the number of operands by a factor of 3.
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
  assert(Changed && "called only if metadata has changed");

  if (!Weights)
    return nullptr;

  assert(SI.getNumSuccessors() == Weights->size() &&
         "num of prof branch_weights must accord with num of successors");

  bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });

  if (AllZeroes || Weights->size() < 2)
    return nullptr;

  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
}

void SwitchInstProfUpdateWrapper::init() {
  MDNode *ProfileData = getBranchWeightMDNode(SI);
  if (!ProfileData)
    return;

  if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
    llvm_unreachable("number of prof branch_weights metadata operands does "
                     "not correspond to number of successors");
  }

  SmallVector<uint32_t, 8> Weights;
  if (!extractBranchWeights(ProfileData, Weights))
    return;
  this->Weights = std::move(Weights);
}

SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case's weight to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase(CaseIt)
    // removes cases.
    (*Weights)[I->getCaseIndex() + 1] = Weights->back();
    Weights->pop_back();
  }
  return SI.removeCase(I);
}

void SwitchInstProfUpdateWrapper::addCase(
    ConstantInt *OnVal, BasicBlock *Dest,
    SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  SI.addCase(OnVal, Dest);

  if (!Weights && W && *W) {
    Changed = true;
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
    (*Weights)[SI.getNumSuccessors() - 1] = *W;
  } else if (Weights) {
    Changed = true;
    Weights->push_back(W.value_or(0));
  }
  if (Weights)
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
}

Instruction::InstListType::iterator
SwitchInstProfUpdateWrapper::eraseFromParent() {
  // Instruction is erased. Mark as unchanged to not touch it in the destructor.
  Changed = false;
  if (Weights)
    Weights->resize(0);
  return SI.eraseFromParent();
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
  if (!Weights)
    return std::nullopt;
  return (*Weights)[idx];
}

void SwitchInstProfUpdateWrapper::setSuccessorWeight(
    unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  if (!W)
    return;

  if (!Weights && *W)
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);

  if (Weights) {
    auto &OldW = (*Weights)[idx];
    if (*W != OldW) {
      Changed = true;
      OldW = *W;
    }
  }
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
                                                unsigned idx) {
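  // Operand 0 of the !prof metadata node is the "branch_weights" string, so
  // the weight for successor idx lives at operand idx + 1.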
4094 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4095 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4096 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4097 ->getValue()
4098 .getZExtValue();
4099
4100 return std::nullopt;
4101 }
4102
4103 //===----------------------------------------------------------------------===//
4104 // IndirectBrInst Implementation
4105 //===----------------------------------------------------------------------===//
4106
init(Value * Address,unsigned NumDests)4107 void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4108 assert(Address && Address->getType()->isPointerTy() &&
4109 "Address of indirectbr must be a pointer");
4110 ReservedSpace = 1+NumDests;
4111 setNumHungOffUseOperands(1);
4112 allocHungoffUses(ReservedSpace);
4113
4114 Op<0>() = Address;
4115 }
4116
4117
4118 /// growOperands - grow operands - This grows the operand list in response
4119 /// to a push_back style of operation. This grows the number of ops by 2 times.
4120 ///
growOperands()4121 void IndirectBrInst::growOperands() {
4122 unsigned e = getNumOperands();
4123 unsigned NumOps = e*2;
4124
4125 ReservedSpace = NumOps;
4126 growHungoffUses(ReservedSpace);
4127 }
4128
IndirectBrInst(Value * Address,unsigned NumCases,InsertPosition InsertBefore)4129 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4130 InsertPosition InsertBefore)
4131 : Instruction(Type::getVoidTy(Address->getContext()),
4132 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
4133 init(Address, NumCases);
4134 }
4135
IndirectBrInst(const IndirectBrInst & IBI)4136 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4137 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4138 nullptr, IBI.getNumOperands()) {
4139 allocHungoffUses(IBI.getNumOperands());
4140 Use *OL = getOperandList();
4141 const Use *InOL = IBI.getOperandList();
4142 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4143 OL[i] = InOL[i];
4144 SubclassOptionalData = IBI.SubclassOptionalData;
4145 }
4146
4147 /// addDestination - Add a destination.
4148 ///
addDestination(BasicBlock * DestBB)4149 void IndirectBrInst::addDestination(BasicBlock *DestBB) {
4150 unsigned OpNo = getNumOperands();
4151 if (OpNo+1 > ReservedSpace)
4152 growOperands(); // Get more space!
4153 // Initialize some new operands.
4154 assert(OpNo < ReservedSpace && "Growing didn't work!");
4155 setNumHungOffUseOperands(OpNo+1);
4156 getOperandList()[OpNo] = DestBB;
4157 }
4158
4159 /// removeDestination - This method removes the specified successor from the
4160 /// indirectbr instruction.
removeDestination(unsigned idx)4161 void IndirectBrInst::removeDestination(unsigned idx) {
4162 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4163
4164 unsigned NumOps = getNumOperands();
4165 Use *OL = getOperandList();
4166
4167 // Replace this value with the last one.
4168 OL[idx+1] = OL[NumOps-1];
4169
4170 // Nuke the last value.
4171 OL[NumOps-1].set(nullptr);
4172 setNumHungOffUseOperands(NumOps-1);
4173 }
4174
4175 //===----------------------------------------------------------------------===//
4176 // FreezeInst Implementation
4177 //===----------------------------------------------------------------------===//
4178
FreezeInst(Value * S,const Twine & Name,InsertPosition InsertBefore)4179 FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
4180 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4181 setName(Name);
4182 }
4183
4184 //===----------------------------------------------------------------------===//
4185 // cloneImpl() implementations
4186 //===----------------------------------------------------------------------===//
4187
4188 // Define these methods here so vtables don't get emitted into every translation
4189 // unit that uses these classes.
4190
GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  return new (getNumOperands()) GetElementPtrInst(*this);
}

UnaryOperator *UnaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>());
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}

AllocaInst *AllocaInst::cloneImpl() const {
  AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),
                                      getOperand(0), getAlign());
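  // The inalloca and swifterror bits live in the instruction's subclass data
  // and are not copied by the constructor, so propagate them explicitly.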
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

LoadInst *LoadInst::cloneImpl() const {
  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
                      getAlign(), getOrdering(), getSyncScopeID());
}

StoreInst *StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
                       getOrdering(), getSyncScopeID());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
      getOperand(0), getOperand(1), getOperand(2), getAlign(),
      getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
      new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                        getAlign(), getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}

CallInst *CallInst::cloneImpl() const {
  if (hasOperandBundles()) {
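    // Operand bundle descriptors are co-allocated with the call's operands,
    // so reserve extra space for them in the clone's allocation.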
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return new (getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
}

PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

ReturnInst *ReturnInst::cloneImpl() const {
  return new (getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new (getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

InvokeInst *InvokeInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new (getNumOperands()) InvokeInst(*this);
}

CallBrInst *CallBrInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
  }
  return new (getNumOperands()) CallBrInst(*this);
}

ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}

FreezeInst *FreezeInst::cloneImpl() const {
  return new FreezeInst(getOperand(0));
}