// xref: /freebsd/contrib/llvm-project/llvm/lib/IR/IRBuilder.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 //===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the IRBuilder class, which is used as a convenient way
10 // to create LLVM instructions with a consistent and simplified interface.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/IR/IRBuilder.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/IR/Constant.h"
17 #include "llvm/IR/Constants.h"
18 #include "llvm/IR/DerivedTypes.h"
19 #include "llvm/IR/Function.h"
20 #include "llvm/IR/GlobalValue.h"
21 #include "llvm/IR/GlobalVariable.h"
22 #include "llvm/IR/IntrinsicInst.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/LLVMContext.h"
25 #include "llvm/IR/Module.h"
26 #include "llvm/IR/NoFolder.h"
27 #include "llvm/IR/Operator.h"
28 #include "llvm/IR/Statepoint.h"
29 #include "llvm/IR/Type.h"
30 #include "llvm/IR/Value.h"
31 #include "llvm/Support/Casting.h"
32 #include <cassert>
33 #include <cstdint>
34 #include <optional>
35 #include <vector>
36 
37 using namespace llvm;
38 
39 /// CreateGlobalString - Make a new global variable with an initializer that
40 /// has array of i8 type filled in with the nul terminated string value
41 /// specified.  If Name is specified, it is the name of the global variable
42 /// created.
CreateGlobalString(StringRef Str,const Twine & Name,unsigned AddressSpace,Module * M,bool AddNull)43 GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
44                                                   const Twine &Name,
45                                                   unsigned AddressSpace,
46                                                   Module *M, bool AddNull) {
47   Constant *StrConstant = ConstantDataArray::getString(Context, Str, AddNull);
48   if (!M)
49     M = BB->getParent()->getParent();
50   auto *GV = new GlobalVariable(
51       *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
52       StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
53   GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
54   GV->setAlignment(M->getDataLayout().getPrefTypeAlign(getInt8Ty()));
55   return GV;
56 }
57 
getCurrentFunctionReturnType() const58 Type *IRBuilderBase::getCurrentFunctionReturnType() const {
59   assert(BB && BB->getParent() && "No current function!");
60   return BB->getParent()->getReturnType();
61 }
62 
getCurrentDebugLocation() const63 DebugLoc IRBuilderBase::getCurrentDebugLocation() const { return StoredDL; }
SetInstDebugLocation(Instruction * I) const64 void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
65   // We prefer to set our current debug location if any has been set, but if
66   // our debug location is empty and I has a valid location, we shouldn't
67   // overwrite it.
68   I->setDebugLoc(StoredDL.orElse(I->getDebugLoc()));
69 }
70 
/// Cast \p V to \p DestTy element-by-element.  For struct/array sources the
/// two types must have the same shape (same element count, asserted below);
/// each element is extracted, recursively cast, and inserted into a poison
/// aggregate of the destination type.  Non-aggregate leaves fall through to
/// CreateBitOrPointerCast.
Value *IRBuilderBase::CreateAggregateCast(Value *V, Type *DestTy) {
  Type *SrcTy = V->getType();
  // Identical types need no work at all.
  if (SrcTy == DestTy)
    return V;

  if (SrcTy->isAggregateType()) {
    unsigned NumElements;
    if (SrcTy->isStructTy()) {
      assert(DestTy->isStructTy() && "Expected StructType");
      assert(SrcTy->getStructNumElements() == DestTy->getStructNumElements() &&
             "Expected StructTypes with equal number of elements");
      NumElements = SrcTy->getStructNumElements();
    } else {
      assert(SrcTy->isArrayTy() && DestTy->isArrayTy() && "Expected ArrayType");
      assert(SrcTy->getArrayNumElements() == DestTy->getArrayNumElements() &&
             "Expected ArrayTypes with equal number of elements");
      NumElements = SrcTy->getArrayNumElements();
    }

    // Rebuild the aggregate one element at a time, starting from poison.
    Value *Result = PoisonValue::get(DestTy);
    for (unsigned I = 0; I < NumElements; ++I) {
      // Structs may have per-index element types; arrays have a single one.
      Type *ElementTy = SrcTy->isStructTy() ? DestTy->getStructElementType(I)
                                            : DestTy->getArrayElementType();
      Value *Element =
          CreateAggregateCast(CreateExtractValue(V, ArrayRef(I)), ElementTy);

      Result = CreateInsertValue(Result, Element, ArrayRef(I));
    }
    return Result;
  }

  return CreateBitOrPointerCast(V, DestTy);
}
104 
105 CallInst *
createCallHelper(Function * Callee,ArrayRef<Value * > Ops,const Twine & Name,FMFSource FMFSource,ArrayRef<OperandBundleDef> OpBundles)106 IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
107                                 const Twine &Name, FMFSource FMFSource,
108                                 ArrayRef<OperandBundleDef> OpBundles) {
109   CallInst *CI = CreateCall(Callee, Ops, OpBundles, Name);
110   if (isa<FPMathOperator>(CI))
111     CI->setFastMathFlags(FMFSource.get(FMF));
112   return CI;
113 }
114 
CreateVScaleMultiple(IRBuilderBase & B,Type * Ty,uint64_t Scale)115 static Value *CreateVScaleMultiple(IRBuilderBase &B, Type *Ty, uint64_t Scale) {
116   Value *VScale = B.CreateVScale(Ty);
117   if (Scale == 1)
118     return VScale;
119 
120   return B.CreateNUWMul(VScale, ConstantInt::get(Ty, Scale));
121 }
122 
CreateElementCount(Type * Ty,ElementCount EC)123 Value *IRBuilderBase::CreateElementCount(Type *Ty, ElementCount EC) {
124   if (EC.isFixed() || EC.isZero())
125     return ConstantInt::get(Ty, EC.getKnownMinValue());
126 
127   return CreateVScaleMultiple(*this, Ty, EC.getKnownMinValue());
128 }
129 
CreateTypeSize(Type * Ty,TypeSize Size)130 Value *IRBuilderBase::CreateTypeSize(Type *Ty, TypeSize Size) {
131   if (Size.isFixed() || Size.isZero())
132     return ConstantInt::get(Ty, Size.getKnownMinValue());
133 
134   return CreateVScaleMultiple(*this, Ty, Size.getKnownMinValue());
135 }
136 
/// Build a vector whose lanes are <0, 1, 2, ...> of type \p DstType.
/// Scalable destinations use the llvm.stepvector intrinsic (widened to i8
/// elements first when the element type is narrower, then truncated back);
/// fixed-width destinations are emitted directly as a constant vector.
Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  Type *STy = DstType->getScalarType();
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    if (STy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res = CreateIntrinsic(Intrinsic::stepvector, {StepVecType}, {},
                                 nullptr, Name);
    // Narrow back to the requested element width if we widened above.
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to VF.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(ConstantInt::get(STy, i));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
}
164 
CreateMemSet(Value * Ptr,Value * Val,Value * Size,MaybeAlign Align,bool isVolatile,const AAMDNodes & AAInfo)165 CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
166                                       MaybeAlign Align, bool isVolatile,
167                                       const AAMDNodes &AAInfo) {
168   Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
169   Type *Tys[] = {Ptr->getType(), Size->getType()};
170 
171   CallInst *CI = CreateIntrinsic(Intrinsic::memset, Tys, Ops);
172 
173   if (Align)
174     cast<MemSetInst>(CI)->setDestAlignment(*Align);
175   CI->setAAMetadata(AAInfo);
176   return CI;
177 }
178 
CreateMemSetInline(Value * Dst,MaybeAlign DstAlign,Value * Val,Value * Size,bool IsVolatile,const AAMDNodes & AAInfo)179 CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
180                                             Value *Val, Value *Size,
181                                             bool IsVolatile,
182                                             const AAMDNodes &AAInfo) {
183   Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
184   Type *Tys[] = {Dst->getType(), Size->getType()};
185 
186   CallInst *CI = CreateIntrinsic(Intrinsic::memset_inline, Tys, Ops);
187 
188   if (DstAlign)
189     cast<MemSetInst>(CI)->setDestAlignment(*DstAlign);
190   CI->setAAMetadata(AAInfo);
191   return CI;
192 }
193 
CreateElementUnorderedAtomicMemSet(Value * Ptr,Value * Val,Value * Size,Align Alignment,uint32_t ElementSize,const AAMDNodes & AAInfo)194 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
195     Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
196     const AAMDNodes &AAInfo) {
197 
198   Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
199   Type *Tys[] = {Ptr->getType(), Size->getType()};
200 
201   CallInst *CI =
202       CreateIntrinsic(Intrinsic::memset_element_unordered_atomic, Tys, Ops);
203 
204   cast<AnyMemSetInst>(CI)->setDestAlignment(Alignment);
205   CI->setAAMetadata(AAInfo);
206   return CI;
207 }
208 
CreateMemTransferInst(Intrinsic::ID IntrID,Value * Dst,MaybeAlign DstAlign,Value * Src,MaybeAlign SrcAlign,Value * Size,bool isVolatile,const AAMDNodes & AAInfo)209 CallInst *IRBuilderBase::CreateMemTransferInst(Intrinsic::ID IntrID, Value *Dst,
210                                                MaybeAlign DstAlign, Value *Src,
211                                                MaybeAlign SrcAlign, Value *Size,
212                                                bool isVolatile,
213                                                const AAMDNodes &AAInfo) {
214   assert((IntrID == Intrinsic::memcpy || IntrID == Intrinsic::memcpy_inline ||
215           IntrID == Intrinsic::memmove) &&
216          "Unexpected intrinsic ID");
217   Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
218   Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
219 
220   CallInst *CI = CreateIntrinsic(IntrID, Tys, Ops);
221 
222   auto* MCI = cast<MemTransferInst>(CI);
223   if (DstAlign)
224     MCI->setDestAlignment(*DstAlign);
225   if (SrcAlign)
226     MCI->setSourceAlignment(*SrcAlign);
227   MCI->setAAMetadata(AAInfo);
228   return CI;
229 }
230 
CreateElementUnorderedAtomicMemCpy(Value * Dst,Align DstAlign,Value * Src,Align SrcAlign,Value * Size,uint32_t ElementSize,const AAMDNodes & AAInfo)231 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
232     Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
233     uint32_t ElementSize, const AAMDNodes &AAInfo) {
234   assert(DstAlign >= ElementSize &&
235          "Pointer alignment must be at least element size");
236   assert(SrcAlign >= ElementSize &&
237          "Pointer alignment must be at least element size");
238   Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
239   Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
240 
241   CallInst *CI =
242       CreateIntrinsic(Intrinsic::memcpy_element_unordered_atomic, Tys, Ops);
243 
244   // Set the alignment of the pointer args.
245   auto *AMCI = cast<AnyMemCpyInst>(CI);
246   AMCI->setDestAlignment(DstAlign);
247   AMCI->setSourceAlignment(SrcAlign);
248   AMCI->setAAMetadata(AAInfo);
249   return CI;
250 }
251 
252 /// isConstantOne - Return true only if val is constant int 1
isConstantOne(const Value * Val)253 static bool isConstantOne(const Value *Val) {
254   assert(Val && "isConstantOne does not work with nullptr Val");
255   const ConstantInt *CVal = dyn_cast<ConstantInt>(Val);
256   return CVal && CVal->isOne();
257 }
258 
/// Build a call to malloc sized as AllocSize * ArraySize (multiplies by 1
/// are folded away).  \p AllocTy is part of the historical interface and is
/// not inspected here.  When no \p MallocF is supplied, a "void *malloc(
/// size_t)" prototype is inserted into the current module.
CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
                                      Value *AllocSize, Value *ArraySize,
                                      ArrayRef<OperandBundleDef> OpB,
                                      Function *MallocF, const Twine &Name) {
  // malloc(type) becomes:
  //       i8* malloc(typeSize)
  // malloc(type, arraySize) becomes:
  //       i8* malloc(typeSize*arraySize)
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy)
    // Unsigned cast: zero-extend or truncate the count to pointer width.
    ArraySize = CreateIntCast(ArraySize, IntPtrTy, false);

  if (!isConstantOne(ArraySize)) {
    if (isConstantOne(AllocSize)) {
      AllocSize = ArraySize; // Operand * 1 = Operand
    } else {
      // Multiply type size by the array size...
      AllocSize = CreateMul(ArraySize, AllocSize, "mallocsize");
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  Module *M = BB->getParent()->getParent();
  Type *BPTy = PointerType::getUnqual(Context);
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  CallInst *MCall = CreateCall(MallocFunc, AllocSize, OpB, Name);

  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    // Match the callee's calling convention and record that malloc's result
    // does not alias anything else.
    MCall->setCallingConv(F->getCallingConv());
    F->setReturnDoesNotAlias();
  }

  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return MCall;
}
301 
CreateMalloc(Type * IntPtrTy,Type * AllocTy,Value * AllocSize,Value * ArraySize,Function * MallocF,const Twine & Name)302 CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
303                                       Value *AllocSize, Value *ArraySize,
304                                       Function *MallocF, const Twine &Name) {
305 
306   return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, {}, MallocF,
307                       Name);
308 }
309 
310 /// CreateFree - Generate the IR for a call to the builtin free function.
CreateFree(Value * Source,ArrayRef<OperandBundleDef> Bundles)311 CallInst *IRBuilderBase::CreateFree(Value *Source,
312                                     ArrayRef<OperandBundleDef> Bundles) {
313   assert(Source->getType()->isPointerTy() &&
314          "Can not free something of nonpointer type!");
315 
316   Module *M = BB->getParent()->getParent();
317 
318   Type *VoidTy = Type::getVoidTy(M->getContext());
319   Type *VoidPtrTy = PointerType::getUnqual(M->getContext());
320   // prototype free as "void free(void*)"
321   FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, VoidPtrTy);
322   CallInst *Result = CreateCall(FreeFunc, Source, Bundles, "");
323   Result->setTailCall();
324   if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
325     Result->setCallingConv(F->getCallingConv());
326 
327   return Result;
328 }
329 
CreateElementUnorderedAtomicMemMove(Value * Dst,Align DstAlign,Value * Src,Align SrcAlign,Value * Size,uint32_t ElementSize,const AAMDNodes & AAInfo)330 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
331     Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
332     uint32_t ElementSize, const AAMDNodes &AAInfo) {
333   assert(DstAlign >= ElementSize &&
334          "Pointer alignment must be at least element size");
335   assert(SrcAlign >= ElementSize &&
336          "Pointer alignment must be at least element size");
337   Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
338   Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
339 
340   CallInst *CI =
341       CreateIntrinsic(Intrinsic::memmove_element_unordered_atomic, Tys, Ops);
342 
343   // Set the alignment of the pointer args.
344   CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
345   CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));
346   CI->setAAMetadata(AAInfo);
347   return CI;
348 }
349 
getReductionIntrinsic(Intrinsic::ID ID,Value * Src)350 CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
351   Value *Ops[] = {Src};
352   Type *Tys[] = { Src->getType() };
353   return CreateIntrinsic(ID, Tys, Ops);
354 }
355 
CreateFAddReduce(Value * Acc,Value * Src)356 CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
357   Value *Ops[] = {Acc, Src};
358   return CreateIntrinsic(Intrinsic::vector_reduce_fadd, {Src->getType()}, Ops);
359 }
360 
CreateFMulReduce(Value * Acc,Value * Src)361 CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
362   Value *Ops[] = {Acc, Src};
363   return CreateIntrinsic(Intrinsic::vector_reduce_fmul, {Src->getType()}, Ops);
364 }
365 
CreateAddReduce(Value * Src)366 CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
367   return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
368 }
369 
CreateMulReduce(Value * Src)370 CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
371   return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
372 }
373 
CreateAndReduce(Value * Src)374 CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
375   return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
376 }
377 
CreateOrReduce(Value * Src)378 CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
379   return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
380 }
381 
CreateXorReduce(Value * Src)382 CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
383   return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
384 }
385 
CreateIntMaxReduce(Value * Src,bool IsSigned)386 CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
387   auto ID =
388       IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
389   return getReductionIntrinsic(ID, Src);
390 }
391 
CreateIntMinReduce(Value * Src,bool IsSigned)392 CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
393   auto ID =
394       IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
395   return getReductionIntrinsic(ID, Src);
396 }
397 
CreateFPMaxReduce(Value * Src)398 CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
399   return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
400 }
401 
CreateFPMinReduce(Value * Src)402 CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
403   return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
404 }
405 
CreateFPMaximumReduce(Value * Src)406 CallInst *IRBuilderBase::CreateFPMaximumReduce(Value *Src) {
407   return getReductionIntrinsic(Intrinsic::vector_reduce_fmaximum, Src);
408 }
409 
CreateFPMinimumReduce(Value * Src)410 CallInst *IRBuilderBase::CreateFPMinimumReduce(Value *Src) {
411   return getReductionIntrinsic(Intrinsic::vector_reduce_fminimum, Src);
412 }
413 
CreateLifetimeStart(Value * Ptr,ConstantInt * Size)414 CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
415   assert(isa<PointerType>(Ptr->getType()) &&
416          "lifetime.start only applies to pointers.");
417   if (!Size)
418     Size = getInt64(-1);
419   else
420     assert(Size->getType() == getInt64Ty() &&
421            "lifetime.start requires the size to be an i64");
422   Value *Ops[] = { Size, Ptr };
423   return CreateIntrinsic(Intrinsic::lifetime_start, {Ptr->getType()}, Ops);
424 }
425 
CreateLifetimeEnd(Value * Ptr,ConstantInt * Size)426 CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
427   assert(isa<PointerType>(Ptr->getType()) &&
428          "lifetime.end only applies to pointers.");
429   if (!Size)
430     Size = getInt64(-1);
431   else
432     assert(Size->getType() == getInt64Ty() &&
433            "lifetime.end requires the size to be an i64");
434   Value *Ops[] = { Size, Ptr };
435   return CreateIntrinsic(Intrinsic::lifetime_end, {Ptr->getType()}, Ops);
436 }
437 
CreateInvariantStart(Value * Ptr,ConstantInt * Size)438 CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
439 
440   assert(isa<PointerType>(Ptr->getType()) &&
441          "invariant.start only applies to pointers.");
442   if (!Size)
443     Size = getInt64(-1);
444   else
445     assert(Size->getType() == getInt64Ty() &&
446            "invariant.start requires the size to be an i64");
447 
448   Value *Ops[] = {Size, Ptr};
449   // Fill in the single overloaded type: memory object type.
450   Type *ObjectPtr[1] = {Ptr->getType()};
451   return CreateIntrinsic(Intrinsic::invariant_start, ObjectPtr, Ops);
452 }
453 
getAlign(Value * Ptr)454 static MaybeAlign getAlign(Value *Ptr) {
455   if (auto *V = dyn_cast<GlobalVariable>(Ptr))
456     return V->getAlign();
457   if (auto *A = dyn_cast<GlobalAlias>(Ptr))
458     return getAlign(A->getAliaseeObject());
459   return {};
460 }
461 
CreateThreadLocalAddress(Value * Ptr)462 CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) {
463   assert(isa<GlobalValue>(Ptr) && cast<GlobalValue>(Ptr)->isThreadLocal() &&
464          "threadlocal_address only applies to thread local variables.");
465   CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address,
466                                  {Ptr->getType()}, {Ptr});
467   if (MaybeAlign A = getAlign(Ptr)) {
468     CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *A));
469     CI->addRetAttr(Attribute::getWithAlignment(CI->getContext(), *A));
470   }
471   return CI;
472 }
473 
474 CallInst *
CreateAssumption(Value * Cond,ArrayRef<OperandBundleDef> OpBundles)475 IRBuilderBase::CreateAssumption(Value *Cond,
476                                 ArrayRef<OperandBundleDef> OpBundles) {
477   assert(Cond->getType() == getInt1Ty() &&
478          "an assumption condition must be of type i1");
479 
480   Value *Ops[] = { Cond };
481   Module *M = BB->getParent()->getParent();
482   Function *FnAssume = Intrinsic::getOrInsertDeclaration(M, Intrinsic::assume);
483   return CreateCall(FnAssume, Ops, OpBundles);
484 }
485 
CreateNoAliasScopeDeclaration(Value * Scope)486 Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
487   return CreateIntrinsic(Intrinsic::experimental_noalias_scope_decl, {},
488                          {Scope});
489 }
490 
491 /// Create a call to a Masked Load intrinsic.
492 /// \p Ty        - vector type to load
493 /// \p Ptr       - base pointer for the load
494 /// \p Alignment - alignment of the source location
495 /// \p Mask      - vector of booleans which indicates what vector lanes should
496 ///                be accessed in memory
497 /// \p PassThru  - pass-through value that is used to fill the masked-off lanes
498 ///                of the result
499 /// \p Name      - name of the result variable
CreateMaskedLoad(Type * Ty,Value * Ptr,Align Alignment,Value * Mask,Value * PassThru,const Twine & Name)500 CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
501                                           Value *Mask, Value *PassThru,
502                                           const Twine &Name) {
503   auto *PtrTy = cast<PointerType>(Ptr->getType());
504   assert(Ty->isVectorTy() && "Type should be vector");
505   assert(Mask && "Mask should not be all-ones (null)");
506   if (!PassThru)
507     PassThru = PoisonValue::get(Ty);
508   Type *OverloadedTypes[] = { Ty, PtrTy };
509   Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
510   return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
511                                OverloadedTypes, Name);
512 }
513 
514 /// Create a call to a Masked Store intrinsic.
515 /// \p Val       - data to be stored,
516 /// \p Ptr       - base pointer for the store
517 /// \p Alignment - alignment of the destination location
518 /// \p Mask      - vector of booleans which indicates what vector lanes should
519 ///                be accessed in memory
CreateMaskedStore(Value * Val,Value * Ptr,Align Alignment,Value * Mask)520 CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
521                                            Align Alignment, Value *Mask) {
522   auto *PtrTy = cast<PointerType>(Ptr->getType());
523   Type *DataTy = Val->getType();
524   assert(DataTy->isVectorTy() && "Val should be a vector");
525   assert(Mask && "Mask should not be all-ones (null)");
526   Type *OverloadedTypes[] = { DataTy, PtrTy };
527   Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
528   return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
529 }
530 
531 /// Create a call to a Masked intrinsic, with given intrinsic Id,
532 /// an array of operands - Ops, and an array of overloaded types -
533 /// OverloadedTypes.
CreateMaskedIntrinsic(Intrinsic::ID Id,ArrayRef<Value * > Ops,ArrayRef<Type * > OverloadedTypes,const Twine & Name)534 CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
535                                                ArrayRef<Value *> Ops,
536                                                ArrayRef<Type *> OverloadedTypes,
537                                                const Twine &Name) {
538   return CreateIntrinsic(Id, OverloadedTypes, Ops, {}, Name);
539 }
540 
541 /// Create a call to a Masked Gather intrinsic.
542 /// \p Ty       - vector type to gather
543 /// \p Ptrs     - vector of pointers for loading
544 /// \p Align    - alignment for one element
545 /// \p Mask     - vector of booleans which indicates what vector lanes should
546 ///               be accessed in memory
547 /// \p PassThru - pass-through value that is used to fill the masked-off lanes
548 ///               of the result
549 /// \p Name     - name of the result variable
CreateMaskedGather(Type * Ty,Value * Ptrs,Align Alignment,Value * Mask,Value * PassThru,const Twine & Name)550 CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
551                                             Align Alignment, Value *Mask,
552                                             Value *PassThru,
553                                             const Twine &Name) {
554   auto *VecTy = cast<VectorType>(Ty);
555   ElementCount NumElts = VecTy->getElementCount();
556   auto *PtrsTy = cast<VectorType>(Ptrs->getType());
557   assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");
558 
559   if (!Mask)
560     Mask = getAllOnesMask(NumElts);
561 
562   if (!PassThru)
563     PassThru = PoisonValue::get(Ty);
564 
565   Type *OverloadedTypes[] = {Ty, PtrsTy};
566   Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};
567 
568   // We specify only one type when we create this intrinsic. Types of other
569   // arguments are derived from this type.
570   return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
571                                Name);
572 }
573 
574 /// Create a call to a Masked Scatter intrinsic.
575 /// \p Data  - data to be stored,
576 /// \p Ptrs  - the vector of pointers, where the \p Data elements should be
577 ///            stored
578 /// \p Align - alignment for one element
579 /// \p Mask  - vector of booleans which indicates what vector lanes should
580 ///            be accessed in memory
CreateMaskedScatter(Value * Data,Value * Ptrs,Align Alignment,Value * Mask)581 CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
582                                              Align Alignment, Value *Mask) {
583   auto *PtrsTy = cast<VectorType>(Ptrs->getType());
584   auto *DataTy = cast<VectorType>(Data->getType());
585   ElementCount NumElts = PtrsTy->getElementCount();
586 
587   if (!Mask)
588     Mask = getAllOnesMask(NumElts);
589 
590   Type *OverloadedTypes[] = {DataTy, PtrsTy};
591   Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};
592 
593   // We specify only one type when we create this intrinsic. Types of other
594   // arguments are derived from this type.
595   return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
596 }
597 
598 /// Create a call to Masked Expand Load intrinsic
599 /// \p Ty        - vector type to load
600 /// \p Ptr       - base pointer for the load
601 /// \p Align     - alignment of \p Ptr
602 /// \p Mask      - vector of booleans which indicates what vector lanes should
603 ///                be accessed in memory
604 /// \p PassThru  - pass-through value that is used to fill the masked-off lanes
605 ///                of the result
606 /// \p Name      - name of the result variable
CreateMaskedExpandLoad(Type * Ty,Value * Ptr,MaybeAlign Align,Value * Mask,Value * PassThru,const Twine & Name)607 CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
608                                                 MaybeAlign Align, Value *Mask,
609                                                 Value *PassThru,
610                                                 const Twine &Name) {
611   assert(Ty->isVectorTy() && "Type should be vector");
612   assert(Mask && "Mask should not be all-ones (null)");
613   if (!PassThru)
614     PassThru = PoisonValue::get(Ty);
615   Type *OverloadedTypes[] = {Ty};
616   Value *Ops[] = {Ptr, Mask, PassThru};
617   CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
618                                        OverloadedTypes, Name);
619   if (Align)
620     CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *Align));
621   return CI;
622 }
623 
624 /// Create a call to Masked Compress Store intrinsic
625 /// \p Val       - data to be stored,
626 /// \p Ptr       - base pointer for the store
627 /// \p Align     - alignment of \p Ptr
628 /// \p Mask      - vector of booleans which indicates what vector lanes should
629 ///                be accessed in memory
CreateMaskedCompressStore(Value * Val,Value * Ptr,MaybeAlign Align,Value * Mask)630 CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
631                                                    MaybeAlign Align,
632                                                    Value *Mask) {
633   Type *DataTy = Val->getType();
634   assert(DataTy->isVectorTy() && "Val should be a vector");
635   assert(Mask && "Mask should not be all-ones (null)");
636   Type *OverloadedTypes[] = {DataTy};
637   Value *Ops[] = {Val, Ptr, Mask};
638   CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
639                                        OverloadedTypes);
640   if (Align)
641     CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), *Align));
642   return CI;
643 }
644 
/// Assemble the fixed argument list of a gc.statepoint call, in ABI order:
/// statepoint ID, number of patchable bytes, callee, call-arg count, flags,
/// the call arguments themselves, then two zeros for the legacy
/// transition/deopt argument counts.
template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}
663 
664 template<typename T1, typename T2, typename T3>
665 static std::vector<OperandBundleDef>
getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,std::optional<ArrayRef<T2>> DeoptArgs,ArrayRef<T3> GCArgs)666 getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
667                      std::optional<ArrayRef<T2>> DeoptArgs,
668                      ArrayRef<T3> GCArgs) {
669   std::vector<OperandBundleDef> Rval;
670   if (DeoptArgs)
671     Rval.emplace_back("deopt", SmallVector<Value *, 16>(*DeoptArgs));
672   if (TransitionArgs)
673     Rval.emplace_back("gc-transition",
674                       SmallVector<Value *, 16>(*TransitionArgs));
675   if (GCArgs.size())
676     Rval.emplace_back("gc-live", SmallVector<Value *, 16>(GCArgs));
677   return Rval;
678 }
679 
template <typename T0, typename T1, typename T2, typename T3>
// Shared implementation behind every CreateGCStatepointCall overload: wraps
// a call to \p ActualCallee in an experimental.gc.statepoint intrinsic call.
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::experimental_gc_statepoint,
      {ActualCallee.getCallee()->getType()});

  std::vector<Value *> Args = getStatepointArgs(
      *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);

  // Transition/deopt/gc-live values travel as operand bundles rather than as
  // statepoint operands (see getStatepointBundles).
  CallInst *CI = Builder->CreateCall(
      FnStatepoint, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  // Operand 2 is the wrapped call target; with opaque pointers its function
  // type must be recorded explicitly via an elementtype attribute.
  CI->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualCallee.getFunctionType()));
  return CI;
}
704 
// Overload with default flags and no GC-transition arguments.
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
}
713 
// Fully-general overload: explicit statepoint flags, with transition and
// deopt arguments supplied as Use lists (e.g. when rewriting an existing
// call site in place).
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    uint32_t Flags, ArrayRef<Value *> CallArgs,
    std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}
724 
// Overload taking the wrapped call's arguments as Uses, with default flags
// and no GC-transition arguments.
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
}
733 
template <typename T0, typename T1, typename T2, typename T3>
// Shared implementation behind every CreateGCStatepointInvoke overload:
// wraps an invoke of \p ActualInvokee in an experimental.gc.statepoint
// intrinsic invoke with the given normal/unwind destinations.
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualInvokee, BasicBlock *NormalDest,
    BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::experimental_gc_statepoint,
      {ActualInvokee.getCallee()->getType()});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
                        Flags, InvokeArgs);

  // Transition/deopt/gc-live values travel as operand bundles (see
  // getStatepointBundles).
  InvokeInst *II = Builder->CreateInvoke(
      FnStatepoint, NormalDest, UnwindDest, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  // Operand 2 is the wrapped invoke target; with opaque pointers its
  // function type must be recorded explicitly via an elementtype attribute.
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
  return II;
}
760 
// Overload with default flags and no GC-transition arguments.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs,
      std::nullopt /* No Transition Args*/, DeoptArgs, GCArgs, Name);
}
771 
// Fully-general overload: explicit statepoint flags, with transition and
// deopt arguments supplied as Use lists.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}
782 
// Overload taking the wrapped invoke's arguments as Uses, with default
// flags and no GC-transition arguments.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, std::nullopt, DeoptArgs,
      GCArgs, Name);
}
793 
CreateGCResult(Instruction * Statepoint,Type * ResultType,const Twine & Name)794 CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
795                                         Type *ResultType, const Twine &Name) {
796   Intrinsic::ID ID = Intrinsic::experimental_gc_result;
797   Type *Types[] = {ResultType};
798 
799   Value *Args[] = {Statepoint};
800   return CreateIntrinsic(ID, Types, Args, {}, Name);
801 }
802 
CreateGCRelocate(Instruction * Statepoint,int BaseOffset,int DerivedOffset,Type * ResultType,const Twine & Name)803 CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
804                                           int BaseOffset, int DerivedOffset,
805                                           Type *ResultType, const Twine &Name) {
806   Type *Types[] = {ResultType};
807 
808   Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
809   return CreateIntrinsic(Intrinsic::experimental_gc_relocate, Types, Args, {},
810                          Name);
811 }
812 
CreateGCGetPointerBase(Value * DerivedPtr,const Twine & Name)813 CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
814                                                 const Twine &Name) {
815   Type *PtrTy = DerivedPtr->getType();
816   return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_base,
817                          {PtrTy, PtrTy}, {DerivedPtr}, {}, Name);
818 }
819 
CreateGCGetPointerOffset(Value * DerivedPtr,const Twine & Name)820 CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
821                                                   const Twine &Name) {
822   Type *PtrTy = DerivedPtr->getType();
823   return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_offset, {PtrTy},
824                          {DerivedPtr}, {}, Name);
825 }
826 
CreateUnaryIntrinsic(Intrinsic::ID ID,Value * V,FMFSource FMFSource,const Twine & Name)827 CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
828                                               FMFSource FMFSource,
829                                               const Twine &Name) {
830   Module *M = BB->getModule();
831   Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {V->getType()});
832   return createCallHelper(Fn, {V}, Name, FMFSource);
833 }
834 
CreateBinaryIntrinsic(Intrinsic::ID ID,Value * LHS,Value * RHS,FMFSource FMFSource,const Twine & Name)835 Value *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
836                                             Value *RHS, FMFSource FMFSource,
837                                             const Twine &Name) {
838   Module *M = BB->getModule();
839   Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {LHS->getType()});
840   if (Value *V = Folder.FoldBinaryIntrinsic(ID, LHS, RHS, Fn->getReturnType(),
841                                             /*FMFSource=*/nullptr))
842     return V;
843   return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
844 }
845 
CreateIntrinsic(Intrinsic::ID ID,ArrayRef<Type * > Types,ArrayRef<Value * > Args,FMFSource FMFSource,const Twine & Name)846 CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
847                                          ArrayRef<Type *> Types,
848                                          ArrayRef<Value *> Args,
849                                          FMFSource FMFSource,
850                                          const Twine &Name) {
851   Module *M = BB->getModule();
852   Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, Types);
853   return createCallHelper(Fn, Args, Name, FMFSource);
854 }
855 
// Overload-type inference variant: deduce the intrinsic's overloaded types
// from the desired return type and the types of the supplied arguments.
CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                                         ArrayRef<Value *> Args,
                                         FMFSource FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();

  SmallVector<Intrinsic::IITDescriptor> Table;
  Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef(Table);

  // Build the concrete signature implied by RetTy and the argument types.
  SmallVector<Type *> ArgTys;
  ArgTys.reserve(Args.size());
  for (auto &I : Args)
    ArgTys.push_back(I->getType());
  FunctionType *FTy = FunctionType::get(RetTy, ArgTys, false);
  SmallVector<Type *> OverloadTys;
  // Match that signature against the intrinsic's descriptor table;
  // OverloadTys receives the deduced overload types on success.
  Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FTy, TableRef, OverloadTys);
  (void)Res;
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match && TableRef.empty() &&
         "Wrong types for intrinsic!");
  // TODO: Handle varargs intrinsics.

  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, OverloadTys);
  return createCallHelper(Fn, Args, Name, FMFSource);
}
882 
CreateConstrainedFPBinOp(Intrinsic::ID ID,Value * L,Value * R,FMFSource FMFSource,const Twine & Name,MDNode * FPMathTag,std::optional<RoundingMode> Rounding,std::optional<fp::ExceptionBehavior> Except)883 CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
884     Intrinsic::ID ID, Value *L, Value *R, FMFSource FMFSource,
885     const Twine &Name, MDNode *FPMathTag, std::optional<RoundingMode> Rounding,
886     std::optional<fp::ExceptionBehavior> Except) {
887   Value *RoundingV = getConstrainedFPRounding(Rounding);
888   Value *ExceptV = getConstrainedFPExcept(Except);
889 
890   FastMathFlags UseFMF = FMFSource.get(FMF);
891 
892   CallInst *C = CreateIntrinsic(ID, {L->getType()},
893                                 {L, R, RoundingV, ExceptV}, nullptr, Name);
894   setConstrainedFPCallAttr(C);
895   setFPAttrs(C, FPMathTag, UseFMF);
896   return C;
897 }
898 
CreateConstrainedFPIntrinsic(Intrinsic::ID ID,ArrayRef<Type * > Types,ArrayRef<Value * > Args,FMFSource FMFSource,const Twine & Name,MDNode * FPMathTag,std::optional<RoundingMode> Rounding,std::optional<fp::ExceptionBehavior> Except)899 CallInst *IRBuilderBase::CreateConstrainedFPIntrinsic(
900     Intrinsic::ID ID, ArrayRef<Type *> Types, ArrayRef<Value *> Args,
901     FMFSource FMFSource, const Twine &Name, MDNode *FPMathTag,
902     std::optional<RoundingMode> Rounding,
903     std::optional<fp::ExceptionBehavior> Except) {
904   Value *RoundingV = getConstrainedFPRounding(Rounding);
905   Value *ExceptV = getConstrainedFPExcept(Except);
906 
907   FastMathFlags UseFMF = FMFSource.get(FMF);
908 
909   llvm::SmallVector<Value *, 5> ExtArgs(Args);
910   ExtArgs.push_back(RoundingV);
911   ExtArgs.push_back(ExceptV);
912 
913   CallInst *C = CreateIntrinsic(ID, Types, ExtArgs, nullptr, Name);
914   setConstrainedFPCallAttr(C);
915   setFPAttrs(C, FPMathTag, UseFMF);
916   return C;
917 }
918 
CreateConstrainedFPUnroundedBinOp(Intrinsic::ID ID,Value * L,Value * R,FMFSource FMFSource,const Twine & Name,MDNode * FPMathTag,std::optional<fp::ExceptionBehavior> Except)919 CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
920     Intrinsic::ID ID, Value *L, Value *R, FMFSource FMFSource,
921     const Twine &Name, MDNode *FPMathTag,
922     std::optional<fp::ExceptionBehavior> Except) {
923   Value *ExceptV = getConstrainedFPExcept(Except);
924 
925   FastMathFlags UseFMF = FMFSource.get(FMF);
926 
927   CallInst *C =
928       CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name);
929   setConstrainedFPCallAttr(C);
930   setFPAttrs(C, FPMathTag, UseFMF);
931   return C;
932 }
933 
CreateNAryOp(unsigned Opc,ArrayRef<Value * > Ops,const Twine & Name,MDNode * FPMathTag)934 Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
935                                    const Twine &Name, MDNode *FPMathTag) {
936   if (Instruction::isBinaryOp(Opc)) {
937     assert(Ops.size() == 2 && "Invalid number of operands!");
938     return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
939                        Ops[0], Ops[1], Name, FPMathTag);
940   }
941   if (Instruction::isUnaryOp(Opc)) {
942     assert(Ops.size() == 1 && "Invalid number of operands!");
943     return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
944                       Ops[0], Name, FPMathTag);
945   }
946   llvm_unreachable("Unexpected opcode!");
947 }
948 
CreateConstrainedFPCast(Intrinsic::ID ID,Value * V,Type * DestTy,FMFSource FMFSource,const Twine & Name,MDNode * FPMathTag,std::optional<RoundingMode> Rounding,std::optional<fp::ExceptionBehavior> Except)949 CallInst *IRBuilderBase::CreateConstrainedFPCast(
950     Intrinsic::ID ID, Value *V, Type *DestTy, FMFSource FMFSource,
951     const Twine &Name, MDNode *FPMathTag, std::optional<RoundingMode> Rounding,
952     std::optional<fp::ExceptionBehavior> Except) {
953   Value *ExceptV = getConstrainedFPExcept(Except);
954 
955   FastMathFlags UseFMF = FMFSource.get(FMF);
956 
957   CallInst *C;
958   if (Intrinsic::hasConstrainedFPRoundingModeOperand(ID)) {
959     Value *RoundingV = getConstrainedFPRounding(Rounding);
960     C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
961                         nullptr, Name);
962   } else
963     C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
964                         Name);
965 
966   setConstrainedFPCallAttr(C);
967 
968   if (isa<FPMathOperator>(C))
969     setFPAttrs(C, FPMathTag, UseFMF);
970   return C;
971 }
972 
CreateFCmpHelper(CmpInst::Predicate P,Value * LHS,Value * RHS,const Twine & Name,MDNode * FPMathTag,FMFSource FMFSource,bool IsSignaling)973 Value *IRBuilderBase::CreateFCmpHelper(CmpInst::Predicate P, Value *LHS,
974                                        Value *RHS, const Twine &Name,
975                                        MDNode *FPMathTag, FMFSource FMFSource,
976                                        bool IsSignaling) {
977   if (IsFPConstrained) {
978     auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
979                           : Intrinsic::experimental_constrained_fcmp;
980     return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
981   }
982 
983   if (auto *V = Folder.FoldCmp(P, LHS, RHS))
984     return V;
985   return Insert(
986       setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMFSource.get(FMF)),
987       Name);
988 }
989 
CreateConstrainedFPCmp(Intrinsic::ID ID,CmpInst::Predicate P,Value * L,Value * R,const Twine & Name,std::optional<fp::ExceptionBehavior> Except)990 CallInst *IRBuilderBase::CreateConstrainedFPCmp(
991     Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
992     const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
993   Value *PredicateV = getConstrainedFPPredicate(P);
994   Value *ExceptV = getConstrainedFPExcept(Except);
995 
996   CallInst *C = CreateIntrinsic(ID, {L->getType()},
997                                 {L, R, PredicateV, ExceptV}, nullptr, Name);
998   setConstrainedFPCallAttr(C);
999   return C;
1000 }
1001 
CreateConstrainedFPCall(Function * Callee,ArrayRef<Value * > Args,const Twine & Name,std::optional<RoundingMode> Rounding,std::optional<fp::ExceptionBehavior> Except)1002 CallInst *IRBuilderBase::CreateConstrainedFPCall(
1003     Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
1004     std::optional<RoundingMode> Rounding,
1005     std::optional<fp::ExceptionBehavior> Except) {
1006   llvm::SmallVector<Value *, 6> UseArgs(Args);
1007 
1008   if (Intrinsic::hasConstrainedFPRoundingModeOperand(Callee->getIntrinsicID()))
1009     UseArgs.push_back(getConstrainedFPRounding(Rounding));
1010   UseArgs.push_back(getConstrainedFPExcept(Except));
1011 
1012   CallInst *C = CreateCall(Callee, UseArgs, Name);
1013   setConstrainedFPCallAttr(C);
1014   return C;
1015 }
1016 
// Convenience overload: a select with no fast-math-flags source.
Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  return CreateSelectFMF(C, True, False, {}, Name, MDFrom);
}
1021 
CreateSelectFMF(Value * C,Value * True,Value * False,FMFSource FMFSource,const Twine & Name,Instruction * MDFrom)1022 Value *IRBuilderBase::CreateSelectFMF(Value *C, Value *True, Value *False,
1023                                       FMFSource FMFSource, const Twine &Name,
1024                                       Instruction *MDFrom) {
1025   if (auto *V = Folder.FoldSelect(C, True, False))
1026     return V;
1027 
1028   SelectInst *Sel = SelectInst::Create(C, True, False);
1029   if (MDFrom) {
1030     MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
1031     MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
1032     Sel = addBranchMetadata(Sel, Prof, Unpred);
1033   }
1034   if (isa<FPMathOperator>(Sel))
1035     setFPAttrs(Sel, /*MDNode=*/nullptr, FMFSource.get(FMF));
1036   return Insert(Sel, Name);
1037 }
1038 
CreatePtrDiff(Type * ElemTy,Value * LHS,Value * RHS,const Twine & Name)1039 Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
1040                                     const Twine &Name) {
1041   assert(LHS->getType() == RHS->getType() &&
1042          "Pointer subtraction operand types must match!");
1043   Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
1044   Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
1045   Value *Difference = CreateSub(LHS_int, RHS_int);
1046   return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy),
1047                          Name);
1048 }
1049 
CreateLaunderInvariantGroup(Value * Ptr)1050 Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
1051   assert(isa<PointerType>(Ptr->getType()) &&
1052          "launder.invariant.group only applies to pointers.");
1053   auto *PtrType = Ptr->getType();
1054   Module *M = BB->getParent()->getParent();
1055   Function *FnLaunderInvariantGroup = Intrinsic::getOrInsertDeclaration(
1056       M, Intrinsic::launder_invariant_group, {PtrType});
1057 
1058   assert(FnLaunderInvariantGroup->getReturnType() == PtrType &&
1059          FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
1060              PtrType &&
1061          "LaunderInvariantGroup should take and return the same type");
1062 
1063   return CreateCall(FnLaunderInvariantGroup, {Ptr});
1064 }
1065 
CreateStripInvariantGroup(Value * Ptr)1066 Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
1067   assert(isa<PointerType>(Ptr->getType()) &&
1068          "strip.invariant.group only applies to pointers.");
1069 
1070   auto *PtrType = Ptr->getType();
1071   Module *M = BB->getParent()->getParent();
1072   Function *FnStripInvariantGroup = Intrinsic::getOrInsertDeclaration(
1073       M, Intrinsic::strip_invariant_group, {PtrType});
1074 
1075   assert(FnStripInvariantGroup->getReturnType() == PtrType &&
1076          FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
1077              PtrType &&
1078          "StripInvariantGroup should take and return the same type");
1079 
1080   return CreateCall(FnStripInvariantGroup, {Ptr});
1081 }
1082 
CreateVectorReverse(Value * V,const Twine & Name)1083 Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
1084   auto *Ty = cast<VectorType>(V->getType());
1085   if (isa<ScalableVectorType>(Ty)) {
1086     Module *M = BB->getParent()->getParent();
1087     Function *F =
1088         Intrinsic::getOrInsertDeclaration(M, Intrinsic::vector_reverse, Ty);
1089     return Insert(CallInst::Create(F, V), Name);
1090   }
1091   // Keep the original behaviour for fixed vector
1092   SmallVector<int, 8> ShuffleMask;
1093   int NumElts = Ty->getElementCount().getKnownMinValue();
1094   for (int i = 0; i < NumElts; ++i)
1095     ShuffleMask.push_back(NumElts - i - 1);
1096   return CreateShuffleVector(V, ShuffleMask, Name);
1097 }
1098 
CreateVectorSplice(Value * V1,Value * V2,int64_t Imm,const Twine & Name)1099 Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
1100                                          const Twine &Name) {
1101   assert(isa<VectorType>(V1->getType()) && "Unexpected type");
1102   assert(V1->getType() == V2->getType() &&
1103          "Splice expects matching operand types!");
1104 
1105   if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
1106     Module *M = BB->getParent()->getParent();
1107     Function *F =
1108         Intrinsic::getOrInsertDeclaration(M, Intrinsic::vector_splice, VTy);
1109 
1110     Value *Ops[] = {V1, V2, getInt32(Imm)};
1111     return Insert(CallInst::Create(F, Ops), Name);
1112   }
1113 
1114   unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
1115   assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
1116          "Invalid immediate for vector splice!");
1117 
1118   // Keep the original behaviour for fixed vector
1119   unsigned Idx = (NumElts + Imm) % NumElts;
1120   SmallVector<int, 8> Mask;
1121   for (unsigned I = 0; I < NumElts; ++I)
1122     Mask.push_back(Idx + I);
1123 
1124   return CreateShuffleVector(V1, V2, Mask);
1125 }
1126 
// Convenience overload for fixed-width splats; the ElementCount overload
// holds the actual implementation.
Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  auto EC = ElementCount::getFixed(NumElts);
  return CreateVectorSplat(EC, V, Name);
}
1132 
CreateVectorSplat(ElementCount EC,Value * V,const Twine & Name)1133 Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
1134                                         const Twine &Name) {
1135   assert(EC.isNonZero() && "Cannot splat to an empty vector!");
1136 
1137   // First insert it into a poison vector so we can shuffle it.
1138   Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
1139   V = CreateInsertElement(Poison, V, getInt64(0), Name + ".splatinsert");
1140 
1141   // Shuffle the value across the desired number of elements.
1142   SmallVector<int, 16> Zeros;
1143   Zeros.resize(EC.getKnownMinValue());
1144   return CreateShuffleVector(V, Zeros, Name + ".splat");
1145 }
1146 
CreatePreserveArrayAccessIndex(Type * ElTy,Value * Base,unsigned Dimension,unsigned LastIndex,MDNode * DbgInfo)1147 Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
1148     Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
1149     MDNode *DbgInfo) {
1150   auto *BaseType = Base->getType();
1151   assert(isa<PointerType>(BaseType) &&
1152          "Invalid Base ptr type for preserve.array.access.index.");
1153 
1154   Value *LastIndexV = getInt32(LastIndex);
1155   Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1156   SmallVector<Value *, 4> IdxList(Dimension, Zero);
1157   IdxList.push_back(LastIndexV);
1158 
1159   Type *ResultType = GetElementPtrInst::getGEPReturnType(Base, IdxList);
1160 
1161   Value *DimV = getInt32(Dimension);
1162   CallInst *Fn =
1163       CreateIntrinsic(Intrinsic::preserve_array_access_index,
1164                       {ResultType, BaseType}, {Base, DimV, LastIndexV});
1165   Fn->addParamAttr(
1166       0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
1167   if (DbgInfo)
1168     Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1169 
1170   return Fn;
1171 }
1172 
CreatePreserveUnionAccessIndex(Value * Base,unsigned FieldIndex,MDNode * DbgInfo)1173 Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
1174     Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
1175   assert(isa<PointerType>(Base->getType()) &&
1176          "Invalid Base ptr type for preserve.union.access.index.");
1177   auto *BaseType = Base->getType();
1178 
1179   Value *DIIndex = getInt32(FieldIndex);
1180   CallInst *Fn = CreateIntrinsic(Intrinsic::preserve_union_access_index,
1181                                  {BaseType, BaseType}, {Base, DIIndex});
1182   if (DbgInfo)
1183     Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1184 
1185   return Fn;
1186 }
1187 
CreatePreserveStructAccessIndex(Type * ElTy,Value * Base,unsigned Index,unsigned FieldIndex,MDNode * DbgInfo)1188 Value *IRBuilderBase::CreatePreserveStructAccessIndex(
1189     Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
1190     MDNode *DbgInfo) {
1191   auto *BaseType = Base->getType();
1192   assert(isa<PointerType>(BaseType) &&
1193          "Invalid Base ptr type for preserve.struct.access.index.");
1194 
1195   Value *GEPIndex = getInt32(Index);
1196   Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1197   Type *ResultType =
1198       GetElementPtrInst::getGEPReturnType(Base, {Zero, GEPIndex});
1199 
1200   Value *DIIndex = getInt32(FieldIndex);
1201   CallInst *Fn =
1202       CreateIntrinsic(Intrinsic::preserve_struct_access_index,
1203                       {ResultType, BaseType}, {Base, GEPIndex, DIIndex});
1204   Fn->addParamAttr(
1205       0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
1206   if (DbgInfo)
1207     Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1208 
1209   return Fn;
1210 }
1211 
createIsFPClass(Value * FPNum,unsigned Test)1212 Value *IRBuilderBase::createIsFPClass(Value *FPNum, unsigned Test) {
1213   ConstantInt *TestV = getInt32(Test);
1214   return CreateIntrinsic(Intrinsic::is_fpclass, {FPNum->getType()},
1215                          {FPNum, TestV});
1216 }
1217 
CreateAlignmentAssumptionHelper(const DataLayout & DL,Value * PtrValue,Value * AlignValue,Value * OffsetValue)1218 CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
1219                                                          Value *PtrValue,
1220                                                          Value *AlignValue,
1221                                                          Value *OffsetValue) {
1222   SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
1223   if (OffsetValue)
1224     Vals.push_back(OffsetValue);
1225   OperandBundleDefT<Value *> AlignOpB("align", Vals);
1226   return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
1227 }
1228 
CreateAlignmentAssumption(const DataLayout & DL,Value * PtrValue,unsigned Alignment,Value * OffsetValue)1229 CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
1230                                                    Value *PtrValue,
1231                                                    unsigned Alignment,
1232                                                    Value *OffsetValue) {
1233   assert(isa<PointerType>(PtrValue->getType()) &&
1234          "trying to create an alignment assumption on a non-pointer?");
1235   assert(Alignment != 0 && "Invalid Alignment");
1236   auto *PtrTy = cast<PointerType>(PtrValue->getType());
1237   Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
1238   Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
1239   return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
1240 }
1241 
CreateAlignmentAssumption(const DataLayout & DL,Value * PtrValue,Value * Alignment,Value * OffsetValue)1242 CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
1243                                                    Value *PtrValue,
1244                                                    Value *Alignment,
1245                                                    Value *OffsetValue) {
1246   assert(isa<PointerType>(PtrValue->getType()) &&
1247          "trying to create an alignment assumption on a non-pointer?");
1248   return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
1249 }
1250 
CreateDereferenceableAssumption(Value * PtrValue,Value * SizeValue)1251 CallInst *IRBuilderBase::CreateDereferenceableAssumption(Value *PtrValue,
1252                                                          Value *SizeValue) {
1253   assert(isa<PointerType>(PtrValue->getType()) &&
1254          "trying to create an deferenceable assumption on a non-pointer?");
1255   SmallVector<Value *, 4> Vals({PtrValue, SizeValue});
1256   OperandBundleDefT<Value *> DereferenceableOpB("dereferenceable", Vals);
1257   return CreateAssumption(ConstantInt::getTrue(getContext()),
1258                           {DereferenceableOpB});
1259 }
1260 
// Out-of-line (defaulted) virtual destructors and no-op anchor() methods:
// defining them in this translation unit gives each class a home for its
// vtable, avoiding weak vtables emitted into every user.
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}
1266