//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

/// CreateGlobalString - Make a new global variable whose initializer is an
/// array of i8 filled in with the nul-terminated string value specified.  If
/// Name is given, it is used as the name of the created global variable.
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace,
                                                  Module *M) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  if (!M)
    M = BB->getParent()->getParent();
  auto *GV = new GlobalVariable(
      *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
      StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(Align(1));
  return GV;
}
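
// Illustrative usage sketch (not part of this file): a frontend with a builder
// positioned at some insertion point can materialize a private string constant
// like so; the builder and value names are hypothetical.
//
//   IRBuilder<> B(SomeBB);
//   GlobalVariable *Fmt = B.CreateGlobalString("%d\n", "fmt");
//   // Fmt is a private, unnamed_addr [4 x i8] constant with alignment 1.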

Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}

Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  auto *PT = cast<PointerType>(Ptr->getType());
  if (PT->isOpaqueOrPointeeTypeMatches(getInt8Ty()))
    return Ptr;

  // Otherwise, we need to insert a bitcast.
  return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
}

static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                  IRBuilderBase *Builder,
                                  const Twine &Name = "",
                                  Instruction *FMFSource = nullptr,
                                  ArrayRef<OperandBundleDef> OpBundles = {}) {
  CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
  if (FMFSource)
    CI->copyFastMathFlags(FMFSource);
  return CI;
}

Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  if (cast<ConstantInt>(Scaling)->isZero())
    return Scaling;
  Module *M = GetInsertBlock()->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
  CallInst *CI = createCallHelper(TheFn, {}, this, Name);
  return cast<ConstantInt>(Scaling)->getSExtValue() == 1
             ? CI
             : CreateMul(CI, Scaling);
}
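
// Illustrative sketch: emitting the runtime element count of a
// <vscale x 8 x i16> vector. The builder name is hypothetical; the scaling
// operand must be a ConstantInt.
//
//   Value *VL = B.CreateVScale(B.getInt64(8), "vl");
//   // Roughly: %vl = mul i64 (call i64 @llvm.vscale.i64()), 8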

Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  Type *STy = DstType->getScalarType();
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    if (STy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
                                 {StepVecType}, {}, nullptr, Name);
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to VF.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(ConstantInt::get(STy, i));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
}
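
// Illustrative sketch: for a fixed-width request the call below folds to a
// constant; for scalable types it emits llvm.experimental.stepvector. The
// builder name is hypothetical.
//
//   Value *Step = B.CreateStepVector(FixedVectorType::get(B.getInt32Ty(), 4));
//   // Step == <i32 0, i32 1, i32 2, i32 3>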

CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = { Ptr->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(Align->value());

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
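
// Illustrative sketch: zero-filling 64 bytes through a 16-byte-aligned
// destination. Dst is a hypothetical pointer value; the metadata parameters
// are left at their defaults.
//
//   B.CreateMemSet(Dst, B.getInt8(0), B.getInt64(64), MaybeAlign(16));
//   // Roughly: call void @llvm.memset.p0i8.i64(i8* align 16 %Dst, i8 0,
//   //                                          i64 64, i1 false)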

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {

  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto* MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
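
// Illustrative sketch: wrappers such as CreateMemCpy declared in IRBuilder.h
// funnel into the helper above. A plain 8-/4-byte-aligned copy over two
// hypothetical pointers Dst and Src of a hypothetical size Len might be:
//
//   B.CreateMemCpy(Dst, MaybeAlign(8), Src, MaybeAlign(4), B.getInt64(Len));
//   // Lowers to llvm.memcpy.* with align parameter attributes on Dst and Src.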

CallInst *IRBuilderBase::CreateMemCpyInline(
    Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign,
    Value *Size, bool IsVolatile, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Function *F = BB->getParent();
  Module *M = F->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MCI = cast<MemCpyInlineInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    MCI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    MCI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    MCI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    MCI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
                                       Value *Src, MaybeAlign SrcAlign,
                                       Value *Size, bool isVolatile,
                                       MDNode *TBAATag, MDNode *ScopeTag,
                                       MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MMI = cast<MemMoveInst>(CI);
  if (DstAlign)
    MMI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MMI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memmove_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
                                    Value *Src) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Src};
  Type *Tys[] = { Src->getType() };
  auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
  return createCallHelper(Decl, Ops, Builder);
}

CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
                                        {Src->getType()});
  return createCallHelper(Decl, Ops, this);
}

CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
                                        {Src->getType()});
  return createCallHelper(Decl, Ops, this);
}

CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_add, Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_mul, Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_and, Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_or, Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_xor, Src);
}

CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(this, ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(this, ID, Src);
}

CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmin, Src);
}
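
// Illustrative sketch: collapsing a hypothetical <4 x float> value Vec and a
// hypothetical integer vector Bits to scalars. The fadd reduction threads an
// explicit start value; the bitwise reductions take only the vector.
//
//   Value *Sum = B.CreateFAddReduce(ConstantFP::get(B.getFloatTy(), 0.0), Vec);
//   Value *Any = B.CreateOrReduce(Bits);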

CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}

CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}
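
// Illustrative sketch: bracketing the 16-byte lifetime of a hypothetical
// stack slot so later passes may reuse its storage.
//
//   B.CreateLifetimeStart(Slot, B.getInt64(16));
//   // ... code using Slot ...
//   B.CreateLifetimeEnd(Slot, B.getInt64(16));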

CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {

  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return createCallHelper(TheFn, Ops, this);
}

CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
                                ArrayRef<OperandBundleDef> OpBundles) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = { Cond };
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
}

Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  Module *M = BB->getModule();
  auto *FnIntrinsic = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_noalias_scope_decl, {});
  return createCallHelper(FnIntrinsic, {Scope}, this);
}

/// Create a call to a Masked Load intrinsic.
/// \p Ty        - vector type to load
/// \p Ptr       - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(Ty) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = UndefValue::get(Ty);
  Type *OverloadedTypes[] = { Ty, PtrTy };
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}

/// Create a call to a Masked Store intrinsic.
/// \p Val       - data to be stored,
/// \p Ptr       - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(DataTy) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}
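
// Illustrative sketch: a masked load feeding a masked store through a
// hypothetical vector pointer P under a hypothetical <8 x i1> mask M.
//
//   Type *VecTy = FixedVectorType::get(B.getInt32Ty(), 8);
//   Value *V = B.CreateMaskedLoad(VecTy, P, Align(32), M);
//   B.CreateMaskedStore(V, P, Align(32), M);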

/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  return createCallHelper(TheFn, Ops, this, Name);
}

/// Create a call to a Masked Gather intrinsic.
/// \p Ty       - vector type to gather
/// \p Ptrs     - vector of pointers for loading
/// \p Align    - alignment for one element
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
                                            Align Alignment, Value *Mask,
                                            Value *PassThru,
                                            const Twine &Name) {
  auto *VecTy = cast<VectorType>(Ty);
  ElementCount NumElts = VecTy->getElementCount();
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  assert(cast<PointerType>(PtrsTy->getElementType())
             ->isOpaqueOrPointeeTypeMatches(
                 cast<VectorType>(Ty)->getElementType()) &&
         "Element type mismatch");
  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  if (!PassThru)
    PassThru = UndefValue::get(Ty);

  Type *OverloadedTypes[] = {Ty, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}

/// Create a call to a Masked Scatter intrinsic.
/// \p Data  - data to be stored,
/// \p Ptrs  - the vector of pointers, where the \p Data elements should be
///            stored
/// \p Align - alignment for one element
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  ElementCount NumElts = PtrsTy->getElementCount();

#ifndef NDEBUG
  auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
  assert(NumElts == DataTy->getElementCount() &&
         PtrTy->isOpaqueOrPointeeTypeMatches(DataTy->getElementType()) &&
         "Incompatible pointer and data types");
#endif

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
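
// Illustrative sketch: gathering four i32 values through a hypothetical
// <4 x i32*> vector of pointers Ptrs. With a null Mask the gather is
// unconditional, and masked-off lanes of the result take the PassThru value
// (undef when none is supplied).
//
//   Type *VecTy = FixedVectorType::get(B.getInt32Ty(), 4);
//   Value *Gathered = B.CreateMaskedGather(VecTy, Ptrs, Align(4));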

template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}

template<typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,
                     Optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value*, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value*, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value*, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}

template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    Optional<ArrayRef<T1>> TransitionArgs,
    Optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualCallee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Type *ArgTypes[] = { FuncPtrType };
  Function *FnStatepoint =
    Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                              ArgTypes);

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
                        CallArgs);

  return Builder->CreateCall(FnStatepoint, Args,
                             getStatepointBundles(TransitionArgs, DeoptArgs,
                                                  GCArgs),
                             Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Use> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None, DeoptArgs, GCArgs, Name);
}

template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest,
    uint32_t Flags, ArrayRef<T0> InvokeArgs,
    Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
    ArrayRef<T3> GCArgs, const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee, Flags,
                        InvokeArgs);

  return Builder->CreateInvoke(FnStatepoint, NormalDest, UnwindDest, Args,
                               getStatepointBundles(TransitionArgs, DeoptArgs,
                                                    GCArgs),
                               Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
      DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    Optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
      Name);
}

CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                       Type *ResultType,
                                       const Twine &Name) {
 Intrinsic::ID ID = Intrinsic::experimental_gc_result;
 Module *M = BB->getParent()->getParent();
 Type *Types[] = {ResultType};
 Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);

 Value *Args[] = {Statepoint};
 return createCallHelper(FnGCResult, Args, this, Name);
}

CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                         int BaseOffset,
                                         int DerivedOffset,
                                         Type *ResultType,
                                         const Twine &Name) {
 Module *M = BB->getParent()->getParent();
 Type *Types[] = {ResultType};
 Function *FnGCRelocate =
     Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);

 Value *Args[] = {Statepoint,
                  getInt32(BaseOffset),
                  getInt32(DerivedOffset)};
 return createCallHelper(FnGCRelocate, Args, this, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCFindBase = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
  return createCallHelper(FnGCFindBase, {DerivedPtr}, this, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCGetOffset = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
  return createCallHelper(FnGCGetOffset, {DerivedPtr}, this, Name);
}

CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              Instruction *FMFSource,
                                              const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, this, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                               Value *RHS,
                                               Instruction *FMFSource,
                                               const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
  return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, this, Name, FMFSource);
}
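
// Illustrative sketch: the three helpers above cover most intrinsic call
// sites. For a hypothetical i32 value X:
//
//   Value *Sat = B.CreateBinaryIntrinsic(Intrinsic::uadd_sat, X, B.getInt32(1));
//   Value *Lz  = B.CreateIntrinsic(Intrinsic::ctlz, {B.getInt32Ty()},
//                                  {X, B.getFalse()});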

CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
                       Ops[0], Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
                      Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}

CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  bool HasRoundingMD = false;
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)        \
  case Intrinsic::INTRINSIC:                                \
    HasRoundingMD = ROUND_MODE;                             \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateFCmpHelper(
    CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
    MDNode *FPMathTag, bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}

CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, Optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(UseArgs, Args);
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)        \
  case Intrinsic::INTRINSIC:                                \
    HasRoundingMD = ROUND_MODE;                             \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *CC = dyn_cast<Constant>(C))
    if (auto *TC = dyn_cast<Constant>(True))
      if (auto *FC = dyn_cast<Constant>(False))
        return Insert(Folder.CreateSelect(CC, TC, FC), Name);

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  return Insert(Sel, Name);
}

Value *IRBuilderBase::CreatePtrDiff(Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  auto *ArgType = cast<PointerType>(LHS->getType());
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  return CreateExactSDiv(Difference,
                         ConstantExpr::getSizeOf(ArgType->getElementType()),
                         Name);
}

Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::launder_invariant_group, {Int8PtrTy});

  assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "LaunderInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::strip_invariant_group, {Int8PtrTy});

  assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "StripInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_reverse, Ty);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Keep the original behaviour for fixed vector
  SmallVector<int, 8> ShuffleMask;
  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
    ShuffleMask.push_back(NumElts - i - 1);
  return CreateShuffleVector(V, ShuffleMask, Name);
}

Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vector
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);

  return CreateShuffleVector(V1, V2, Mask);
}

Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  auto EC = ElementCount::getFixed(NumElts);
  return CreateVectorSplat(EC, V, Name);
}

Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // First insert it into a poison vector so we can shuffle it.
  Type *I32Ty = getInt32Ty();
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, ConstantInt::get(I32Ty, 0),
                          Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(EC.getKnownMinValue());
  return CreateShuffleVector(V, Zeros, Name + ".splat");
}
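
// Illustrative sketch: broadcasting a hypothetical scalar X into a
// four-element vector; this expands to an insertelement into poison followed
// by a zero-mask shufflevector, as implemented above.
//
//   Value *Splat = B.CreateVectorSplat(4, X, "x");
//   // %x.splatinsert = insertelement <4 x T> poison, T %X, i32 0
//   // %x.splat       = shufflevector ... , zeroinitializer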

Value *IRBuilderBase::CreateExtractInteger(
    const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
    uint64_t Offset, const Twine &Name) {
  auto *IntTy = cast<IntegerType>(From->getType());
  assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
             DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  uint64_t ShAmt = 8 * Offset;
  Value *V = From;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
                 DL.getTypeStoreSize(ExtractedTy) - Offset);
  if (ShAmt) {
    V = CreateLShr(V, ShAmt, Name + ".shift");
  }
  assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (ExtractedTy != IntTy) {
    V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
  }
  return V;
}

Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");
  assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
         "Pointer element type mismatch");

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn =
      CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");
  assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
         "Pointer element type mismatch");

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
                            {Base, GEPIndex, DIIndex});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  if (OffsetValue)
    Vals.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
}
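
// Illustrative sketch: asserting that a hypothetical pointer P is 64-byte
// aligned, given a DataLayout DL. This emits llvm.assume with an "align"
// operand bundle rather than the older ptrtoint/and/icmp pattern.
//
//   B.CreateAlignmentAssumption(DL, P, 64);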

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}

IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() {}
IRBuilderFolder::~IRBuilderFolder() {}
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}