//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

/// CreateGlobalString - Make a new global variable with an initializer that
/// has array of i8 type filled in with the nul terminated string value
/// specified. If Name is specified, it is the name of the global variable
/// created.
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace,
                                                  Module *M) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  if (!M)
    M = BB->getParent()->getParent();
  auto *GV = new GlobalVariable(
      *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
      StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(Align(1));
  return GV;
}

Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}

Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  auto *PT = cast<PointerType>(Ptr->getType());
  if (PT->isOpaqueOrPointeeTypeMatches(getInt8Ty()))
    return Ptr;

  // Otherwise, we need to insert a bitcast.
  return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
}

static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                  IRBuilderBase *Builder,
                                  const Twine &Name = "",
                                  Instruction *FMFSource = nullptr,
                                  ArrayRef<OperandBundleDef> OpBundles = {}) {
  CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
  if (FMFSource)
    CI->copyFastMathFlags(FMFSource);
  return CI;
}

Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  if (cast<ConstantInt>(Scaling)->isZero())
    return Scaling;
  Module *M = GetInsertBlock()->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
  CallInst *CI = createCallHelper(TheFn, {}, this, Name);
  return cast<ConstantInt>(Scaling)->getSExtValue() == 1
             ? CI
             : CreateMul(CI, Scaling);
}
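
// Illustrative usage sketch (not part of this file): materializing the
// runtime quantity `vscale * 4`, assuming a hypothetical `Builder` that is
// positioned inside a function. A scaling of 1 yields the raw llvm.vscale
// call and a scaling of 0 folds to the constant, per the logic above.
//
//   Value *VL = Builder.CreateVScale(
//       ConstantInt::get(Builder.getInt64Ty(), 4), "vl");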

Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  if (isa<ScalableVectorType>(DstType))
    return CreateIntrinsic(Intrinsic::experimental_stepvector, {DstType}, {},
                           nullptr, Name);

  Type *STy = DstType->getScalarType();
  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to VF.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(ConstantInt::get(STy, i));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
}

CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = { Ptr->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(Align->value());

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {

  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
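
// Illustrative usage sketch (assumed caller-side code; `Builder`, `Dst`, and
// `Src` are hypothetical): emitting a 32-byte, 4-byte-aligned memcpy through
// the generic transfer helper above.
//
//   CallInst *Copy = Builder.CreateMemTransferInst(
//       Intrinsic::memcpy, Dst, Align(4), Src, Align(4),
//       Builder.getInt64(32), /*isVolatile=*/false);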

CallInst *IRBuilderBase::CreateMemCpyInline(
    Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign,
    Value *Size, bool IsVolatile, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Function *F = BB->getParent();
  Module *M = F->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MCI = cast<MemCpyInlineInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    MCI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    MCI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    MCI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    MCI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
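
// Illustrative usage sketch (hypothetical `Builder`, `Dst`, `Src`): copying
// 64 bytes as unordered-atomic i32 elements. Both alignments must be at
// least the element size, per the asserts above.
//
//   CallInst *CI = Builder.CreateElementUnorderedAtomicMemCpy(
//       Dst, Align(4), Src, Align(4), Builder.getInt64(64),
//       /*ElementSize=*/4);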

CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
                                       Value *Src, MaybeAlign SrcAlign,
                                       Value *Size, bool isVolatile,
                                       MDNode *TBAATag, MDNode *ScopeTag,
                                       MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MMI = cast<MemMoveInst>(CI);
  if (DstAlign)
    MMI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MMI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memmove_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
                                       Value *Src) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Src};
  Type *Tys[] = { Src->getType() };
  auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
  return createCallHelper(Decl, Ops, Builder);
}

CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
                                        {Src->getType()});
  return createCallHelper(Decl, Ops, this);
}

CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
                                        {Src->getType()});
  return createCallHelper(Decl, Ops, this);
}

CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_add, Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_mul, Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_and, Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_or, Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_xor, Src);
}

CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(this, ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(this, ID, Src);
}
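
// Illustrative usage sketch (hypothetical `Builder` and vector value `Vec`):
// reducing a vector to a scalar sum and a signed maximum via the wrappers
// above.
//
//   CallInst *Sum = Builder.CreateAddReduce(Vec);
//   CallInst *Max = Builder.CreateIntMaxReduce(Vec, /*IsSigned=*/true);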

CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmin, Src);
}

CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}

CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}

CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {

  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return createCallHelper(TheFn, Ops, this);
}

CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
                                ArrayRef<OperandBundleDef> OpBundles) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = { Cond };
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
}

Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  Module *M = BB->getModule();
  auto *FnIntrinsic = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_noalias_scope_decl, {});
  return createCallHelper(FnIntrinsic, {Scope}, this);
}

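// Illustrative usage sketch (hypothetical `Builder` and `AllocaInst *Slot`):
// bracketing the first 16 bytes of a stack slot with lifetime markers.
// Passing a null Size instead marks the whole object (the -1 default above).
//
//   Builder.CreateLifetimeStart(Slot, Builder.getInt64(16));
//   // ... emit the uses of Slot ...
//   Builder.CreateLifetimeEnd(Slot, Builder.getInt64(16));
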
/// Create a call to a Masked Load intrinsic.
/// \p Ty - vector type to load
/// \p Ptr - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
/// of the result
/// \p Name - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(Ty) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = UndefValue::get(Ty);
  Type *OverloadedTypes[] = { Ty, PtrTy };
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}

/// Create a call to a Masked Store intrinsic.
/// \p Val - data to be stored,
/// \p Ptr - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(DataTy) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}

/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  return createCallHelper(TheFn, Ops, this, Name);
}

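// Illustrative usage sketch (hypothetical `Builder`, vector type `VecTy`,
// pointer `Ptr`, i1-vector `Mask`, and `PassThru`): a masked load whose
// disabled lanes come from `PassThru`, followed by a masked store of the
// loaded value under the same mask.
//
//   Value *Ld = Builder.CreateMaskedLoad(VecTy, Ptr, Align(16), Mask,
//                                        PassThru, "masked.ld");
//   Builder.CreateMaskedStore(Ld, Ptr, Align(16), Mask);
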
/// Create a call to a Masked Gather intrinsic.
/// \p Ty - vector type to gather
/// \p Ptrs - vector of pointers for loading
/// \p Align - alignment for one element
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
/// of the result
/// \p Name - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
                                            Align Alignment, Value *Mask,
                                            Value *PassThru,
                                            const Twine &Name) {
  auto *VecTy = cast<VectorType>(Ty);
  ElementCount NumElts = VecTy->getElementCount();
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  assert(cast<PointerType>(PtrsTy->getElementType())
             ->isOpaqueOrPointeeTypeMatches(
                 cast<VectorType>(Ty)->getElementType()) &&
         "Element type mismatch");
  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  if (!PassThru)
    PassThru = UndefValue::get(Ty);

  Type *OverloadedTypes[] = {Ty, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}

/// Create a call to a Masked Scatter intrinsic.
/// \p Data - data to be stored,
/// \p Ptrs - the vector of pointers, where the \p Data elements should be
/// stored
/// \p Align - alignment for one element
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  ElementCount NumElts = PtrsTy->getElementCount();

#ifndef NDEBUG
  auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
  assert(NumElts == DataTy->getElementCount() &&
         PtrTy->isOpaqueOrPointeeTypeMatches(DataTy->getElementType()) &&
         "Incompatible pointer and data types");
#endif

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
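
// Illustrative usage sketch (hypothetical `Builder`, vector-of-pointers
// `Ptrs`, and element vector type `VecTy`): a gather with the all-ones
// default mask (built above when Mask is null) and an undef pass-through.
//
//   Value *G = Builder.CreateMaskedGather(VecTy, Ptrs, Align(4),
//                                         /*Mask=*/nullptr,
//                                         /*PassThru=*/nullptr, "gathered");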

template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}

template<typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,
                     Optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value*, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value*, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value*, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}

template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    Optional<ArrayRef<T1>> TransitionArgs,
    Optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualCallee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Type *ArgTypes[] = { FuncPtrType };
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                ArgTypes);

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
                        CallArgs);

  return Builder->CreateCall(FnStatepoint, Args,
                             getStatepointBundles(TransitionArgs, DeoptArgs,
                                                  GCArgs),
                             Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Use> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None, DeoptArgs, GCArgs, Name);
}
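
// Illustrative usage sketch (hypothetical `Builder`, callee `Fn`, argument
// list `Args`, and gc-managed values `GCVals`; the statepoint ID is caller
// chosen): wrapping a call in a statepoint with no patchable bytes, no
// transition state, and no deopt state.
//
//   CallInst *SP = Builder.CreateGCStatepointCall(
//       /*ID=*/0, /*NumPatchBytes=*/0, Fn, Args,
//       /*DeoptArgs=*/None, /*GCArgs=*/GCVals, "statepoint");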

template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest,
    uint32_t Flags, ArrayRef<T0> InvokeArgs,
    Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
    ArrayRef<T3> GCArgs, const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee, Flags,
                        InvokeArgs);

  return Builder->CreateInvoke(FnStatepoint, NormalDest, UnwindDest, Args,
                               getStatepointBundles(TransitionArgs, DeoptArgs,
                                                    GCArgs),
                               Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
      DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    Optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
      Name);
}

CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType,
                                        const Twine &Name) {
  Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);

  Value *Args[] = {Statepoint};
  return createCallHelper(FnGCResult, Args, this, Name);
}

CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset,
                                          int DerivedOffset,
                                          Type *ResultType,
                                          const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCRelocate =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);

  Value *Args[] = {Statepoint,
                   getInt32(BaseOffset),
                   getInt32(DerivedOffset)};
  return createCallHelper(FnGCRelocate, Args, this, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCFindBase = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
  return createCallHelper(FnGCFindBase, {DerivedPtr}, this, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCGetOffset = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
  return createCallHelper(FnGCGetOffset, {DerivedPtr}, this, Name);
}

CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              Instruction *FMFSource,
                                              const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, this, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                               Value *RHS,
                                               Instruction *FMFSource,
                                               const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
  return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, this, Name, FMFSource);
}
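
// Illustrative usage sketch (hypothetical `Builder` and float values `X`,
// `Y`): emitting llvm.fabs and llvm.maxnum through the generic wrappers
// above; the intrinsic overload is derived from the operand type.
//
//   CallInst *Abs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X);
//   CallInst *Max = Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, X, Y);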

CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
                       Ops[0], Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
                      Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}

CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  bool HasRoundingMD = false;
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateFCmpHelper(
    CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
    MDNode *FPMathTag, bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}

CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, Optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(UseArgs, Args);
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
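
// Illustrative usage sketch (hypothetical `Builder` in constrained-FP mode
// with double values `A` and `B`): a strict fadd that inherits the builder's
// default rounding and exception settings because no explicit Rounding or
// Except arguments are supplied.
//
//   CallInst *Sum = Builder.CreateConstrainedFPBinOp(
//       Intrinsic::experimental_constrained_fadd, A, B);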

Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *CC = dyn_cast<Constant>(C))
    if (auto *TC = dyn_cast<Constant>(True))
      if (auto *FC = dyn_cast<Constant>(False))
        return Insert(Folder.CreateSelect(CC, TC, FC), Name);

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  return Insert(Sel, Name);
}

Value *IRBuilderBase::CreatePtrDiff(Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  auto *ArgType = cast<PointerType>(LHS->getType());
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  return CreateExactSDiv(Difference,
                         ConstantExpr::getSizeOf(ArgType->getElementType()),
                         Name);
}

Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::launder_invariant_group, {Int8PtrTy});

  assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "LaunderInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::strip_invariant_group, {Int8PtrTy});

  assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "StripInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_reverse, Ty);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Keep the original behaviour for fixed vector
  SmallVector<int, 8> ShuffleMask;
  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
    ShuffleMask.push_back(NumElts - i - 1);
  return CreateShuffleVector(V, ShuffleMask, Name);
}
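
// Illustrative usage sketch (hypothetical `Builder` and fixed-width vector
// `Vec`): for a <4 x i32> input this emits a shufflevector with mask
// <3, 2, 1, 0>; scalable vectors take the intrinsic path above instead.
//
//   Value *Rev = Builder.CreateVectorReverse(Vec, "rev");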

Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vector
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);

  return CreateShuffleVector(V1, V2, Mask);
}

Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  auto EC = ElementCount::getFixed(NumElts);
  return CreateVectorSplat(EC, V, Name);
}

Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // First insert it into a poison vector so we can shuffle it.
  Type *I32Ty = getInt32Ty();
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, ConstantInt::get(I32Ty, 0),
                          Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(EC.getKnownMinValue());
  return CreateShuffleVector(V, Zeros, Name + ".splat");
}
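
// Illustrative usage sketch (hypothetical `Builder` and scalar `V`):
// splatting one value across four lanes via the insertelement-plus-shuffle
// idiom implemented above.
//
//   Value *Splat = Builder.CreateVectorSplat(4, V, "v4");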

Value *IRBuilderBase::CreateExtractInteger(
    const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
    uint64_t Offset, const Twine &Name) {
  auto *IntTy = cast<IntegerType>(From->getType());
  assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
             DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  uint64_t ShAmt = 8 * Offset;
  Value *V = From;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
                 DL.getTypeStoreSize(ExtractedTy) - Offset);
  if (ShAmt) {
    V = CreateLShr(V, ShAmt, Name + ".shift");
  }
  assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (ExtractedTy != IntTy) {
    V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
  }
  return V;
}

Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");
  assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
         "Pointer element type mismatch");

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn =
      CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");
  assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
         "Pointer element type mismatch");

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
                            {Base, GEPIndex, DIIndex});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  if (OffsetValue)
    Vals.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}

IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() {}
IRBuilderFolder::~IRBuilderFolder() {}
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}