//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

/// CreateGlobalString - Make a new global variable whose initializer is an
/// array of i8 holding the nul-terminated string value specified. If Name is
/// specified, it is the name of the global variable created.
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace,
                                                  Module *M) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  if (!M)
    M = BB->getParent()->getParent();
  auto *GV = new GlobalVariable(
      *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
      StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(Align(1));
  return GV;
}

Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}

Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  auto *PT = cast<PointerType>(Ptr->getType());
  if (PT->isOpaqueOrPointeeTypeMatches(getInt8Ty()))
    return Ptr;

  // Otherwise, we need to insert a bitcast.
  return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
}
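
// Illustrative usage (not part of the original file; names are hypothetical):
// with a builder `B` positioned inside a function, the call below creates a
// private, unnamed_addr constant holding the bytes "hello\0" with alignment 1.
//
//   IRBuilder<> B(InsertBB);
//   GlobalVariable *GV = B.CreateGlobalString("hello", "mystr");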

DebugLoc IRBuilderBase::getCurrentDebugLocation() const {
  for (auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg)
      return {cast<DILocation>(KV.second)};

  return {};
}

void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
  for (const auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg) {
      I->setDebugLoc(DebugLoc(KV.second));
      return;
    }
}

static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                  IRBuilderBase *Builder,
                                  const Twine &Name = "",
                                  Instruction *FMFSource = nullptr,
                                  ArrayRef<OperandBundleDef> OpBundles = {}) {
  CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
  if (FMFSource)
    CI->copyFastMathFlags(FMFSource);
  return CI;
}

Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  if (cast<ConstantInt>(Scaling)->isZero())
    return Scaling;
  Module *M = GetInsertBlock()->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
  CallInst *CI = createCallHelper(TheFn, {}, this, Name);
  return cast<ConstantInt>(Scaling)->getSExtValue() == 1
             ? CI
             : CreateMul(CI, Scaling);
}

Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  Type *STy = DstType->getScalarType();
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    if (STy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
                                 {StepVecType}, {}, nullptr, Name);
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to VF.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(ConstantInt::get(STy, i));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
}

CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(*Align);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
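
// Illustrative usage (hypothetical names): zero-fill 64 bytes at `Dst` with a
// 16-byte alignment hint; the volatile flag and metadata default to off.
//
//   B.CreateMemSet(Dst, B.getInt8(0), B.getInt64(64), MaybeAlign(16));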

CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
                                            Value *Val, Value *Size,
                                            bool IsVolatile, MDNode *TBAATag,
                                            MDNode *ScopeTag,
                                            MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset_inline, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  if (DstAlign)
    cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {

  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
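
// Illustrative usage (hypothetical names): CreateMemTransferInst is the shared
// implementation for the memcpy-style intrinsics; passing Intrinsic::memcpy
// emits a plain llvm.memcpy with the given alignment hints.
//
//   B.CreateMemTransferInst(Intrinsic::memcpy, Dst, MaybeAlign(8), Src,
//                           MaybeAlign(8), B.getInt64(32));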

CallInst *IRBuilderBase::CreateMemCpyInline(
    Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign,
    Value *Size, bool IsVolatile, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Function *F = BB->getParent();
  Module *M = F->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MCI = cast<MemCpyInlineInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    MCI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    MCI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    MCI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    MCI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
                                       Value *Src, MaybeAlign SrcAlign,
                                       Value *Size, bool isVolatile,
                                       MDNode *TBAATag, MDNode *ScopeTag,
                                       MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MMI = cast<MemMoveInst>(CI);
  if (DstAlign)
    MMI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MMI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memmove_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
                                       Value *Src) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Src};
  Type *Tys[] = {Src->getType()};
  auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
  return createCallHelper(Decl, Ops, Builder);
}

CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
                                        {Src->getType()});
  return createCallHelper(Decl, Ops, this);
}

CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
                                        {Src->getType()});
  return createCallHelper(Decl, Ops, this);
}

CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_add, Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_mul, Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_and, Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_or, Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_xor, Src);
}

CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(this, ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(this, ID, Src);
}
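
// Illustrative usage (hypothetical names): each helper above maps onto one
// llvm.vector.reduce.* intrinsic overloaded on the source vector type.
//
//   Value *Sum = B.CreateAddReduce(Vec);                       // ..._add
//   Value *Max = B.CreateIntMaxReduce(Vec, /*IsSigned=*/true); // ..._smax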

CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmin, Src);
}

CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = {Size, Ptr};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}

CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = {Size, Ptr};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}

CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return createCallHelper(TheFn, Ops, this);
}

CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
                                ArrayRef<OperandBundleDef> OpBundles) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = {Cond};
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
}

Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  Module *M = BB->getModule();
  auto *FnIntrinsic = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_noalias_scope_decl, {});
  return createCallHelper(FnIntrinsic, {Scope}, this);
}
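
// Illustrative usage (hypothetical names): attaching an "align" operand bundle
// to an assumption by hand; CreateAlignmentAssumptionHelper at the end of this
// file wraps the same pattern.
//
//   SmallVector<Value *, 2> BundleArgs = {Ptr, B.getInt64(16)};
//   B.CreateAssumption(B.getTrue(),
//                      {OperandBundleDefT<Value *>("align", BundleArgs)});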

/// Create a call to a Masked Load intrinsic.
/// \p Ty        - vector type to load
/// \p Ptr       - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(Ty) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = UndefValue::get(Ty);
  Type *OverloadedTypes[] = {Ty, PtrTy};
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}

/// Create a call to a Masked Store intrinsic.
/// \p Val       - data to be stored,
/// \p Ptr       - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(DataTy) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy, PtrTy};
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}

/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  return createCallHelper(TheFn, Ops, this, Name);
}
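
// Illustrative usage (hypothetical names): loading four i32 lanes under a
// <4 x i1> mask, with masked-off lanes filled from the default undef PassThru.
//
//   auto *VecTy = FixedVectorType::get(B.getInt32Ty(), 4);
//   Value *L = B.CreateMaskedLoad(VecTy, Ptr, Align(4), Mask);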

/// Create a call to a Masked Gather intrinsic.
/// \p Ty        - vector type to gather
/// \p Ptrs      - vector of pointers for loading
/// \p Alignment - alignment for one element
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
                                            Align Alignment, Value *Mask,
                                            Value *PassThru,
                                            const Twine &Name) {
  auto *VecTy = cast<VectorType>(Ty);
  ElementCount NumElts = VecTy->getElementCount();
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  assert(cast<PointerType>(PtrsTy->getElementType())
             ->isOpaqueOrPointeeTypeMatches(
                 cast<VectorType>(Ty)->getElementType()) &&
         "Element type mismatch");
  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  if (!PassThru)
    PassThru = UndefValue::get(Ty);

  Type *OverloadedTypes[] = {Ty, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}

/// Create a call to a Masked Scatter intrinsic.
/// \p Data      - data to be stored,
/// \p Ptrs      - the vector of pointers, where the \p Data elements should be
///                stored
/// \p Alignment - alignment for one element
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  ElementCount NumElts = PtrsTy->getElementCount();

#ifndef NDEBUG
  auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
  assert(NumElts == DataTy->getElementCount() &&
         PtrTy->isOpaqueOrPointeeTypeMatches(DataTy->getElementType()) &&
         "Incompatible pointer and data types");
#endif

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}

template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle.
  return Args;
}
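
// For reference, the operand list assembled above is, in order:
//   <id>, <num patch bytes>, <callee>, <num call args>, <flags>,
//   <call args...>, 0 /* transition args, now a bundle */,
//   0 /* deopt args, now a bundle */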

template <typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,
                     Optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value *, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value *, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value *, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}

template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
    ArrayRef<T3> GCArgs, const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic typed argument (the function is also vararg).
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualCallee.getCallee()->getType()});

  std::vector<Value *> Args = getStatepointArgs(
      *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);

  CallInst *CI = Builder->CreateCall(
      FnStatepoint, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  CI->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualCallee.getFunctionType()));
  return CI;
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    uint32_t Flags, ArrayRef<Value *> CallArgs,
    Optional<ArrayRef<Use>> TransitionArgs, Optional<ArrayRef<Use>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Use> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None, DeoptArgs, GCArgs, Name);
}
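
// Illustrative usage (hypothetical values): wrapping a call to `Callee` in a
// statepoint with no patch bytes, no transition args, and an empty gc-live set.
//
//   CallInst *SP = B.CreateGCStatepointCall(
//       /*ID=*/StatepointDirectives::DefaultStatepointID, /*NumPatchBytes=*/0,
//       Callee, CallArgs, /*DeoptArgs=*/None, /*GCArgs=*/{});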

template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualInvokee, BasicBlock *NormalDest,
    BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
    Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
    ArrayRef<T3> GCArgs, const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic typed argument (the function is also vararg).
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualInvokee.getCallee()->getType()});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
                        Flags, InvokeArgs);

  InvokeInst *II = Builder->CreateInvoke(
      FnStatepoint, NormalDest, UnwindDest, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
  return II;
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs,
      None /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    Optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
      Name);
}

CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType, const Twine &Name) {
  Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);

  Value *Args[] = {Statepoint};
  return createCallHelper(FnGCResult, Args, this, Name);
}

CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset, int DerivedOffset,
                                          Type *ResultType, const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCRelocate =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);

  Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
  return createCallHelper(FnGCRelocate, Args, this, Name);
}
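
// Illustrative usage (hypothetical values): retrieving results from a
// statepoint `SP`. The offsets index pointers within SP's "gc-live" bundle.
//
//   Value *Ret   = B.CreateGCResult(SP, RetTy);
//   Value *Reloc = B.CreateGCRelocate(SP, /*BaseOffset=*/0,
//                                     /*DerivedOffset=*/0, PtrTy);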

CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCFindBase = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
  return createCallHelper(FnGCFindBase, {DerivedPtr}, this, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCGetOffset = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
  return createCallHelper(FnGCGetOffset, {DerivedPtr}, this, Name);
}

CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              Instruction *FMFSource,
                                              const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, this, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                               Value *RHS,
                                               Instruction *FMFSource,
                                               const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {LHS->getType()});
  return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, this, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
                       Ops[0], Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
                      Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}
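
// Illustrative usage (hypothetical names): both helpers overload the intrinsic
// on a single type taken from the first operand.
//
//   Value *S = B.CreateUnaryIntrinsic(Intrinsic::sqrt, X);      // llvm.sqrt
//   Value *M = B.CreateBinaryIntrinsic(Intrinsic::smax, A, C);  // llvm.smax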

CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  bool HasRoundingMD = false;
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateFCmpHelper(
    CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
    MDNode *FPMathTag, bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}

CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, Optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(UseArgs, Args);
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *V = Folder.FoldSelect(C, True, False))
    return V;

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  return Insert(Sel, Name);
}

Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  assert(cast<PointerType>(LHS->getType())
             ->isOpaqueOrPointeeTypeMatches(ElemTy) &&
         "Pointer type must match element type");
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy),
                         Name);
}
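
// In IR terms, CreatePtrDiff(ElemTy, L, R) computes
//   sdiv exact (ptrtoint L - ptrtoint R), sizeof(ElemTy)
// i.e. the element-count distance between two pointers of the same type.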

Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::launder_invariant_group, {Int8PtrTy});

  assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "LaunderInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::strip_invariant_group, {Int8PtrTy});

  assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "StripInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_reverse, Ty);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Keep the original behaviour for fixed vector.
  SmallVector<int, 8> ShuffleMask;
  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
    ShuffleMask.push_back(NumElts - i - 1);
  return CreateShuffleVector(V, ShuffleMask, Name);
}
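
// For a fixed-width input such as <4 x i32> the path above emits a single
// shufflevector with mask <3, 2, 1, 0>; scalable vectors go through the
// llvm.experimental.vector.reverse intrinsic instead.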

Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vector.
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);

  return CreateShuffleVector(V1, V2, Mask);
}

Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  auto EC = ElementCount::getFixed(NumElts);
  return CreateVectorSplat(EC, V, Name);
}

Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // First insert it into a poison vector so we can shuffle it.
  Type *I32Ty = getInt32Ty();
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, ConstantInt::get(I32Ty, 0),
                          Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(EC.getKnownMinValue());
  return CreateShuffleVector(V, Zeros, Name + ".splat");
}

Value *IRBuilderBase::CreateExtractInteger(
    const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
    uint64_t Offset, const Twine &Name) {
  auto *IntTy = cast<IntegerType>(From->getType());
  assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
             DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  uint64_t ShAmt = 8 * Offset;
  Value *V = From;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
                 DL.getTypeStoreSize(ExtractedTy) - Offset);
  if (ShAmt) {
    V = CreateLShr(V, ShAmt, Name + ".shift");
  }
  assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (ExtractedTy != IntTy) {
    V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
  }
  return V;
}

Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");
  assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
         "Pointer element type mismatch");

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn =
      CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");
  assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
         "Pointer element type mismatch");

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
                            {Base, GEPIndex, DIIndex});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  if (OffsetValue)
    Vals.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}

IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}