//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;

/// CreateGlobalString - Make a new global variable with an initializer that
/// has an array of i8 type filled in with the nul-terminated string value
/// specified. If Name is specified, it is the name of the global variable
/// created.
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace,
                                                  Module *M) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  if (!M)
    M = BB->getParent()->getParent();
  auto *GV = new GlobalVariable(
      *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
      StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(Align(1));
  return GV;
}
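
// Illustrative usage (not part of this file): with an IRBuilder positioned in
// some basic block, the call below would emit a private, unnamed_addr global
// initialized to "hello\0". `B` and the name "str" are hypothetical.
//
//   IRBuilder<> B(InsertBB);
//   GlobalVariable *GV = B.CreateGlobalString("hello", "str");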

Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}

Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  auto *PT = cast<PointerType>(Ptr->getType());
  if (PT->isOpaqueOrPointeeTypeMatches(getInt8Ty()))
    return Ptr;

  // Otherwise, we need to insert a bitcast.
  return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
}

DebugLoc IRBuilderBase::getCurrentDebugLocation() const {
  for (auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg)
      return {cast<DILocation>(KV.second)};

  return {};
}

void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
  for (const auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg) {
      I->setDebugLoc(DebugLoc(KV.second));
      return;
    }
}

CallInst *
IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                const Twine &Name, Instruction *FMFSource,
                                ArrayRef<OperandBundleDef> OpBundles) {
  CallInst *CI = CreateCall(Callee, Ops, OpBundles, Name);
  if (FMFSource)
    CI->copyFastMathFlags(FMFSource);
  return CI;
}

Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  if (cast<ConstantInt>(Scaling)->isZero())
    return Scaling;
  Module *M = GetInsertBlock()->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
  CallInst *CI = CreateCall(TheFn, {}, {}, Name);
  return cast<ConstantInt>(Scaling)->getSExtValue() == 1
             ? CI
             : CreateMul(CI, Scaling);
}

Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  Type *STy = DstType->getScalarType();
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    if (STy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
                                 {StepVecType}, {}, nullptr, Name);
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to NumEls - 1.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(ConstantInt::get(STy, i));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
}

CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(*Align);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
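
// Illustrative usage (not part of this file): zero-fill 64 bytes at `Dst`
// with 16-byte destination alignment. `B` and `Dst` are hypothetical.
//
//   B.CreateMemSet(Dst, B.getInt8(0), B.getInt64(64), MaybeAlign(16));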

CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
                                            Value *Val, Value *Size,
                                            bool IsVolatile, MDNode *TBAATag,
                                            MDNode *ScopeTag,
                                            MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset_inline, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  if (DstAlign)
    cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {

  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  auto *MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
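
// Illustrative usage (not part of this file, and stated as an assumption
// about the header): convenience wrappers such as CreateMemCpy declared in
// IRBuilder.h funnel into CreateMemTransferInst above with the matching
// intrinsic ID, e.g.:
//
//   B.CreateMemCpy(Dst, MaybeAlign(8), Src, MaybeAlign(8), B.getInt64(N));
//
// `B`, `Dst`, `Src`, and `N` are hypothetical.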

CallInst *IRBuilderBase::CreateMemCpyInline(
    Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign,
    Value *Size, bool IsVolatile, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Function *F = BB->getParent();
  Module *M = F->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  auto *MCI = cast<MemCpyInlineInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    MCI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    MCI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    MCI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    MCI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
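
// Note (per the LangRef description of the element-atomic memory intrinsics,
// stated here as an assumption rather than enforced by this builder): the
// length passed as `Size` must be a multiple of `ElementSize`, and each
// element is transferred as an individual unordered-atomic access.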

CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
                                       Value *Src, MaybeAlign SrcAlign,
                                       Value *Size, bool isVolatile,
                                       MDNode *TBAATag, MDNode *ScopeTag,
                                       MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  auto *MMI = cast<MemMoveInst>(CI);
  if (DstAlign)
    MMI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MMI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memmove_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Src};
  Type *Tys[] = {Src->getType()};
  auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
                                        {Src->getType()});
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
                                        {Src->getType()});
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
}

CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
}
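
// Illustrative usage (not part of this file): reduce a <4 x i32> value `Vec`
// to a single i32, and an ordered float reduction seeded with accumulator
// `Acc`. `B`, `Vec`, and `Acc` are hypothetical. Per the LangRef,
// llvm.vector.reduce.fadd is sequential unless reassociation fast-math flags
// are set on the call.
//
//   Value *Sum  = B.CreateAddReduce(Vec);        // llvm.vector.reduce.add
//   Value *FSum = B.CreateFAddReduce(Acc, Vec);  // llvm.vector.reduce.fadd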

CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = {Size, Ptr};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  return CreateCall(TheFn, Ops);
}

CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = {Size, Ptr};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  return CreateCall(TheFn, Ops);
}
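
// Illustrative usage (not part of this file): bracket the live range of a
// 16-byte alloca; a null Size defaults to i64 -1, which per the LangRef
// denotes a variable or unknown object size. `B` and `AI` are hypothetical.
//
//   B.CreateLifetimeStart(AI, B.getInt64(16));
//   // ... uses of AI ...
//   B.CreateLifetimeEnd(AI, B.getInt64(16));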

CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {

  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return CreateCall(TheFn, Ops);
}

static MaybeAlign getAlign(Value *Ptr) {
  if (auto *O = dyn_cast<GlobalObject>(Ptr))
    return O->getAlign();
  if (auto *A = dyn_cast<GlobalAlias>(Ptr))
    return A->getAliaseeObject()->getAlign();
  return {};
}

CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) {
#ifndef NDEBUG
  // Handle the constexpr-cast case specially. This is possible when opaque
  // pointers are not enabled, since constants can be sunk directly by design
  // in LLVM. This can be removed once the abuse of constexpr is eliminated.
  auto *V = Ptr;
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->isCast())
      V = CE->getOperand(0);

  assert(isa<GlobalValue>(V) && cast<GlobalValue>(V)->isThreadLocal() &&
         "threadlocal_address only applies to thread local variables.");
#endif
  CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address,
                                 {Ptr->getType()}, {Ptr});
  if (MaybeAlign A = getAlign(Ptr)) {
    CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *A));
    CI->addRetAttr(Attribute::getWithAlignment(CI->getContext(), *A));
  }
  return CI;
}

CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
                                ArrayRef<OperandBundleDef> OpBundles) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = {Cond};
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  return CreateCall(FnAssume, Ops, OpBundles);
}

Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  Module *M = BB->getModule();
  auto *FnIntrinsic = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_noalias_scope_decl, {});
  return CreateCall(FnIntrinsic, {Scope});
}

/// Create a call to a Masked Load intrinsic.
/// \p Ty        - vector type to load
/// \p Ptr       - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(Ty) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty, PtrTy};
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}

/// Create a call to a Masked Store intrinsic.
/// \p Val       - data to be stored,
/// \p Ptr       - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(DataTy) && "Wrong element type");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy, PtrTy};
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}
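
// Illustrative usage (not part of this file): load only the lanes of a
// <4 x float> where `Mask` is true, taking masked-off lanes from `PassThru`.
// `B`, `Ptr`, `Mask`, and `PassThru` are hypothetical.
//
//   Type *VecTy = FixedVectorType::get(B.getFloatTy(), 4);
//   Value *V = B.CreateMaskedLoad(VecTy, Ptr, Align(16), Mask, PassThru);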

/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  return CreateCall(TheFn, Ops, {}, Name);
}

/// Create a call to a Masked Gather intrinsic.
/// \p Ty       - vector type to gather
/// \p Ptrs     - vector of pointers for loading
/// \p Align    - alignment for one element
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
                                            Align Alignment, Value *Mask,
                                            Value *PassThru,
                                            const Twine &Name) {
  auto *VecTy = cast<VectorType>(Ty);
  ElementCount NumElts = VecTy->getElementCount();
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  assert(cast<PointerType>(PtrsTy->getElementType())
             ->isOpaqueOrPointeeTypeMatches(
                 cast<VectorType>(Ty)->getElementType()) &&
         "Element type mismatch");
  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  if (!PassThru)
    PassThru = PoisonValue::get(Ty);

  Type *OverloadedTypes[] = {Ty, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}

/// Create a call to a Masked Scatter intrinsic.
/// \p Data  - data to be stored,
/// \p Ptrs  - the vector of pointers, where the \p Data elements should be
///            stored
/// \p Align - alignment for one element
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  ElementCount NumElts = PtrsTy->getElementCount();

#ifndef NDEBUG
  auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
  assert(NumElts == DataTy->getElementCount() &&
         PtrTy->isOpaqueOrPointeeTypeMatches(DataTy->getElementType()) &&
         "Incompatible pointer and data types");
#endif

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
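
// Illustrative usage (not part of this file): gather four i32 values through
// a vector of addresses `Ptrs`; passing a null Mask makes the code above
// default it to all-ones, i.e. every lane is loaded. `B` and `Ptrs` are
// hypothetical.
//
//   Type *VecTy = FixedVectorType::get(B.getInt32Ty(), 4);
//   Value *V = B.CreateMaskedGather(VecTy, Ptrs, Align(4), nullptr, nullptr,
//                                   "gather");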

/// Create a call to Masked Expand Load intrinsic
/// \p Ty       - vector type to load
/// \p Ptr      - base pointer for the load
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
                                                Value *Mask, Value *PassThru,
                                                const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(
             cast<FixedVectorType>(Ty)->getElementType()) &&
         "Wrong element type");
  (void)PtrTy;
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty};
  Value *Ops[] = {Ptr, Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
                               OverloadedTypes, Name);
}

/// Create a call to Masked Compress Store intrinsic
/// \p Val  - data to be stored,
/// \p Ptr  - base pointer for the store
/// \p Mask - vector of booleans which indicates what vector lanes should
///           be accessed in memory
CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
                                                   Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(PtrTy->isOpaqueOrPointeeTypeMatches(
             cast<FixedVectorType>(DataTy)->getElementType()) &&
         "Wrong element type");
  (void)PtrTy;
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy};
  Value *Ops[] = {Val, Ptr, Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
                               OverloadedTypes);
}

template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}
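
// For reference (derived from the code above, not an addition to the IR
// contract): the statepoint argument list produced here is
//   [ID, NumPatchBytes, Callee, |CallArgs|, Flags, CallArgs...,
//    0 /*#transition args*/, 0 /*#deopt args*/]
// with transition, deopt, and gc-live values carried as operand bundles
// built by getStatepointBundles below.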

template <typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
                     std::optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value *, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value *, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value *, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}

template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic typed argument (the function is also vararg).
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualCallee.getCallee()->getType()});

  std::vector<Value *> Args = getStatepointArgs(
      *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);

  CallInst *CI = Builder->CreateCall(
      FnStatepoint, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  CI->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualCallee.getFunctionType()));
  return CI;
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    uint32_t Flags, ArrayRef<Value *> CallArgs,
    std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
}

template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualInvokee, BasicBlock *NormalDest,
    BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic typed argument (the function is also vararg).
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualInvokee.getCallee()->getType()});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
                        Flags, InvokeArgs);

  InvokeInst *II = Builder->CreateInvoke(
      FnStatepoint, NormalDest, UnwindDest, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
  return II;
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs,
      std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, std::nullopt, DeoptArgs,
      GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType, const Twine &Name) {
  Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);

  Value *Args[] = {Statepoint};
  return CreateCall(FnGCResult, Args, {}, Name);
}
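
// Illustrative pattern (not part of this file): the return value of a call
// wrapped in a statepoint is not produced by the statepoint itself; it is
// re-projected from it via gc.result. `B`, `Statepoint`, and the i32 result
// type are hypothetical.
//
//   CallInst *Ret = B.CreateGCResult(Statepoint, B.getInt32Ty(), "ret");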

CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset, int DerivedOffset,
                                          Type *ResultType, const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCRelocate =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);

  Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
  return CreateCall(FnGCRelocate, Args, {}, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCFindBase = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
  return CreateCall(FnGCFindBase, {DerivedPtr}, {}, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCGetOffset = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
  return CreateCall(FnGCGetOffset, {DerivedPtr}, {}, Name);
}

CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              Instruction *FMFSource,
                                              const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                               Value *RHS,
                                               Instruction *FMFSource,
                                               const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {LHS->getType()});
  return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();

  SmallVector<Intrinsic::IITDescriptor> Table;
  Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef(Table);

  SmallVector<Type *> ArgTys;
  ArgTys.reserve(Args.size());
  for (auto &I : Args)
    ArgTys.push_back(I->getType());
  FunctionType *FTy = FunctionType::get(RetTy, ArgTys, false);
  SmallVector<Type *> OverloadTys;
  Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FTy, TableRef, OverloadTys);
  (void)Res;
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match && TableRef.empty() &&
         "Wrong types for intrinsic!");
  // TODO: Handle varargs intrinsics.

  Function *Fn = Intrinsic::getDeclaration(M, ID, OverloadTys);
  return createCallHelper(Fn, Args, Name, FMFSource);
}
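
// Illustrative usage (not part of this file): the RetTy overload above infers
// the overload types by matching the call signature against the intrinsic
// table, so a saturating add can be built without naming the overload type
// explicitly. `B`, `X`, and `Y` are hypothetical i32 values.
//
//   CallInst *Sat = B.CreateIntrinsic(B.getInt32Ty(), Intrinsic::sadd_sat,
//                                     {X, Y}, nullptr, "sat");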

CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
                       Ops[0], Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
                      Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}

CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  bool HasRoundingMD = false;
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
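
// Illustrative usage (not part of this file): a constrained fadd with an
// explicit rounding mode and strict exception behavior, following the
// parameter order of CreateConstrainedFPBinOp above. `B`, `X`, and `Y` are
// hypothetical; a real caller would typically have enabled strict-FP mode
// first (e.g. via B.setIsFPConstrained(true)).
//
//   CallInst *C = B.CreateConstrainedFPBinOp(
//       Intrinsic::experimental_constrained_fadd, X, Y, nullptr, "add",
//       nullptr, RoundingMode::NearestTiesToEven, fp::ebStrict);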

Value *IRBuilderBase::CreateFCmpHelper(
    CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
    MDNode *FPMathTag, bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}

CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(UseArgs, Args);
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *V = Folder.FoldSelect(C, True, False))
    return V;

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  return Insert(Sel, Name);
}

Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  assert(cast<PointerType>(LHS->getType())
             ->isOpaqueOrPointeeTypeMatches(ElemTy) &&
         "Pointer type must match element type");
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy), Name);
}

Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::launder_invariant_group, {Int8PtrTy});

  assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "LaunderInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::strip_invariant_group, {Int8PtrTy});

  assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "StripInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_reverse, Ty);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Keep the original behaviour for fixed vector
  SmallVector<int, 8> ShuffleMask;
  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
    ShuffleMask.push_back(NumElts - i - 1);
  return CreateShuffleVector(V, ShuffleMask, Name);
}

Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vector
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);

  return CreateShuffleVector(V1, V2, Mask);
}

Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  auto EC = ElementCount::getFixed(NumElts);
  return CreateVectorSplat(EC, V, Name);
}

Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // First insert it into a poison vector so we can shuffle it.
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, getInt64(0), Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(EC.getKnownMinValue());
  return CreateShuffleVector(V, Zeros, Name + ".splat");
}
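
// Illustrative expansion (derived from the code above): splatting an i32 `X`
// to 4 lanes emits an insertelement into lane 0 of a poison vector followed
// by a zero-mask shufflevector. `B` and `X` are hypothetical.
//
//   Value *Splat = B.CreateVectorSplat(4, X, "x");
//   // %x.splatinsert = insertelement <4 x i32> poison, i32 %X, i64 0
//   // %x.splat = shufflevector <4 x i32> %x.splatinsert, <4 x i32> poison,
//   //                          <4 x i32> zeroinitializer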

Value *IRBuilderBase::CreateExtractInteger(
    const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
    uint64_t Offset, const Twine &Name) {
  auto *IntTy = cast<IntegerType>(From->getType());
  assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
             DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  uint64_t ShAmt = 8 * Offset;
  Value *V = From;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
                 DL.getTypeStoreSize(ExtractedTy) - Offset);
  if (ShAmt) {
    V = CreateLShr(V, ShAmt, Name + ".shift");
  }
  assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (ExtractedTy != IntTy) {
    V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
  }
  return V;
}

Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");
  assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
         "Pointer element type mismatch");

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn =
      CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");
  assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
         "Pointer element type mismatch");

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
                            {Base, GEPIndex, DIIndex});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  if (OffsetValue)
    Vals.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}

IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}