1 //===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the IRBuilder class, which is used as a convenient way 10 // to create LLVM instructions with a consistent and simplified interface. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/IR/IRBuilder.h" 15 #include "llvm/ADT/ArrayRef.h" 16 #include "llvm/ADT/None.h" 17 #include "llvm/IR/Constant.h" 18 #include "llvm/IR/Constants.h" 19 #include "llvm/IR/DerivedTypes.h" 20 #include "llvm/IR/Function.h" 21 #include "llvm/IR/GlobalValue.h" 22 #include "llvm/IR/GlobalVariable.h" 23 #include "llvm/IR/IntrinsicInst.h" 24 #include "llvm/IR/Intrinsics.h" 25 #include "llvm/IR/LLVMContext.h" 26 #include "llvm/IR/NoFolder.h" 27 #include "llvm/IR/Operator.h" 28 #include "llvm/IR/Statepoint.h" 29 #include "llvm/IR/Type.h" 30 #include "llvm/IR/Value.h" 31 #include "llvm/Support/Casting.h" 32 #include "llvm/Support/MathExtras.h" 33 #include <cassert> 34 #include <cstdint> 35 #include <vector> 36 37 using namespace llvm; 38 39 /// CreateGlobalString - Make a new global variable with an initializer that 40 /// has array of i8 type filled in with the nul terminated string value 41 /// specified. If Name is specified, it is the name of the global variable 42 /// created. 
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace,
                                                  Module *M) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  // Default to the module containing the current insertion point.
  if (!M)
    M = BB->getParent()->getParent();
  auto *GV = new GlobalVariable(
      *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
      StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
  // The string's address identity is irrelevant; allow merging with other
  // unnamed_addr globals and use byte alignment.
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(Align(1));
  return GV;
}

/// Return the return type of the function containing the current
/// insertion point.
Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}

/// Cast \p Ptr to an i8* in its original address space, inserting a
/// bitcast only when the pointee is not already i8.
Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  auto *PT = cast<PointerType>(Ptr->getType());
  if (PT->getElementType()->isIntegerTy(8))
    return Ptr;

  // Otherwise, we need to insert a bitcast.
  return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
}

/// Emit a call through \p Builder, optionally copying fast-math flags
/// from \p FMFSource onto the new call.
static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                  IRBuilderBase *Builder,
                                  const Twine &Name = "",
                                  Instruction *FMFSource = nullptr,
                                  ArrayRef<OperandBundleDef> OpBundles = {}) {
  CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
  if (FMFSource)
    CI->copyFastMathFlags(FMFSource);
  return CI;
}

/// Create a call to llvm.vscale scaled by \p Scaling. When the scaling
/// factor is the constant 1, the multiply is omitted.
Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
  CallInst *CI = createCallHelper(TheFn, {}, this, Name);
  return cast<ConstantInt>(Scaling)->getSExtValue() == 1
             ? CI
             : CreateMul(CI, Scaling);
}

/// Create a call to the llvm.memset intrinsic, then attach the optional
/// destination alignment and TBAA/alias-scope/noalias metadata.
CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = { Ptr->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(Align->value());

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

/// Create an element-wise unordered-atomic memset; \p ElementSize is the
/// byte size of each atomically-written element.
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {

  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

/// Shared implementation for memcpy/memmove-style transfer intrinsics:
/// casts both pointers to i8*, emits the call for \p IntrID, then applies
/// the optional alignments and metadata.
CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto* MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

/// Create an llvm.memcpy.inline call (a memcpy the backend must expand
/// inline rather than lower to a library call). Always emitted
/// non-volatile here.
CallInst *IRBuilderBase::CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign,
                                            Value *Src, MaybeAlign SrcAlign,
                                            Value *Size) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);
  Value *IsVolatile = getInt1(false);

  Value *Ops[] = {Dst, Src, Size, IsVolatile};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Function *F = BB->getParent();
  Module *M = F->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MCI = cast<MemCpyInlineInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  return CI;
}

/// Create an element-wise unordered-atomic memcpy. Both pointer
/// alignments must be at least \p ElementSize.
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

/// Create a plain llvm.memmove call, then attach the optional alignments
/// and metadata.
CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
                                       Value *Src, MaybeAlign SrcAlign,
                                       Value *Size, bool isVolatile,
                                       MDNode *TBAATag, MDNode *ScopeTag,
                                       MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MMI = cast<MemMoveInst>(CI);
  if (DstAlign)
    MMI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MMI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
274 if (TBAATag) 275 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag); 276 277 if (ScopeTag) 278 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag); 279 280 if (NoAliasTag) 281 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag); 282 283 return CI; 284 } 285 286 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove( 287 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, 288 uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag, 289 MDNode *ScopeTag, MDNode *NoAliasTag) { 290 assert(DstAlign >= ElementSize && 291 "Pointer alignment must be at least element size"); 292 assert(SrcAlign >= ElementSize && 293 "Pointer alignment must be at least element size"); 294 Dst = getCastedInt8PtrValue(Dst); 295 Src = getCastedInt8PtrValue(Src); 296 297 Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)}; 298 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()}; 299 Module *M = BB->getParent()->getParent(); 300 Function *TheFn = Intrinsic::getDeclaration( 301 M, Intrinsic::memmove_element_unordered_atomic, Tys); 302 303 CallInst *CI = createCallHelper(TheFn, Ops, this); 304 305 // Set the alignment of the pointer args. 306 CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign)); 307 CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign)); 308 309 // Set the TBAA info if present. 310 if (TBAATag) 311 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag); 312 313 // Set the TBAA Struct info if present. 
314 if (TBAAStructTag) 315 CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag); 316 317 if (ScopeTag) 318 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag); 319 320 if (NoAliasTag) 321 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag); 322 323 return CI; 324 } 325 326 static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID, 327 Value *Src) { 328 Module *M = Builder->GetInsertBlock()->getParent()->getParent(); 329 Value *Ops[] = {Src}; 330 Type *Tys[] = { Src->getType() }; 331 auto Decl = Intrinsic::getDeclaration(M, ID, Tys); 332 return createCallHelper(Decl, Ops, Builder); 333 } 334 335 CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) { 336 Module *M = GetInsertBlock()->getParent()->getParent(); 337 Value *Ops[] = {Acc, Src}; 338 auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd, 339 {Src->getType()}); 340 return createCallHelper(Decl, Ops, this); 341 } 342 343 CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) { 344 Module *M = GetInsertBlock()->getParent()->getParent(); 345 Value *Ops[] = {Acc, Src}; 346 auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul, 347 {Src->getType()}); 348 return createCallHelper(Decl, Ops, this); 349 } 350 351 CallInst *IRBuilderBase::CreateAddReduce(Value *Src) { 352 return getReductionIntrinsic(this, Intrinsic::vector_reduce_add, Src); 353 } 354 355 CallInst *IRBuilderBase::CreateMulReduce(Value *Src) { 356 return getReductionIntrinsic(this, Intrinsic::vector_reduce_mul, Src); 357 } 358 359 CallInst *IRBuilderBase::CreateAndReduce(Value *Src) { 360 return getReductionIntrinsic(this, Intrinsic::vector_reduce_and, Src); 361 } 362 363 CallInst *IRBuilderBase::CreateOrReduce(Value *Src) { 364 return getReductionIntrinsic(this, Intrinsic::vector_reduce_or, Src); 365 } 366 367 CallInst *IRBuilderBase::CreateXorReduce(Value *Src) { 368 return getReductionIntrinsic(this, Intrinsic::vector_reduce_xor, Src); 369 } 370 371 CallInst 
*IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  // Select the signed or unsigned max reduction intrinsic.
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(this, ID, Src);
}

/// Create an integer min reduction, signed or unsigned per \p IsSigned.
CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(this, ID, Src);
}

CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmin, Src);
}

/// Create a call to llvm.lifetime.start. A null \p Size means "unknown"
/// and is encoded as i64 -1.
CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}

/// Create a call to llvm.lifetime.end. A null \p Size means "unknown"
/// and is encoded as i64 -1.
CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}

/// Create a call to llvm.invariant.start over \p Size bytes at \p Ptr
/// (i64 -1 when \p Size is null, i.e. unknown size).
CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {

  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return createCallHelper(TheFn, Ops, this);
}

/// Create a call to llvm.assume on the i1 condition \p Cond, forwarding
/// any operand bundles (e.g. "align"/"nonnull" assumptions).
CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
                                ArrayRef<OperandBundleDef> OpBundles) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = { Cond };
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
}

/// Create a call to llvm.experimental.noalias.scope.decl for \p Scope.
Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  Module *M = BB->getModule();
  auto *FnIntrinsic = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_noalias_scope_decl, {});
  return createCallHelper(FnIntrinsic, {Scope}, this);
}

/// Create a call to a Masked Load intrinsic.
/// \p Ptr - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask - vector of booleans which indicates what vector lanes should
///           be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = PtrTy->getElementType();
  assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = UndefValue::get(DataTy);
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}

/// Create a call to a Masked Store intrinsic.
/// \p Val - data to be stored,
/// \p Ptr - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask - vector of booleans which indicates what vector lanes should
///           be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = PtrTy->getElementType();
  assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}

/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  return createCallHelper(TheFn, Ops, this, Name);
}

/// Create a call to a Masked Gather intrinsic.
/// \p Ptrs - vector of pointers for loading
/// \p Align - alignment for one element
/// \p Mask - vector of booleans which indicates what vector lanes should
///           be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, Align Alignment,
                                            Value *Mask, Value *PassThru,
                                            const Twine &Name) {
  auto *PtrsTy = cast<FixedVectorType>(Ptrs->getType());
  auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
  unsigned NumElts = PtrsTy->getNumElements();
  auto *DataTy = FixedVectorType::get(PtrTy->getElementType(), NumElts);

  // A null mask means "all lanes active".
  if (!Mask)
    Mask = Constant::getAllOnesValue(
        FixedVectorType::get(Type::getInt1Ty(Context), NumElts));

  if (!PassThru)
    PassThru = UndefValue::get(DataTy);

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}

/// Create a call to a Masked Scatter intrinsic.
/// \p Data - data to be stored,
/// \p Ptrs - the vector of pointers, where the \p Data elements should be
///           stored
/// \p Align - alignment for one element
/// \p Mask - vector of booleans which indicates what vector lanes should
///           be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<FixedVectorType>(Ptrs->getType());
  auto *DataTy = cast<FixedVectorType>(Data->getType());
  unsigned NumElts = PtrsTy->getNumElements();

#ifndef NDEBUG
  auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
  assert(NumElts == DataTy->getNumElements() &&
         PtrTy->getElementType() == DataTy->getElementType() &&
         "Incompatible pointer and data types");
#endif

  // A null mask means "all lanes active".
  if (!Mask)
    Mask = Constant::getAllOnesValue(
        FixedVectorType::get(Type::getInt1Ty(Context), NumElts));

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}

/// Build the leading fixed argument list of a gc.statepoint call:
/// id, num-patch-bytes, callee, #call-args, flags, then the call args.
template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}

/// Build the operand-bundle list for a statepoint: "deopt",
/// "gc-transition" and "gc-live" bundles, each emitted only when the
/// corresponding arguments are present.
template<typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,
                     Optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value*, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value*, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value*, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}

/// Shared implementation for all CreateGCStatepointCall overloads:
/// declares the statepoint intrinsic for the callee's function-pointer
/// type and emits the call with the statepoint args and bundles.
template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    Optional<ArrayRef<T1>> TransitionArgs,
    Optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualCallee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Type *ArgTypes[] = { FuncPtrType };
  Function *FnStatepoint =
    Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                              ArgTypes);

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
                        CallArgs);

  return Builder->CreateCall(FnStatepoint, Args,
                             getStatepointBundles(TransitionArgs, DeoptArgs,
                                                  GCArgs),
                             Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Use> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None, DeoptArgs, GCArgs, Name);
}

/// Shared implementation for all CreateGCStatepointInvoke overloads;
/// same as the call form but emits an invoke with normal/unwind dests.
template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest,
    uint32_t Flags, ArrayRef<T0> InvokeArgs,
    Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
    ArrayRef<T3> GCArgs, const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee, Flags,
                        InvokeArgs);

  return Builder->CreateInvoke(FnStatepoint, NormalDest, UnwindDest, Args,
                               getStatepointBundles(TransitionArgs, DeoptArgs,
                                                    GCArgs),
                               Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
      DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    Optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
      Name);
}

/// Create a call to gc.result projecting the statepoint's return value
/// as \p ResultType.
CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType,
                                        const Twine &Name) {
  Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);

  Value *Args[] = {Statepoint};
  return createCallHelper(FnGCResult, Args, this, Name);
}

/// Create a call to gc.relocate for the (base, derived) pointer pair at
/// the given operand offsets of \p Statepoint.
CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset,
                                          int DerivedOffset,
                                          Type *ResultType,
                                          const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCRelocate =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);

  Value *Args[] = {Statepoint,
                   getInt32(BaseOffset),
                   getInt32(DerivedOffset)};
  return createCallHelper(FnGCRelocate, Args, this, Name);
}

CallInst
*IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, 765 Instruction *FMFSource, 766 const Twine &Name) { 767 Module *M = BB->getModule(); 768 Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()}); 769 return createCallHelper(Fn, {V}, this, Name, FMFSource); 770 } 771 772 CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, 773 Value *RHS, 774 Instruction *FMFSource, 775 const Twine &Name) { 776 Module *M = BB->getModule(); 777 Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() }); 778 return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource); 779 } 780 781 CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID, 782 ArrayRef<Type *> Types, 783 ArrayRef<Value *> Args, 784 Instruction *FMFSource, 785 const Twine &Name) { 786 Module *M = BB->getModule(); 787 Function *Fn = Intrinsic::getDeclaration(M, ID, Types); 788 return createCallHelper(Fn, Args, this, Name, FMFSource); 789 } 790 791 CallInst *IRBuilderBase::CreateConstrainedFPBinOp( 792 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource, 793 const Twine &Name, MDNode *FPMathTag, 794 Optional<RoundingMode> Rounding, 795 Optional<fp::ExceptionBehavior> Except) { 796 Value *RoundingV = getConstrainedFPRounding(Rounding); 797 Value *ExceptV = getConstrainedFPExcept(Except); 798 799 FastMathFlags UseFMF = FMF; 800 if (FMFSource) 801 UseFMF = FMFSource->getFastMathFlags(); 802 803 CallInst *C = CreateIntrinsic(ID, {L->getType()}, 804 {L, R, RoundingV, ExceptV}, nullptr, Name); 805 setConstrainedFPCallAttr(C); 806 setFPAttrs(C, FPMathTag, UseFMF); 807 return C; 808 } 809 810 Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops, 811 const Twine &Name, MDNode *FPMathTag) { 812 if (Instruction::isBinaryOp(Opc)) { 813 assert(Ops.size() == 2 && "Invalid number of operands!"); 814 return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc), 815 Ops[0], Ops[1], Name, FPMathTag); 816 } 817 if (Instruction::isUnaryOp(Opc)) { 818 
assert(Ops.size() == 1 && "Invalid number of operands!"); 819 return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc), 820 Ops[0], Name, FPMathTag); 821 } 822 llvm_unreachable("Unexpected opcode!"); 823 } 824 825 CallInst *IRBuilderBase::CreateConstrainedFPCast( 826 Intrinsic::ID ID, Value *V, Type *DestTy, 827 Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag, 828 Optional<RoundingMode> Rounding, 829 Optional<fp::ExceptionBehavior> Except) { 830 Value *ExceptV = getConstrainedFPExcept(Except); 831 832 FastMathFlags UseFMF = FMF; 833 if (FMFSource) 834 UseFMF = FMFSource->getFastMathFlags(); 835 836 CallInst *C; 837 bool HasRoundingMD = false; 838 switch (ID) { 839 default: 840 break; 841 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \ 842 case Intrinsic::INTRINSIC: \ 843 HasRoundingMD = ROUND_MODE; \ 844 break; 845 #include "llvm/IR/ConstrainedOps.def" 846 } 847 if (HasRoundingMD) { 848 Value *RoundingV = getConstrainedFPRounding(Rounding); 849 C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV}, 850 nullptr, Name); 851 } else 852 C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr, 853 Name); 854 855 setConstrainedFPCallAttr(C); 856 857 if (isa<FPMathOperator>(C)) 858 setFPAttrs(C, FPMathTag, UseFMF); 859 return C; 860 } 861 862 Value *IRBuilderBase::CreateFCmpHelper( 863 CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name, 864 MDNode *FPMathTag, bool IsSignaling) { 865 if (IsFPConstrained) { 866 auto ID = IsSignaling ? 
              Intrinsic::experimental_constrained_fcmps
              : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  // Fold the compare when both operands are constant.
  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}

// Create a constrained floating-point compare intrinsic call. The predicate
// and the exception behavior are passed as extra operands.
CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, Optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

// Create a call to an already-declared constrained-FP intrinsic, appending
// the rounding-mode operand (only when the intrinsic takes one, per
// ConstrainedOps.def) and the exception-behavior operand after Args.
CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  for (auto *OneArg : Args)
    UseArgs.push_back(OneArg);
  // Expand ConstrainedOps.def into a switch that records whether this
  // intrinsic carries a rounding-mode operand.
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

// Create a select, folding to a constant when all three operands are
// constant. MDFrom, if given, supplies prof/unpredictable metadata to copy.
Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *CC = dyn_cast<Constant>(C))
    if (auto *TC = dyn_cast<Constant>(True))
      if (auto *FC = dyn_cast<Constant>(False))
        return Insert(Folder.CreateSelect(CC, TC, FC),
                      Name);

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    // Transfer branch-weight and unpredictable metadata from MDFrom.
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  // Selects of FP values carry the builder's fast-math flags.
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  return Insert(Sel, Name);
}

// Compute the element-count difference of two same-typed pointers:
// (LHS - RHS) / sizeof(element), emitted as ptrtoint / sub / exact sdiv.
// Note the subtraction is done in i64 regardless of the target's pointer
// width.
Value *IRBuilderBase::CreatePtrDiff(Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  auto *ArgType = cast<PointerType>(LHS->getType());
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  // The division is exact because any pointer difference of this element
  // type is a whole multiple of the element size.
  return CreateExactSDiv(Difference,
                         ConstantExpr::getSizeOf(ArgType->getElementType()),
                         Name);
}

// Create a call to llvm.launder.invariant.group on Ptr, bitcasting to i8*
// (in Ptr's address space) and back as needed.
Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::launder_invariant_group, {Int8PtrTy});

  assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "LaunderInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});

  // Cast the result back to the original pointer type if we bitcast above.
  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

// Create a call to llvm.strip.invariant.group on Ptr, bitcasting to i8* (in
// Ptr's address space) and back as needed.
Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::strip_invariant_group, {Int8PtrTy});

  assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "StripInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});

  // Cast the result back to the original pointer type if we bitcast above.
  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

// Splat V across a fixed number of vector elements; thin wrapper over the
// ElementCount overload below.
Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  auto EC = ElementCount::getFixed(NumElts);
  return CreateVectorSplat(EC, V, Name);
}

// Splat V across EC elements (fixed or scalable) via insertelement of V at
// lane 0 followed by a zero-mask shufflevector.
Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // First insert it into a poison vector so we can shuffle it.
  Type *I32Ty = getInt32Ty();
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, ConstantInt::get(I32Ty, 0),
                          Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(EC.getKnownMinValue());
  return CreateShuffleVector(V, Zeros, Name + ".splat");
}

// Extract an ExtractedTy-sized piece of the integer From, taken at store
// byte offset Offset under DL's endianness: shift the wanted bytes down to
// bit 0, then truncate.
Value *IRBuilderBase::CreateExtractInteger(
    const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
    uint64_t Offset, const Twine &Name) {
  auto *IntTy = cast<IntegerType>(From->getType());
  assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
             DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  uint64_t ShAmt = 8 * Offset;
  Value *V = From;
  if (DL.isBigEndian())
    // On big-endian targets byte offset 0 is at the most-significant end,
    // so the shift amount is measured from the other side.
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
                 DL.getTypeStoreSize(ExtractedTy) - Offset);
  if (ShAmt) {
    V = CreateLShr(V, ShAmt, Name + ".shift");
  }
  assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (ExtractedTy != IntTy) {
    V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
  }
  return V;
}

// Create a call to llvm.preserve.array.access.index for a GEP-like array
// access of `Dimension` zero dimensions followed by LastIndex, attaching
// DbgInfo as preserve.access.index metadata when provided.
Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.array.access.index.");
  auto *BaseType = Base->getType();

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  // Index list is `Dimension` leading zeros followed by LastIndex.
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  // The intrinsic's return type matches what the equivalent GEP would yield.
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

// Create a call to llvm.preserve.union.access.index, attaching DbgInfo as
// preserve.access.index metadata when provided. The result type equals the
// base pointer type (no GEP is implied for a union access).
Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn =
      CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

// Create a call to llvm.preserve.struct.access.index. Index is the GEP field
// index used to compute the result type; FieldIndex (passed as DIIndex) is
// presumably the debug-info field index — confirm against callers.
Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.struct.access.index.");
  auto *BaseType = Base->getType();

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  // The intrinsic's return type matches a GEP of {0, Index} into ElTy.
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
                            {Base,
                             GEPIndex, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

// Emit an alignment assumption as a call to llvm.assume(true) carrying an
// "align" operand bundle of {Ptr, Align[, Offset]}. Note DL is unused here;
// the parameter is kept so the public signature stays stable.
CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  if (OffsetValue)
    Vals.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
}

// Alignment assumption with a compile-time-constant alignment.
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  // The alignment operand is an intptr-sized constant in Ptr's address
  // space.
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
}

// Alignment assumption with a runtime alignment value.
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}

// Out-of-line virtual destructors and anchor methods: these pin each class's
// vtable to this translation unit.
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() {}
IRBuilderFolder::~IRBuilderFolder() {}
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}