//===- InstCombineCalls.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall, visitInvoke, and visitCallBr functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace PatternMatch;

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window",
    cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));

namespace llvm {
/// enable preservation of attributes in assume like:
/// call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
extern cl::opt<bool> EnableKnowledgeRetention;
} // namespace llvm

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// Recognize a memcpy/memmove from a trivially otherwise unused alloca.
/// TODO: This should probably be integrated with visitAllocSites, but that
/// requires a deeper change to allow either unread or unwritten objects.
static bool hasUndefSource(AnyMemTransferInst *MI) {
  auto *Src = MI->getRawSource();
  while (isa<GetElementPtrInst>(Src) || isa<BitCastInst>(Src)) {
    if (!Src->hasOneUse())
      return false;
    Src = cast<Instruction>(Src)->getOperand(0);
  }
  return isa<AllocaInst>(Src) && Src->hasOneUse();
}

Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  MaybeAlign CopyDstAlign = MI->getDestAlign();
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  MaybeAlign CopySrcAlign = MI->getSourceAlign();
  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If the source is provably undef, the memcpy/memmove doesn't do anything
  // (unless the transfer is volatile).
  if (hasUndefSource(MI) && !MI->isVolatile()) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for intrinsic. See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.

  // If it is an atomic and alignment is less than the size then we will
  // introduce the unaligned memory access which will be later transformed
  // into libcall in CodeGen. This is not an evident performance gain, so
  // disable it for now.
  if (isa<AtomicMemTransferInst>(MI))
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)
      return nullptr;

  // Use an integer load+store unless we can find something better.
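  // For example, a 4-byte copy becomes (alignment and metadata are carried
  // over below):
  //   %v = load i32, i32* %src
  //   store i32 %v, i32* %dst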
  unsigned SrcAddrSp =
      cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
      cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA tag describing our copy.
  MDNode *CopyMD = nullptr;
  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
    CopyMD = M;
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        M->getOperand(1) &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
            Size &&
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      CopyMD = cast<MDNode>(M->getOperand(2));
  }

  Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder.CreateLoad(IntType, Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(*CopySrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(*CopyDstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
  S->copyMetadata(*MI, LLVMContext::MD_DIAssignID);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // non-atomics can be volatile
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
    // atomics have to be unordered
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  MaybeAlign MemSetAlign = MI->getDestAlign();
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
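  // (getModRefInfoMask conservatively reports whether the location can be
  // modified at all; if it cannot, e.g. for constant memory, the memset
  // cannot change anything and can be dropped.)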
  if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Remove memset with an undef value.
  // FIXME: This is technically incorrect because it might overwrite a poison
  // value. Change to PoisonValue once #52930 is resolved.
  if (isa<UndefValue>(MI->getValue())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  const uint64_t Len = LenC->getLimitedValue();
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = MI->getDestAlign().valueOrOne();

  // If it is an atomic and alignment is less than the size then we will
  // introduce the unaligned memory access which will be later transformed
  // into libcall in CodeGen. This is not an evident performance gain, so
  // disable it for now.
  if (isa<AtomicMemSetInst>(MI))
    if (Alignment < Len)
      return nullptr;

  // memset(s,c,n) -> store s, c   (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);

    // Extract the fill value and store.
    const uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    Constant *FillVal = ConstantInt::get(ITy, Fill);
    StoreInst *S = Builder.CreateStore(FillVal, Dest, MI->isVolatile());
    S->copyMetadata(*MI, LLVMContext::MD_DIAssignID);
    for (auto *DAI : at::getAssignmentMarkers(S)) {
      if (any_of(DAI->location_ops(), [&](Value *V) { return V == FillC; }))
        DAI->replaceVariableLocationOp(FillC, FillVal);
    }

    S->setAlignment(Alignment);
    if (isa<AtomicMemSetInst>(MI))
      S->setOrdering(AtomicOrdering::Unordered);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Narrow width by halves excluding zero/undef lanes
Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  const Align Alignment =
      cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();

  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
    LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                            "unmaskedload");
    L->copyMetadata(II);
    return L;
  }

  // If we can unconditionally load from this address, replace with a
  // load/select idiom. TODO: use DT for context sensitive query
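  // For example:
  //   %v = load <4 x i32>, <4 x i32>* %ptr
  //   %r = select <4 x i1> %mask, <4 x i32> %v, <4 x i32> %passthru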
  if (isDereferenceablePointer(LoadPtr, II.getType(),
                               II.getModule()->getDataLayout(), &II, &AC)) {
    LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                             "unmaskedload");
    LI->copyMetadata(II);
    return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Narrow width by halves excluding zero/undef lanes
Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
    StoreInst *S =
        new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
    S->copyMetadata(II);
    return S;
  }

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
    return replaceOperand(II, 0, V);

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane load -> load
// * Dereferenceable address & few lanes -> scalarize speculative load/selects
// * Adjacent vector addresses -> masked.load
// * Narrow width by halves excluding zero/undef lanes
// * Vector incrementing address -> vector masked load
Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
  if (!ConstMask)
    return nullptr;

  // Vector splat address w/known mask -> scalar load
  // Fold the gather to load the source vector first lane
  // because it is reloading the same value each time
  if (ConstMask->isAllOnesValue())
    if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
      auto *VecTy = cast<VectorType>(II.getType());
      const Align Alignment =
          cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
      LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
                                              Alignment, "load.scalar");
      Value *Shuf =
          Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");
      return replaceInstUsesWith(II, cast<Instruction>(Shuf));
    }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Adjacent vector addresses -> masked.store
// * Narrow store width by halves excluding zero/undef lanes
// * Vector incrementing address -> vector masked store
Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, a scatter does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // Vector splat address -> scalar store
  if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) {
    // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr
    if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) {
      Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
      StoreInst *S =
          new StoreInst(SplatValue, SplatPtr, /*IsVolatile=*/false, Alignment);
      S->copyMetadata(II);
      return S;
    }
    // scatter(vector, splat(ptr), splat(true)) -> store extract(vector,
    // lastlane), ptr
    if (ConstMask->isAllOnesValue()) {
      Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
      VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType());
      ElementCount VF = WideLoadTy->getElementCount();
      Value *RunTimeVF = Builder.CreateElementCount(Builder.getInt32Ty(), VF);
      Value *LastLane = Builder.CreateSub(RunTimeVF, Builder.getInt32(1));
      Value *Extract =
          Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
      StoreInst *S =
          new StoreInst(Extract, SplatPtr, /*IsVolatile=*/false, Alignment);
      S->copyMetadata(II);
      return S;
    }
  }
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
    return replaceOperand(II, 0, V);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts, UndefElts))
    return replaceOperand(II, 1, V);

  return nullptr;
}

/// This function transforms launder.invariant.group and strip.invariant.group
/// like:
/// launder(launder(%x)) -> launder(%x) (the result is not the argument)
/// launder(strip(%x)) -> launder(%x)
/// strip(strip(%x)) -> strip(%x) (the result is not the argument)
/// strip(launder(%x)) -> strip(%x)
/// This is legal because it preserves the most recent information about
/// the presence or absence of invariant.group.
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombinerImpl &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = StrippedArg;
  while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
    if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
        Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
      break;
    StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
  }
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.
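
  // Recreate a single intrinsic of the outermost kind (launder or strip) on
  // the fully stripped pointer argument; the loop above removed any nested
  // launders/strips.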
  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
  if (Result->getType() != II.getType())
    Result = IC.Builder.CreateBitCast(Result, II.getType());

  return cast<Instruction>(Result);
}

static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  Value *Op0 = II.getArgOperand(0);
  Value *Op1 = II.getArgOperand(1);
  Value *X;
  // ctlz(bitreverse(x)) -> cttz(x)
  // cttz(bitreverse(x)) -> ctlz(x)
  if (match(Op0, m_BitReverse(m_Value(X)))) {
    Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
    Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
    return CallInst::Create(F, {X, II.getArgOperand(1)});
  }

  if (II.getType()->isIntOrIntVectorTy(1)) {
    // ctlz/cttz i1 Op0 --> not Op0
    if (match(Op1, m_Zero()))
      return BinaryOperator::CreateNot(Op0);
    // If zero is poison, then the input can be assumed to be "true", so the
    // instruction simplifies to "false".
    assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");
    return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(II.getType()));
  }

  if (IsTZ) {
    // cttz(-x) -> cttz(x)
    if (match(Op0, m_Neg(m_Value(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(-x & x) -> cttz(x)
    if (match(Op0, m_c_And(m_Neg(m_Value(X)), m_Deferred(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(sext(x)) -> cttz(zext(x))
    if (match(Op0, m_OneUse(m_SExt(m_Value(X))))) {
      auto *Zext = IC.Builder.CreateZExt(X, II.getType());
      auto *CttzZext =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Zext, Op1);
      return IC.replaceInstUsesWith(II, CttzZext);
    }

    // Zext doesn't change the number of trailing zeros, so narrow:
    // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsPoison' parameter is 'true'.
    if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) {
      auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X,
                                                    IC.Builder.getTrue());
      auto *ZextCttz = IC.Builder.CreateZExt(Cttz, II.getType());
      return IC.replaceInstUsesWith(II, ZextCttz);
    }

    // cttz(abs(x)) -> cttz(x)
    // cttz(nabs(x)) -> cttz(x)
    Value *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return IC.replaceOperand(II, 0, X);

    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
      return IC.replaceOperand(II, 0, X);
  }

  KnownBits Known = IC.computeKnownBits(Op0, 0, &II);

  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
  unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
                                : Known.countMaxLeadingZeros();
  unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
                                : Known.countMinLeadingZeros();
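
  // For example, if the two low bits of cttz's operand are known zero and
  // bit 2 is known one, then DefiniteZeros == PossibleZeros == 2 and the
  // fold below replaces the call with the constant 2.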
  // If all bits above (ctlz) or below (cttz) the first known one are known
  // zero, this value is constant.
  // FIXME: This should be in InstSimplify because we're replacing an
  // instruction with a constant.
  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
    return IC.replaceInstUsesWith(II, C);
  }

  // If the input to cttz/ctlz is known to be non-zero,
  // then change the 'ZeroIsPoison' parameter to 'true'
  // because we know the zero behavior can't affect the result.
  if (!Known.One.isZero() ||
      isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
                     &IC.getDominatorTree())) {
    if (!match(II.getArgOperand(1), m_One()))
      return IC.replaceOperand(II, 1, IC.Builder.getTrue());
  }

  // Add range metadata since known bits can't completely reflect what we know.
  auto *IT = cast<IntegerType>(Op0->getType()->getScalarType());
  if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
        ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  Type *Ty = II.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  Value *Op0 = II.getArgOperand(0);
  Value *X, *Y;

  // ctpop(bitreverse(x)) -> ctpop(x)
  // ctpop(bswap(x)) -> ctpop(x)
  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
    return IC.replaceOperand(II, 0, X);

  // ctpop(rot(x)) -> ctpop(x)
  if ((match(Op0, m_FShl(m_Value(X), m_Value(Y), m_Value())) ||
       match(Op0, m_FShr(m_Value(X), m_Value(Y), m_Value()))) &&
      X == Y)
    return IC.replaceOperand(II, 0, X);

  // ctpop(x | -x) -> bitwidth - cttz(x, false)
  if (Op0->hasOneUse() &&
      match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
    auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
    return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
  }

  // ctpop(~x & (x - 1)) -> cttz(x, false)
  if (match(Op0,
            m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    return CallInst::Create(F, {X, IC.Builder.getFalse()});
  }

  // Zext doesn't change the number of set bits, so narrow:
  // ctpop (zext X) --> zext (ctpop X)
  if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
    Value *NarrowPop = IC.Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, X);
    return CastInst::Create(Instruction::ZExt, NarrowPop, Ty);
  }

  KnownBits Known(BitWidth);
  IC.computeKnownBits(Op0, Known, 0, &II);

  // If all bits are zero except for exactly one fixed bit, then the result
  // must be 0 or 1, and we can get that answer by shifting to LSB:
  // ctpop (X & 32) --> (X & 32) >> 5
  // TODO: Investigate removing this as it's likely unnecessary given the below
  // `isKnownToBeAPowerOfTwo` check.
  if ((~Known.Zero).isPowerOf2())
    return BinaryOperator::CreateLShr(
        Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));

  // More generally we can also handle non-constant power of 2 patterns such as
  // shl/shr(Pow2, X), (X & -X), etc... by transforming:
  // ctpop(Pow2OrZero) --> icmp ne X, 0
  if (IC.isKnownToBeAPowerOfTwo(Op0, /* OrZero */ true))
    return CastInst::Create(Instruction::ZExt,
                            IC.Builder.CreateICmp(ICmpInst::ICMP_NE, Op0,
                                                  Constant::getNullValue(Ty)),
                            Ty);

  // Add range metadata since known bits can't completely reflect what we know.
  auto *IT = cast<IntegerType>(Ty->getScalarType());
  unsigned MinCount = Known.countMinPopulation();
  unsigned MaxCount = Known.countMaxPopulation();
  if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
        ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

/// Convert a table lookup to shufflevector if the mask is constant.
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
/// which case we could lower the shufflevector with rev64 instructions
/// as it's actually a byte reverse.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<FixedVectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  int Indexes[8];

  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if ((unsigned)Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *V1 = II.getArgOperand(0);
  auto *V2 = Constant::getNullValue(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ArrayRef(Indexes));
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  assert(E.arg_size() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

// Remove trivially empty start/end intrinsic ranges, i.e. a start
// immediately followed by an end (ignoring debuginfo or other
// start/end intrinsics in between). As this handles only the most trivial
// cases, tracking the nesting level is not needed:
//
//   call @llvm.foo.start(i1 0)
//   call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
//   call @llvm.foo.end(i1 0)
//   call @llvm.foo.end(i1 0) ; &I
static bool
removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
                          std::function<bool(const IntrinsicInst &)> IsStart) {
  // We start from the end intrinsic and scan backwards, so that InstCombine
  // has already processed (and potentially removed) all the instructions
  // before the end intrinsic.
  BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
      if (I->isDebugOrPseudoInst() ||
          I->getIntrinsicID() == EndI.getIntrinsicID())
        continue;
      if (IsStart(*I)) {
        if (haveSameOperands(EndI, *I, EndI.arg_size())) {
          IC.eraseInstFromFunction(*I);
          IC.eraseInstFromFunction(EndI);
          return true;
        }
        // Skip start intrinsics that don't pair with this end intrinsic.
        continue;
      }
    }
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
  removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
    return I.getIntrinsicID() == Intrinsic::vastart ||
           I.getIntrinsicID() == Intrinsic::vacopy;
  });
  return nullptr;
}

static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
  assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);
    return &Call;
  }
  return nullptr;
}

/// Creates a result tuple for an overflow intrinsic \p II with a given
/// \p Result and a constant \p Overflow value.
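/// For example, for llvm.sadd.with.overflow.i32 this builds the constant
/// { i32 poison, i1 Overflow } and inserts \p Result into its first field.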
static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
                                        Constant *Overflow) {
  Constant *V[] = {PoisonValue::get(Result->getType()), Overflow};
  StructType *ST = cast<StructType>(II->getType());
  Constant *Struct = ConstantStruct::get(ST, V);
  return InsertValueInst::Create(Struct, Result, 0);
}

Instruction *
InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
  WithOverflowInst *WO = cast<WithOverflowInst>(II);
  Value *OperationResult = nullptr;
  Constant *OverflowResult = nullptr;
  if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
                            WO->getRHS(), *WO, OperationResult, OverflowResult))
    return createOverflowTuple(WO, OperationResult, OverflowResult);
  return nullptr;
}

static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) {
  Ty = Ty->getScalarType();
  return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE;
}

static bool inputDenormalIsDAZ(const Function &F, const Type *Ty) {
  Ty = Ty->getScalarType();
  return F.getDenormalMode(Ty->getFltSemantics()).inputsAreZero();
}

/// \returns the compare predicate type if the test performed by
/// llvm.is.fpclass(x, \p Mask) is equivalent to fcmp o__ x, 0.0 with the
/// floating-point environment assumed for \p F for type \p Ty
static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask,
                                              const Function &F, Type *Ty) {
  switch (static_cast<unsigned>(Mask)) {
  case fcZero:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OEQ;
    break;
  case fcZero | fcSubnormal:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OEQ;
    break;
  case fcPositive | fcNegZero:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OGE;
    break;
  case fcPositive | fcNegZero | fcNegSubnormal:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OGE;
    break;
  case fcPosSubnormal | fcPosNormal | fcPosInf:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OGT;
    break;
  case fcNegative | fcPosZero:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OLE;
    break;
  case fcNegative | fcPosZero | fcPosSubnormal:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OLE;
    break;
  case fcNegSubnormal | fcNegNormal | fcNegInf:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OLT;
    break;
  case fcPosNormal | fcPosInf:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OGT;
    break;
  case fcNegNormal | fcNegInf:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OLT;
    break;
  case ~fcZero & ~fcNan:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_ONE;
    break;
  case ~(fcZero | fcSubnormal) & ~fcNan:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_ONE;
    break;
  default:
    break;
  }

  return FCmpInst::BAD_FCMP_PREDICATE;
}

Instruction *InstCombinerImpl::foldIntrinsicIsFPClass(IntrinsicInst &II) {
  Value *Src0 = II.getArgOperand(0);
  Value *Src1 = II.getArgOperand(1);
  const ConstantInt *CMask = cast<ConstantInt>(Src1);
  FPClassTest Mask = static_cast<FPClassTest>(CMask->getZExtValue());
  const bool IsUnordered = (Mask & fcNan) == fcNan;
  const bool IsOrdered = (Mask & fcNan) == fcNone;
  const FPClassTest OrderedMask = Mask & ~fcNan;
  const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;

  const bool IsStrict = II.isStrictFP();
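  // llvm.is.fpclass never raises FP exceptions, but fcmp can (e.g. on a
  // signaling NaN), so the fcmp-based folds below are skipped for strictfp.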

  Value *FNegSrc;
  if (match(Src0, m_FNeg(m_Value(FNegSrc)))) {
    // is.fpclass (fneg x), mask -> is.fpclass x, (fneg mask)

    II.setArgOperand(1, ConstantInt::get(Src1->getType(), fneg(Mask)));
    return replaceOperand(II, 0, FNegSrc);
  }

  Value *FAbsSrc;
  if (match(Src0, m_FAbs(m_Value(FAbsSrc)))) {
    II.setArgOperand(1, ConstantInt::get(Src1->getType(), fabs(Mask)));
    return replaceOperand(II, 0, FAbsSrc);
  }

  // TODO: is.fpclass(x, fcInf) -> fabs(x) == inf

  if ((OrderedMask == fcPosInf || OrderedMask == fcNegInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
    // is.fpclass(x, fcPosInf) -> fcmp oeq x, +inf
    // is.fpclass(x, fcNegInf) -> fcmp oeq x, -inf
    // is.fpclass(x, fcPosInf|fcNan) -> fcmp ueq x, +inf
    // is.fpclass(x, fcNegInf|fcNan) -> fcmp ueq x, -inf
    Constant *Inf =
        ConstantFP::getInfinity(Src0->getType(), OrderedMask == fcNegInf);
    Value *EqInf = IsUnordered ? Builder.CreateFCmpUEQ(Src0, Inf)
                               : Builder.CreateFCmpOEQ(Src0, Inf);

    EqInf->takeName(&II);
    return replaceInstUsesWith(II, EqInf);
  }

  if ((OrderedInvertedMask == fcPosInf || OrderedInvertedMask == fcNegInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
    // is.fpclass(x, ~fcPosInf) -> fcmp one x, +inf
    // is.fpclass(x, ~fcNegInf) -> fcmp one x, -inf
    // is.fpclass(x, ~fcPosInf|fcNan) -> fcmp une x, +inf
    // is.fpclass(x, ~fcNegInf|fcNan) -> fcmp une x, -inf
    Constant *Inf = ConstantFP::getInfinity(Src0->getType(),
                                            OrderedInvertedMask == fcNegInf);
    Value *NeInf = IsUnordered ? Builder.CreateFCmpUNE(Src0, Inf)
                               : Builder.CreateFCmpONE(Src0, Inf);
    NeInf->takeName(&II);
    return replaceInstUsesWith(II, NeInf);
  }

  if (Mask == fcNan && !IsStrict) {
    // Equivalent of isnan. Replace with standard fcmp if we don't care about
    // FP exceptions.
    Value *IsNan =
        Builder.CreateFCmpUNO(Src0, ConstantFP::getZero(Src0->getType()));
    IsNan->takeName(&II);
    return replaceInstUsesWith(II, IsNan);
  }

  if (Mask == (~fcNan & fcAllFlags) && !IsStrict) {
    // Equivalent of !isnan. Replace with standard fcmp.
    Value *FCmp =
        Builder.CreateFCmpORD(Src0, ConstantFP::getZero(Src0->getType()));
    FCmp->takeName(&II);
    return replaceInstUsesWith(II, FCmp);
  }

  FCmpInst::Predicate PredType = FCmpInst::BAD_FCMP_PREDICATE;

  // Try to replace with an fcmp with 0
  //
  // is.fpclass(x, fcZero) -> fcmp oeq x, 0.0
  // is.fpclass(x, fcZero | fcNan) -> fcmp ueq x, 0.0
  // is.fpclass(x, ~fcZero & ~fcNan) -> fcmp one x, 0.0
  // is.fpclass(x, ~fcZero) -> fcmp une x, 0.0
  //
  // is.fpclass(x, fcPosSubnormal | fcPosNormal | fcPosInf) -> fcmp ogt x, 0.0
  // is.fpclass(x, fcPositive | fcNegZero) -> fcmp oge x, 0.0
  //
  // is.fpclass(x, fcNegSubnormal | fcNegNormal | fcNegInf) -> fcmp olt x, 0.0
  // is.fpclass(x, fcNegative | fcPosZero) -> fcmp ole x, 0.0
  //
  if (!IsStrict && (IsOrdered || IsUnordered) &&
      (PredType = fpclassTestIsFCmp0(OrderedMask, *II.getFunction(),
                                     Src0->getType())) !=
          FCmpInst::BAD_FCMP_PREDICATE) {
    Constant *Zero = ConstantFP::getZero(Src0->getType());
    // Equivalent of == 0.
    Value *FCmp = Builder.CreateFCmp(
        IsUnordered ? FCmpInst::getUnorderedPredicate(PredType) : PredType,
        Src0, Zero);

    FCmp->takeName(&II);
    return replaceInstUsesWith(II, FCmp);
  }

  KnownFPClass Known = computeKnownFPClass(
      Src0, DL, Mask, 0, &getTargetLibraryInfo(), &AC, &II, &DT);

  // Clear test bits we know must be false from the source value.
  // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
  // fp_class (ninf x), ninf|pinf|other -> fp_class (ninf x), other
  if ((Mask & Known.KnownFPClasses) != Mask) {
    II.setArgOperand(
        1, ConstantInt::get(Src1->getType(), Mask & Known.KnownFPClasses));
    return &II;
  }

  // If none of the tests which can return false are possible, fold to true.
  // fp_class (nnan x), ~(qnan|snan) -> true
  // fp_class (ninf x), ~(ninf|pinf) -> true
  if (Mask == Known.KnownFPClasses)
    return replaceInstUsesWith(II, ConstantInt::get(II.getType(), true));

  return nullptr;
}

static std::optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
                                        const DataLayout &DL,
                                        AssumptionCache *AC,
                                        DominatorTree *DT) {
  KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT);
  if (Known.isNonNegative())
    return false;
  if (Known.isNegative())
    return true;

  Value *X, *Y;
  if (match(Op, m_NSWSub(m_Value(X), m_Value(Y))))
    return isImpliedByDomCondition(ICmpInst::ICMP_SLT, X, Y, CxtI, DL);

  return isImpliedByDomCondition(
      ICmpInst::ICMP_SLT, Op, Constant::getNullValue(Op->getType()), CxtI, DL);
}

/// Return true if two values \p Op0 and \p Op1 are known to have the same sign.
static bool signBitMustBeTheSame(Value *Op0, Value *Op1, Instruction *CxtI,
                                 const DataLayout &DL, AssumptionCache *AC,
                                 DominatorTree *DT) {
  std::optional<bool> Known1 = getKnownSign(Op1, CxtI, DL, AC, DT);
  if (!Known1)
    return false;
  std::optional<bool> Known0 = getKnownSign(Op0, CxtI, DL, AC, DT);
  if (!Known0)
    return false;
  return *Known0 == *Known1;
}

/// Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0. This
/// can trigger other combines.
static Instruction *moveAddAfterMinMax(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
          MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
         "Expected a min or max intrinsic");

  // TODO: Match vectors with undef elements, but undef may not propagate.
  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C0)))) ||
      !match(Op1, m_APInt(C1)))
    return nullptr;

  // Check for necessary no-wrap and overflow constraints.
  bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
  auto *Add = cast<BinaryOperator>(Op0);
  if ((IsSigned && !Add->hasNoSignedWrap()) ||
      (!IsSigned && !Add->hasNoUnsignedWrap()))
    return nullptr;

  // If the constant difference overflows, then instsimplify should reduce the
  // min/max to the add or C1.
  bool Overflow;
  APInt CDiff =
      IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
  assert(!Overflow && "Expected simplify of min/max");

  // min/max (add X, C0), C1 --> add (min/max X, C1 - C0), C0
  // Note: the "mismatched" no-overflow setting does not propagate.
  Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
  Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
  return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                  : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
}

/// Match a sadd_sat or ssub_sat which is using min/max to clamp the value.
Instruction *InstCombinerImpl::matchSAddSubSat(IntrinsicInst &MinMax1) {
  Type *Ty = MinMax1.getType();

  // We are looking for a tree of:
  // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B))))
  // Where the min and max could be reversed
  Instruction *MinMax2;
  BinaryOperator *AddSub;
  const APInt *MinValue, *MaxValue;
  if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) {
    if (!match(MinMax2, m_SMax(m_BinOp(AddSub), m_APInt(MinValue))))
      return nullptr;
  } else if (match(&MinMax1,
                   m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) {
    if (!match(MinMax2, m_SMin(m_BinOp(AddSub), m_APInt(MaxValue))))
      return nullptr;
  } else
    return nullptr;

  // Check that the constants clamp a saturate, and that the new type would be
  // sensible to convert to.
  if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
    return nullptr;
  // In what bitwidth can this be treated as saturating arithmetic?
  unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
  // FIXME: This isn't quite right for vectors, but using the scalar type is a
  // good first approximation for what should be done there.
  if (!shouldChangeType(Ty->getScalarType()->getIntegerBitWidth(), NewBitWidth))
    return nullptr;

  // Also make sure that the inner min/max and the add/sub have one use.
  if (!MinMax2->hasOneUse() || !AddSub->hasOneUse())
    return nullptr;

  // Create the new type (which can be a vector type)
  Type *NewTy = Ty->getWithNewBitWidth(NewBitWidth);

  Intrinsic::ID IntrinsicID;
  if (AddSub->getOpcode() == Instruction::Add)
    IntrinsicID = Intrinsic::sadd_sat;
  else if (AddSub->getOpcode() == Instruction::Sub)
    IntrinsicID = Intrinsic::ssub_sat;
  else
    return nullptr;

  // The two operands of the add/sub must be nsw-truncatable to the NewTy. This
  // is usually achieved via a sext from a smaller type.
  if (ComputeMaxSignificantBits(AddSub->getOperand(0), 0, AddSub) >
          NewBitWidth ||
      ComputeMaxSignificantBits(AddSub->getOperand(1), 0, AddSub) > NewBitWidth)
    return nullptr;

  // Finally create and return the sat intrinsic, truncated to the new type
  Function *F =
      Intrinsic::getDeclaration(MinMax1.getModule(), IntrinsicID, NewTy);
  Value *AT = Builder.CreateTrunc(AddSub->getOperand(0), NewTy);
  Value *BT = Builder.CreateTrunc(AddSub->getOperand(1), NewTy);
  Value *Sat = Builder.CreateCall(F, {AT, BT});
  return CastInst::Create(Instruction::SExt, Sat, Ty);
}

/// If we have a clamp pattern like max (min X, 42), 41 -- where the output
/// can only be one of two possible constant values -- turn that into a select
/// of constants.
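/// For example: max (min X, 42), 41 --> (X > 41) ? 42 : 41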
static Instruction *foldClampRangeOfTwo(IntrinsicInst *II,
                                        InstCombiner::BuilderTy &Builder) {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(I1, m_APInt(C1)) || !I0->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
    if (match(I0, m_SMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_SGT;
    break;
  case Intrinsic::smin:
    if (match(I0, m_SMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_SLT;
    break;
  case Intrinsic::umax:
    if (match(I0, m_UMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_UGT;
    break;
  case Intrinsic::umin:
    if (match(I0, m_UMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_ULT;
    break;
  default:
    llvm_unreachable("Expected min/max intrinsic");
  }
  if (Pred == CmpInst::BAD_ICMP_PREDICATE)
    return nullptr;

  // max (min X, 42), 41 --> X > 41 ? 42 : 41
  // min (max X, 42), 43 --> X < 43 ? 42 : 43
  Value *Cmp = Builder.CreateICmp(Pred, X, I1);
  return SelectInst::Create(Cmp, ConstantInt::get(II->getType(), *C0), I1);
}

/// If this min/max has a constant operand and an operand that is a matching
/// min/max with a constant operand, constant-fold the 2 constant operands.
static Value *reassociateMinMaxWithConstants(IntrinsicInst *II,
                                             IRBuilderBase &Builder) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  if (!LHS || LHS->getIntrinsicID() != MinMaxID)
    return nullptr;

  Constant *C0, *C1;
  if (!match(LHS->getArgOperand(1), m_ImmConstant(C0)) ||
      !match(II->getArgOperand(1), m_ImmConstant(C1)))
    return nullptr;

  // max (max X, C0), C1 --> max X, (max C0, C1) --> max X, NewC
  ICmpInst::Predicate Pred = MinMaxIntrinsic::getPredicate(MinMaxID);
  Value *CondC = Builder.CreateICmp(Pred, C0, C1);
  Value *NewC = Builder.CreateSelect(CondC, C0, C1);
  return Builder.CreateIntrinsic(MinMaxID, II->getType(),
                                 {LHS->getArgOperand(0), NewC});
}

/// If this min/max has a matching min/max operand with a constant, try to push
/// the constant operand into this instruction. This can enable more folds.
static Instruction *
reassociateMinMaxWithConstantInOperand(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  // Match and capture a min/max operand candidate.
  Value *X, *Y;
  Constant *C;
  Instruction *Inner;
  if (!match(II, m_c_MaxOrMin(m_OneUse(m_CombineAnd(
                                  m_Instruction(Inner),
                                  m_MaxOrMin(m_Value(X), m_ImmConstant(C)))),
                              m_Value(Y))))
    return nullptr;

  // The inner op must match. Check for constants to avoid infinite loops.
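  // (If X or Y were themselves constants, pushing C inward could ping-pong
  // with the constant-folding reassociation above and never terminate.)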
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
  if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
      match(X, m_ImmConstant()) || match(Y, m_ImmConstant()))
    return nullptr;

  // max (max X, C), Y --> max (max X, Y), C
  Function *MinMax =
      Intrinsic::getDeclaration(II->getModule(), MinMaxID, II->getType());
  Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);
  NewInner->takeName(Inner);
  return CallInst::Create(MinMax, {NewInner, C});
}

/// Reduce a sequence of min/max intrinsics with a common operand.
static Instruction *factorizeMinMaxTree(IntrinsicInst *II) {
  // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
      RHS->getIntrinsicID() != MinMaxID ||
      (!LHS->hasOneUse() && !RHS->hasOneUse()))
    return nullptr;

  Value *A = LHS->getArgOperand(0);
  Value *B = LHS->getArgOperand(1);
  Value *C = RHS->getArgOperand(0);
  Value *D = RHS->getArgOperand(1);

  // Look for a common operand.
  Value *MinMaxOp = nullptr;
  Value *ThirdOp = nullptr;
  if (LHS->hasOneUse()) {
    // If the LHS is only used in this chain and the RHS is used outside of it,
    // reuse the RHS min/max because that will eliminate the LHS.
    if (D == A || C == A) {
      // min(min(a, b), min(c, a)) --> min(min(c, a), b)
      // min(min(a, b), min(a, d)) --> min(min(a, d), b)
      MinMaxOp = RHS;
      ThirdOp = B;
    } else if (D == B || C == B) {
      // min(min(a, b), min(c, b)) --> min(min(c, b), a)
      // min(min(a, b), min(b, d)) --> min(min(b, d), a)
      MinMaxOp = RHS;
      ThirdOp = A;
    }
  } else {
    assert(RHS->hasOneUse() && "Expected one-use operand");
    // Reuse the LHS. This will eliminate the RHS.
    if (D == A || D == B) {
      // min(min(a, b), min(c, a)) --> min(min(a, b), c)
      // min(min(a, b), min(c, b)) --> min(min(a, b), c)
      MinMaxOp = LHS;
      ThirdOp = C;
    } else if (C == A || C == B) {
      // min(min(a, b), min(a, d)) --> min(min(a, b), d)
      // min(min(a, b), min(b, d)) --> min(min(a, b), d)
      MinMaxOp = LHS;
      ThirdOp = D;
    }
  }

  if (!MinMaxOp || !ThirdOp)
    return nullptr;

  Module *Mod = II->getModule();
  Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType());
  return CallInst::Create(MinMax, { MinMaxOp, ThirdOp });
}

/// If all arguments of the intrinsic are unary shuffles with the same mask,
/// try to shuffle after the intrinsic.
static Instruction *
foldShuffledIntrinsicOperands(IntrinsicInst *II,
                              InstCombiner::BuilderTy &Builder) {
  // TODO: This should be extended to handle other intrinsics like fshl, ctpop,
  // etc. Use llvm::isTriviallyVectorizable() and related to determine
  // which intrinsics are safe to shuffle?
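  // All of the intrinsics below operate lane-wise, so applying the intrinsic
  // before or after a unary shuffle with the same mask yields the same lanes.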
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::fma:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
    break;
  default:
    return nullptr;
  }

  Value *X;
  ArrayRef<int> Mask;
  if (!match(II->getArgOperand(0),
             m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))
    return nullptr;

  // At least 1 operand must have 1 use because we are creating 2 instructions.
  if (none_of(II->args(), [](Value *V) { return V->hasOneUse(); }))
    return nullptr;

  // See if all arguments are shuffled with the same mask.
  SmallVector<Value *, 4> NewArgs(II->arg_size());
  NewArgs[0] = X;
  Type *SrcTy = X->getType();
  for (unsigned i = 1, e = II->arg_size(); i != e; ++i) {
    if (!match(II->getArgOperand(i),
               m_Shuffle(m_Value(X), m_Undef(), m_SpecificMask(Mask))) ||
        X->getType() != SrcTy)
      return nullptr;
    NewArgs[i] = X;
  }

  // intrinsic (shuf X, M), (shuf Y, M), ... --> shuf (intrinsic X, Y, ...), M
  Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
  Value *NewIntrinsic =
      Builder.CreateIntrinsic(II->getIntrinsicID(), SrcTy, NewArgs, FPI);
  return new ShuffleVectorInst(NewIntrinsic, Mask);
}

/// Fold the following cases for bswap and bitreverse intrinsics:
/// bswap(logic_op(bswap(x), y)) --> logic_op(x, bswap(y))
/// bswap(logic_op(bswap(x), bswap(y))) --> logic_op(x, y) (ignores multiuse)
template <Intrinsic::ID IntrID>
static Instruction *foldBitOrderCrossLogicOp(Value *V,
                                             InstCombiner::BuilderTy &Builder) {
  static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
                "This helper only supports BSWAP and BITREVERSE intrinsics");

  Value *X, *Y;
  // Find bitwise logic op. Check that it is a BinaryOperator explicitly so we
  // don't match ConstantExpr that aren't meaningful for this transform.
  if (match(V, m_OneUse(m_BitwiseLogic(m_Value(X), m_Value(Y)))) &&
      isa<BinaryOperator>(V)) {
    Value *OldReorderX, *OldReorderY;
    BinaryOperator::BinaryOps Op = cast<BinaryOperator>(V)->getOpcode();

    // If both X and Y are bswap/bitreverse, the transform reduces the number
    // of instructions even if there's multiuse.
    // If only one operand is bswap/bitreverse, we need to ensure the operand
    // has only one use.
    if (match(X, m_Intrinsic<IntrID>(m_Value(OldReorderX))) &&
        match(Y, m_Intrinsic<IntrID>(m_Value(OldReorderY)))) {
      return BinaryOperator::Create(Op, OldReorderX, OldReorderY);
    }

    if (match(X, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderX))))) {
      Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y);
      return BinaryOperator::Create(Op, OldReorderX, NewReorder);
    }

    if (match(Y, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderY))))) {
      Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X);
      return BinaryOperator::Create(Op, NewReorder, OldReorderY);
    }
  }
  return nullptr;
}

/// CallInst simplification. This mostly only handles folding of intrinsic
/// instructions. For normal calls, it allows visitCallBase to do the heavy
/// lifting.
Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
  // Don't try to simplify calls without uses. It will not do anything useful,
  // but will result in the following folds being skipped.
  if (!CI.use_empty()) {
    SmallVector<Value *, 4> Args;
    Args.reserve(CI.arg_size());
    for (Value *Op : CI.args())
      Args.push_back(Op);
    if (Value *V = simplifyCall(&CI, CI.getCalledOperand(), Args,
                                SQ.getWithInstruction(&CI)))
      return replaceInstUsesWith(CI, V);
  }

  if (Value *FreedOp = getFreedOperand(&CI, &TLI))
    return visitFree(CI, FreedOp);

  // If the caller function (i.e. us, the function that contains this CallInst)
  // is nounwind, mark the call as nounwind, even if the callee isn't.
  if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallBase(CI);

  // For atomic unordered mem intrinsics, if the length is not positive or is
  // not a multiple of the element size, then the behavior is undefined.
  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
    if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
      if (NumBytes->isNegative() ||
          (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
        CreateNonTerminatorUnreachable(AMI);
        assert(AMI->getType()->isVoidTy() &&
               "non void atomic unordered mem intrinsic");
        return eraseInstFromFunction(*AMI);
      }

  // Intrinsics cannot occur in an invoke or a callbr, so handle them here
  // instead of in visitCallBase.
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return eraseInstFromFunction(CI);
    }

    // No other transformations apply to volatile transfers.
    if (auto *M = dyn_cast<MemIntrinsic>(MI))
      if (M->isVolatile())
        return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getModule();
          Intrinsic::ID MemCpyID =
              isa<AtomicMemMoveInst>(MMI)
                  ? Intrinsic::memcpy_element_unordered_atomic
                  : Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return eraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemTransfer(MTI))
        return I;
    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  // For fixed width vector result intrinsics, use the generic demanded vector
  // support.
1482 if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
1483 auto VWidth = IIFVTy->getNumElements();
1484 APInt UndefElts(VWidth, 0);
1485 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
1486 if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
1487 if (V != II)
1488 return replaceInstUsesWith(*II, V);
1489 return II;
1490 }
1491 }
1492
1493 if (II->isCommutative()) {
1494 if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
1495 return NewCall;
1496 }
1497
1498 // An unused constrained FP intrinsic call may have a declared side effect,
1499 // which prevents it from being removed. In some cases the side effect is
1500 // actually absent. To detect this, call simplifyConstrainedFPCall; if it
1501 // returns a replacement, the call may be removed.
1502 if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
1503 if (simplifyConstrainedFPCall(&CI, SQ.getWithInstruction(&CI)))
1504 return eraseInstFromFunction(CI);
1505 }
1506
1507 Intrinsic::ID IID = II->getIntrinsicID();
1508 switch (IID) {
1509 case Intrinsic::objectsize: {
1510 SmallVector<Instruction *> InsertedInstructions;
1511 if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false,
1512 &InsertedInstructions)) {
1513 for (Instruction *Inserted : InsertedInstructions)
1514 Worklist.add(Inserted);
1515 return replaceInstUsesWith(CI, V);
1516 }
1517 return nullptr;
1518 }
1519 case Intrinsic::abs: {
1520 Value *IIOperand = II->getArgOperand(0);
1521 bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();
1522
1523 // abs(-x) -> abs(x)
1524 // TODO: Copy nsw if it was present on the neg?
1525 Value *X;
1526 if (match(IIOperand, m_Neg(m_Value(X))))
1527 return replaceOperand(*II, 0, X);
1528 if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X)))))
1529 return replaceOperand(*II, 0, X);
1530 if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X))))
1531 return replaceOperand(*II, 0, X);
1532
1533 if (std::optional<bool> Sign = getKnownSign(IIOperand, II, DL, &AC, &DT)) {
1534 // abs(x) -> x if x >= 0
1535 if (!*Sign)
1536 return replaceInstUsesWith(*II, IIOperand);
1537
1538 // abs(x) -> -x if x < 0
1539 if (IntMinIsPoison)
1540 return BinaryOperator::CreateNSWNeg(IIOperand);
1541 return BinaryOperator::CreateNeg(IIOperand);
1542 }
1543
1544 // abs (sext X) --> zext (abs X)
1545 // Clear the IntMinIsPoison (nsw) bit on the abs to allow narrowing.
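// Worked example of why the flag must be cleared (i8 -> i32): for
// %x = i8 -128 (INT_MIN), the wide abs yields 128; the narrow abs wraps
// back to -128 (0x80), and the zext then also yields 128. With the
// int-min-is-poison flag kept, the narrow abs would instead be poison.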
1546 if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) { 1547 Value *NarrowAbs = 1548 Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse()); 1549 return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType()); 1550 } 1551 1552 // Match a complicated way to check if a number is odd/even: 1553 // abs (srem X, 2) --> and X, 1 1554 const APInt *C; 1555 if (match(IIOperand, m_SRem(m_Value(X), m_APInt(C))) && *C == 2) 1556 return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1)); 1557 1558 break; 1559 } 1560 case Intrinsic::umin: { 1561 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1); 1562 // umin(x, 1) == zext(x != 0) 1563 if (match(I1, m_One())) { 1564 assert(II->getType()->getScalarSizeInBits() != 1 && 1565 "Expected simplify of umin with max constant"); 1566 Value *Zero = Constant::getNullValue(I0->getType()); 1567 Value *Cmp = Builder.CreateICmpNE(I0, Zero); 1568 return CastInst::Create(Instruction::ZExt, Cmp, II->getType()); 1569 } 1570 [[fallthrough]]; 1571 } 1572 case Intrinsic::umax: { 1573 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1); 1574 Value *X, *Y; 1575 if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) && 1576 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) { 1577 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y); 1578 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType()); 1579 } 1580 Constant *C; 1581 if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_Constant(C)) && 1582 I0->hasOneUse()) { 1583 Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType()); 1584 if (ConstantExpr::getZExt(NarrowC, II->getType()) == C) { 1585 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC); 1586 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType()); 1587 } 1588 } 1589 // If both operands of unsigned min/max are sign-extended, it is still ok 1590 // to narrow the operation. 1591 [[fallthrough]]; 1592 } 1593 case Intrinsic::smax: 1594 case Intrinsic::smin: { 1595 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1); 1596 Value *X, *Y; 1597 if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) && 1598 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) { 1599 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y); 1600 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType()); 1601 } 1602 1603 Constant *C; 1604 if (match(I0, m_SExt(m_Value(X))) && match(I1, m_Constant(C)) && 1605 I0->hasOneUse()) { 1606 Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType()); 1607 if (ConstantExpr::getSExt(NarrowC, II->getType()) == C) { 1608 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC); 1609 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType()); 1610 } 1611 } 1612 1613 if (IID == Intrinsic::smax || IID == Intrinsic::smin) { 1614 // smax (neg nsw X), (neg nsw Y) --> neg nsw (smin X, Y) 1615 // smin (neg nsw X), (neg nsw Y) --> neg nsw (smax X, Y) 1616 // TODO: Canonicalize neg after min/max if I1 is constant. 
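// e.g. (illustrative):
//   %nx = sub nsw i32 0, %x
//   %ny = sub nsw i32 0, %y
//   %m  = call i32 @llvm.smax.i32(i32 %nx, i32 %ny)
// -->
//   %s  = call i32 @llvm.smin.i32(i32 %x, i32 %y)
//   %m  = sub nsw i32 0, %s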
1617 if (match(I0, m_NSWNeg(m_Value(X))) && match(I1, m_NSWNeg(m_Value(Y))) &&
1618 (I0->hasOneUse() || I1->hasOneUse())) {
1619 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
1620 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
1621 return BinaryOperator::CreateNSWNeg(InvMaxMin);
1622 }
1623 }
1624
1625 // (umax X, (xor X, Pow2))
1626 // -> (or X, Pow2)
1627 // (umin X, (xor X, Pow2))
1628 // -> (and X, ~Pow2)
1629 // (smax X, (xor X, Pos_Pow2))
1630 // -> (or X, Pos_Pow2)
1631 // (smin X, (xor X, Pos_Pow2))
1632 // -> (and X, ~Pos_Pow2)
1633 // (smax X, (xor X, Neg_Pow2))
1634 // -> (and X, ~Neg_Pow2)
1635 // (smin X, (xor X, Neg_Pow2))
1636 // -> (or X, Neg_Pow2)
1637 if ((match(I0, m_c_Xor(m_Specific(I1), m_Value(X))) ||
1638 match(I1, m_c_Xor(m_Specific(I0), m_Value(X)))) &&
1639 isKnownToBeAPowerOfTwo(X, /* OrZero */ true)) {
1640 bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
1641 bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;
1642
1643 if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
1644 auto KnownSign = getKnownSign(X, II, DL, &AC, &DT);
1645 if (KnownSign == std::nullopt) {
1646 UseOr = false;
1647 UseAndN = false;
1648 } else if (*KnownSign /* true is Signed. */) {
1649 UseOr ^= true;
1650 UseAndN ^= true;
1651 Type *Ty = I0->getType();
1652 // A negative power of 2 must be IntMin. It's possible to prove
1653 // negative / power of 2 without actually having known bits, so
1654 // just get the value by hand.
1655 X = Constant::getIntegerValue(
1656 Ty, APInt::getSignedMinValue(Ty->getScalarSizeInBits()));
1657 }
1658 }
1659 if (UseOr)
1660 return BinaryOperator::CreateOr(I0, X);
1661 else if (UseAndN)
1662 return BinaryOperator::CreateAnd(I0, Builder.CreateNot(X));
1663 }
1664
1665 // If we can eliminate ~A and Y is free to invert:
1666 // max ~A, Y --> ~(min A, ~Y)
1667 //
1668 // Examples:
1669 // max ~A, ~Y --> ~(min A, Y)
1670 // max ~A, C --> ~(min A, ~C)
1671 // max ~A, (max ~Y, ~Z) --> ~(min A, (min Y, Z))
1672 auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
1673 Value *A;
1674 if (match(X, m_OneUse(m_Not(m_Value(A)))) &&
1675 !isFreeToInvert(A, A->hasOneUse()) &&
1676 isFreeToInvert(Y, Y->hasOneUse())) {
1677 Value *NotY = Builder.CreateNot(Y);
1678 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
1679 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
1680 return BinaryOperator::CreateNot(InvMaxMin);
1681 }
1682 return nullptr;
1683 };
1684
1685 if (Instruction *I = moveNotAfterMinMax(I0, I1))
1686 return I;
1687 if (Instruction *I = moveNotAfterMinMax(I1, I0))
1688 return I;
1689
1690 if (Instruction *I = moveAddAfterMinMax(II, Builder))
1691 return I;
1692
1693 // smax(X, -X) --> abs(X)
1694 // smin(X, -X) --> -abs(X)
1695 // umax(X, -X) --> -abs(X)
1696 // umin(X, -X) --> abs(X)
1697 if (isKnownNegation(I0, I1)) {
1698 // We can choose either operand as the input to abs(), but if we can
1699 // eliminate the only use of a value, that's better for subsequent
1700 // transforms/analysis.
1701 if (I0->hasOneUse() && !I1->hasOneUse())
1702 std::swap(I0, I1);
1703
1704 // This is some variant of abs(). See if we can propagate 'nsw' to the abs
1705 // operation and potentially its negation.
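// e.g. if I1 is (sub nsw 0, %x) and I0 is %x: %x == INT_MIN would already
// be poison through the nsw negation, so the abs we build may safely set
// its int-min-is-poison flag (a sketch of the reasoning; isKnownNegation
// with NeedNSW does the actual check below).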
1706 bool IntMinIsPoison = isKnownNegation(I0, I1, /* NeedNSW */ true); 1707 Value *Abs = Builder.CreateBinaryIntrinsic( 1708 Intrinsic::abs, I0, 1709 ConstantInt::getBool(II->getContext(), IntMinIsPoison)); 1710 1711 // We don't have a "nabs" intrinsic, so negate if needed based on the 1712 // max/min operation. 1713 if (IID == Intrinsic::smin || IID == Intrinsic::umax) 1714 Abs = Builder.CreateNeg(Abs, "nabs", /* NUW */ false, IntMinIsPoison); 1715 return replaceInstUsesWith(CI, Abs); 1716 } 1717 1718 if (Instruction *Sel = foldClampRangeOfTwo(II, Builder)) 1719 return Sel; 1720 1721 if (Instruction *SAdd = matchSAddSubSat(*II)) 1722 return SAdd; 1723 1724 if (Value *NewMinMax = reassociateMinMaxWithConstants(II, Builder)) 1725 return replaceInstUsesWith(*II, NewMinMax); 1726 1727 if (Instruction *R = reassociateMinMaxWithConstantInOperand(II, Builder)) 1728 return R; 1729 1730 if (Instruction *NewMinMax = factorizeMinMaxTree(II)) 1731 return NewMinMax; 1732 1733 break; 1734 } 1735 case Intrinsic::bitreverse: { 1736 Value *IIOperand = II->getArgOperand(0); 1737 // bitrev (zext i1 X to ?) --> X ? SignBitC : 0 1738 Value *X; 1739 if (match(IIOperand, m_ZExt(m_Value(X))) && 1740 X->getType()->isIntOrIntVectorTy(1)) { 1741 Type *Ty = II->getType(); 1742 APInt SignBit = APInt::getSignMask(Ty->getScalarSizeInBits()); 1743 return SelectInst::Create(X, ConstantInt::get(Ty, SignBit), 1744 ConstantInt::getNullValue(Ty)); 1745 } 1746 1747 if (Instruction *crossLogicOpFold = 1748 foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand, Builder)) 1749 return crossLogicOpFold; 1750 1751 break; 1752 } 1753 case Intrinsic::bswap: { 1754 Value *IIOperand = II->getArgOperand(0); 1755 1756 // Try to canonicalize bswap-of-logical-shift-by-8-bit-multiple as 1757 // inverse-shift-of-bswap: 1758 // bswap (shl X, Y) --> lshr (bswap X), Y 1759 // bswap (lshr X, Y) --> shl (bswap X), Y 1760 Value *X, *Y; 1761 if (match(IIOperand, m_OneUse(m_LogicalShift(m_Value(X), m_Value(Y))))) { 1762 // The transform allows undef vector elements, so try a constant match 1763 // first. If knownbits can handle that case, that clause could be removed. 1764 unsigned BitWidth = IIOperand->getType()->getScalarSizeInBits(); 1765 const APInt *C; 1766 if ((match(Y, m_APIntAllowUndef(C)) && (*C & 7) == 0) || 1767 MaskedValueIsZero(Y, APInt::getLowBitsSet(BitWidth, 3))) { 1768 Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X); 1769 BinaryOperator::BinaryOps InverseShift = 1770 cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl 1771 ? 
Instruction::LShr 1772 : Instruction::Shl; 1773 return BinaryOperator::Create(InverseShift, NewSwap, Y); 1774 } 1775 } 1776 1777 KnownBits Known = computeKnownBits(IIOperand, 0, II); 1778 uint64_t LZ = alignDown(Known.countMinLeadingZeros(), 8); 1779 uint64_t TZ = alignDown(Known.countMinTrailingZeros(), 8); 1780 unsigned BW = Known.getBitWidth(); 1781 1782 // bswap(x) -> shift(x) if x has exactly one "active byte" 1783 if (BW - LZ - TZ == 8) { 1784 assert(LZ != TZ && "active byte cannot be in the middle"); 1785 if (LZ > TZ) // -> shl(x) if the "active byte" is in the low part of x 1786 return BinaryOperator::CreateNUWShl( 1787 IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ)); 1788 // -> lshr(x) if the "active byte" is in the high part of x 1789 return BinaryOperator::CreateExactLShr( 1790 IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ)); 1791 } 1792 1793 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c)) 1794 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) { 1795 unsigned C = X->getType()->getScalarSizeInBits() - BW; 1796 Value *CV = ConstantInt::get(X->getType(), C); 1797 Value *V = Builder.CreateLShr(X, CV); 1798 return new TruncInst(V, IIOperand->getType()); 1799 } 1800 1801 if (Instruction *crossLogicOpFold = 1802 foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand, Builder)) { 1803 return crossLogicOpFold; 1804 } 1805 1806 break; 1807 } 1808 case Intrinsic::masked_load: 1809 if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II)) 1810 return replaceInstUsesWith(CI, SimplifiedMaskedOp); 1811 break; 1812 case Intrinsic::masked_store: 1813 return simplifyMaskedStore(*II); 1814 case Intrinsic::masked_gather: 1815 return simplifyMaskedGather(*II); 1816 case Intrinsic::masked_scatter: 1817 return simplifyMaskedScatter(*II); 1818 case Intrinsic::launder_invariant_group: 1819 case Intrinsic::strip_invariant_group: 1820 if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this)) 1821 return replaceInstUsesWith(*II, SkippedBarrier); 1822 break; 1823 case Intrinsic::powi: 1824 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) { 1825 // 0 and 1 are handled in instsimplify 1826 // powi(x, -1) -> 1/x 1827 if (Power->isMinusOne()) 1828 return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0), 1829 II->getArgOperand(0), II); 1830 // powi(x, 2) -> x*x 1831 if (Power->equalsInt(2)) 1832 return BinaryOperator::CreateFMulFMF(II->getArgOperand(0), 1833 II->getArgOperand(0), II); 1834 1835 if (!Power->getValue()[0]) { 1836 Value *X; 1837 // If power is even: 1838 // powi(-x, p) -> powi(x, p) 1839 // powi(fabs(x), p) -> powi(x, p) 1840 // powi(copysign(x, y), p) -> powi(x, p) 1841 if (match(II->getArgOperand(0), m_FNeg(m_Value(X))) || 1842 match(II->getArgOperand(0), m_FAbs(m_Value(X))) || 1843 match(II->getArgOperand(0), 1844 m_Intrinsic<Intrinsic::copysign>(m_Value(X), m_Value()))) 1845 return replaceOperand(*II, 0, X); 1846 } 1847 } 1848 break; 1849 1850 case Intrinsic::cttz: 1851 case Intrinsic::ctlz: 1852 if (auto *I = foldCttzCtlz(*II, *this)) 1853 return I; 1854 break; 1855 1856 case Intrinsic::ctpop: 1857 if (auto *I = foldCtpop(*II, *this)) 1858 return I; 1859 break; 1860 1861 case Intrinsic::fshl: 1862 case Intrinsic::fshr: { 1863 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1); 1864 Type *Ty = II->getType(); 1865 unsigned BitWidth = Ty->getScalarSizeInBits(); 1866 Constant *ShAmtC; 1867 if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC))) { 1868 // Canonicalize a shift amount constant operand to modulo the 
bit-width. 1869 Constant *WidthC = ConstantInt::get(Ty, BitWidth); 1870 Constant *ModuloC = 1871 ConstantFoldBinaryOpOperands(Instruction::URem, ShAmtC, WidthC, DL); 1872 if (!ModuloC) 1873 return nullptr; 1874 if (ModuloC != ShAmtC) 1875 return replaceOperand(*II, 2, ModuloC); 1876 1877 assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) == 1878 ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) && 1879 "Shift amount expected to be modulo bitwidth"); 1880 1881 // Canonicalize funnel shift right by constant to funnel shift left. This 1882 // is not entirely arbitrary. For historical reasons, the backend may 1883 // recognize rotate left patterns but miss rotate right patterns. 1884 if (IID == Intrinsic::fshr) { 1885 // fshr X, Y, C --> fshl X, Y, (BitWidth - C) 1886 Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC); 1887 Module *Mod = II->getModule(); 1888 Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty); 1889 return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC }); 1890 } 1891 assert(IID == Intrinsic::fshl && 1892 "All funnel shifts by simple constants should go left"); 1893 1894 // fshl(X, 0, C) --> shl X, C 1895 // fshl(X, undef, C) --> shl X, C 1896 if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef())) 1897 return BinaryOperator::CreateShl(Op0, ShAmtC); 1898 1899 // fshl(0, X, C) --> lshr X, (BW-C) 1900 // fshl(undef, X, C) --> lshr X, (BW-C) 1901 if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef())) 1902 return BinaryOperator::CreateLShr(Op1, 1903 ConstantExpr::getSub(WidthC, ShAmtC)); 1904 1905 // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form) 1906 if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) { 1907 Module *Mod = II->getModule(); 1908 Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty); 1909 return CallInst::Create(Bswap, { Op0 }); 1910 } 1911 if (Instruction *BitOp = 1912 matchBSwapOrBitReverse(*II, /*MatchBSwaps*/ true, 1913 /*MatchBitReversals*/ true)) 1914 return BitOp; 1915 } 1916 1917 // Left or right might be masked. 1918 if (SimplifyDemandedInstructionBits(*II)) 1919 return &CI; 1920 1921 // The shift amount (operand 2) of a funnel shift is modulo the bitwidth, 1922 // so only the low bits of the shift amount are demanded if the bitwidth is 1923 // a power-of-2. 1924 if (!isPowerOf2_32(BitWidth)) 1925 break; 1926 APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth)); 1927 KnownBits Op2Known(BitWidth); 1928 if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known)) 1929 return &CI; 1930 break; 1931 } 1932 case Intrinsic::uadd_with_overflow: 1933 case Intrinsic::sadd_with_overflow: { 1934 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 1935 return I; 1936 1937 // Given 2 constant operands whose sum does not overflow: 1938 // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1 1939 // saddo (X +nsw C0), C1 -> saddo X, C0 + C1 1940 Value *X; 1941 const APInt *C0, *C1; 1942 Value *Arg0 = II->getArgOperand(0); 1943 Value *Arg1 = II->getArgOperand(1); 1944 bool IsSigned = IID == Intrinsic::sadd_with_overflow; 1945 bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0))) 1946 : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0))); 1947 if (HasNWAdd && match(Arg1, m_APInt(C1))) { 1948 bool Overflow; 1949 APInt NewC = 1950 IsSigned ? 
C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow); 1951 if (!Overflow) 1952 return replaceInstUsesWith( 1953 *II, Builder.CreateBinaryIntrinsic( 1954 IID, X, ConstantInt::get(Arg1->getType(), NewC))); 1955 } 1956 break; 1957 } 1958 1959 case Intrinsic::umul_with_overflow: 1960 case Intrinsic::smul_with_overflow: 1961 case Intrinsic::usub_with_overflow: 1962 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 1963 return I; 1964 break; 1965 1966 case Intrinsic::ssub_with_overflow: { 1967 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 1968 return I; 1969 1970 Constant *C; 1971 Value *Arg0 = II->getArgOperand(0); 1972 Value *Arg1 = II->getArgOperand(1); 1973 // Given a constant C that is not the minimum signed value 1974 // for an integer of a given bit width: 1975 // 1976 // ssubo X, C -> saddo X, -C 1977 if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) { 1978 Value *NegVal = ConstantExpr::getNeg(C); 1979 // Build a saddo call that is equivalent to the discovered 1980 // ssubo call. 1981 return replaceInstUsesWith( 1982 *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow, 1983 Arg0, NegVal)); 1984 } 1985 1986 break; 1987 } 1988 1989 case Intrinsic::uadd_sat: 1990 case Intrinsic::sadd_sat: 1991 case Intrinsic::usub_sat: 1992 case Intrinsic::ssub_sat: { 1993 SaturatingInst *SI = cast<SaturatingInst>(II); 1994 Type *Ty = SI->getType(); 1995 Value *Arg0 = SI->getLHS(); 1996 Value *Arg1 = SI->getRHS(); 1997 1998 // Make use of known overflow information. 1999 OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(), 2000 Arg0, Arg1, SI); 2001 switch (OR) { 2002 case OverflowResult::MayOverflow: 2003 break; 2004 case OverflowResult::NeverOverflows: 2005 if (SI->isSigned()) 2006 return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1); 2007 else 2008 return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1); 2009 case OverflowResult::AlwaysOverflowsLow: { 2010 unsigned BitWidth = Ty->getScalarSizeInBits(); 2011 APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned()); 2012 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min)); 2013 } 2014 case OverflowResult::AlwaysOverflowsHigh: { 2015 unsigned BitWidth = Ty->getScalarSizeInBits(); 2016 APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned()); 2017 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max)); 2018 } 2019 } 2020 2021 // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN 2022 Constant *C; 2023 if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) && 2024 C->isNotMinSignedValue()) { 2025 Value *NegVal = ConstantExpr::getNeg(C); 2026 return replaceInstUsesWith( 2027 *II, Builder.CreateBinaryIntrinsic( 2028 Intrinsic::sadd_sat, Arg0, NegVal)); 2029 } 2030 2031 // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2)) 2032 // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2)) 2033 // if Val and Val2 have the same sign 2034 if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) { 2035 Value *X; 2036 const APInt *Val, *Val2; 2037 APInt NewVal; 2038 bool IsUnsigned = 2039 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat; 2040 if (Other->getIntrinsicID() == IID && 2041 match(Arg1, m_APInt(Val)) && 2042 match(Other->getArgOperand(0), m_Value(X)) && 2043 match(Other->getArgOperand(1), m_APInt(Val2))) { 2044 if (IsUnsigned) 2045 NewVal = Val->uadd_sat(*Val2); 2046 else if (Val->isNonNegative() == Val2->isNonNegative()) { 2047 bool Overflow; 2048 NewVal = Val->sadd_ov(*Val2, Overflow); 2049 if (Overflow) { 2050 // Both adds together may add more than SignedMaxValue 2051 
// without saturating the final result. 2052 break; 2053 } 2054 } else { 2055 // Cannot fold saturated addition with different signs. 2056 break; 2057 } 2058 2059 return replaceInstUsesWith( 2060 *II, Builder.CreateBinaryIntrinsic( 2061 IID, X, ConstantInt::get(II->getType(), NewVal))); 2062 } 2063 } 2064 break; 2065 } 2066 2067 case Intrinsic::minnum: 2068 case Intrinsic::maxnum: 2069 case Intrinsic::minimum: 2070 case Intrinsic::maximum: { 2071 Value *Arg0 = II->getArgOperand(0); 2072 Value *Arg1 = II->getArgOperand(1); 2073 Value *X, *Y; 2074 if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) && 2075 (Arg0->hasOneUse() || Arg1->hasOneUse())) { 2076 // If both operands are negated, invert the call and negate the result: 2077 // min(-X, -Y) --> -(max(X, Y)) 2078 // max(-X, -Y) --> -(min(X, Y)) 2079 Intrinsic::ID NewIID; 2080 switch (IID) { 2081 case Intrinsic::maxnum: 2082 NewIID = Intrinsic::minnum; 2083 break; 2084 case Intrinsic::minnum: 2085 NewIID = Intrinsic::maxnum; 2086 break; 2087 case Intrinsic::maximum: 2088 NewIID = Intrinsic::minimum; 2089 break; 2090 case Intrinsic::minimum: 2091 NewIID = Intrinsic::maximum; 2092 break; 2093 default: 2094 llvm_unreachable("unexpected intrinsic ID"); 2095 } 2096 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II); 2097 Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall); 2098 FNeg->copyIRFlags(II); 2099 return FNeg; 2100 } 2101 2102 // m(m(X, C2), C1) -> m(X, C) 2103 const APFloat *C1, *C2; 2104 if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) { 2105 if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) && 2106 ((match(M->getArgOperand(0), m_Value(X)) && 2107 match(M->getArgOperand(1), m_APFloat(C2))) || 2108 (match(M->getArgOperand(1), m_Value(X)) && 2109 match(M->getArgOperand(0), m_APFloat(C2))))) { 2110 APFloat Res(0.0); 2111 switch (IID) { 2112 case Intrinsic::maxnum: 2113 Res = maxnum(*C1, *C2); 2114 break; 2115 case Intrinsic::minnum: 2116 Res = minnum(*C1, *C2); 2117 break; 2118 case Intrinsic::maximum: 2119 Res = maximum(*C1, *C2); 2120 break; 2121 case Intrinsic::minimum: 2122 Res = minimum(*C1, *C2); 2123 break; 2124 default: 2125 llvm_unreachable("unexpected intrinsic ID"); 2126 } 2127 Instruction *NewCall = Builder.CreateBinaryIntrinsic( 2128 IID, X, ConstantFP::get(Arg0->getType(), Res), II); 2129 // TODO: Conservatively intersecting FMF. If Res == C2, the transform 2130 // was a simplification (so Arg0 and its original flags could 2131 // propagate?) 2132 NewCall->andIRFlags(M); 2133 return replaceInstUsesWith(*II, NewCall); 2134 } 2135 } 2136 2137 // m((fpext X), (fpext Y)) -> fpext (m(X, Y)) 2138 if (match(Arg0, m_OneUse(m_FPExt(m_Value(X)))) && 2139 match(Arg1, m_OneUse(m_FPExt(m_Value(Y)))) && 2140 X->getType() == Y->getType()) { 2141 Value *NewCall = 2142 Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName()); 2143 return new FPExtInst(NewCall, II->getType()); 2144 } 2145 2146 // max X, -X --> fabs X 2147 // min X, -X --> -(fabs X) 2148 // TODO: Remove one-use limitation? That is obviously better for max. 2149 // It would be an extra instruction for min (fnabs), but that is 2150 // still likely better for analysis and codegen. 
2151 if ((match(Arg0, m_OneUse(m_FNeg(m_Value(X)))) && Arg1 == X) || 2152 (match(Arg1, m_OneUse(m_FNeg(m_Value(X)))) && Arg0 == X)) { 2153 Value *R = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II); 2154 if (IID == Intrinsic::minimum || IID == Intrinsic::minnum) 2155 R = Builder.CreateFNegFMF(R, II); 2156 return replaceInstUsesWith(*II, R); 2157 } 2158 2159 break; 2160 } 2161 case Intrinsic::matrix_multiply: { 2162 // Optimize negation in matrix multiplication. 2163 2164 // -A * -B -> A * B 2165 Value *A, *B; 2166 if (match(II->getArgOperand(0), m_FNeg(m_Value(A))) && 2167 match(II->getArgOperand(1), m_FNeg(m_Value(B)))) { 2168 replaceOperand(*II, 0, A); 2169 replaceOperand(*II, 1, B); 2170 return II; 2171 } 2172 2173 Value *Op0 = II->getOperand(0); 2174 Value *Op1 = II->getOperand(1); 2175 Value *OpNotNeg, *NegatedOp; 2176 unsigned NegatedOpArg, OtherOpArg; 2177 if (match(Op0, m_FNeg(m_Value(OpNotNeg)))) { 2178 NegatedOp = Op0; 2179 NegatedOpArg = 0; 2180 OtherOpArg = 1; 2181 } else if (match(Op1, m_FNeg(m_Value(OpNotNeg)))) { 2182 NegatedOp = Op1; 2183 NegatedOpArg = 1; 2184 OtherOpArg = 0; 2185 } else 2186 // Multiplication doesn't have a negated operand. 2187 break; 2188 2189 // Only optimize if the negated operand has only one use. 2190 if (!NegatedOp->hasOneUse()) 2191 break; 2192 2193 Value *OtherOp = II->getOperand(OtherOpArg); 2194 VectorType *RetTy = cast<VectorType>(II->getType()); 2195 VectorType *NegatedOpTy = cast<VectorType>(NegatedOp->getType()); 2196 VectorType *OtherOpTy = cast<VectorType>(OtherOp->getType()); 2197 ElementCount NegatedCount = NegatedOpTy->getElementCount(); 2198 ElementCount OtherCount = OtherOpTy->getElementCount(); 2199 ElementCount RetCount = RetTy->getElementCount(); 2200 // (-A) * B -> A * (-B), if it is cheaper to negate B and vice versa. 2201 if (ElementCount::isKnownGT(NegatedCount, OtherCount) && 2202 ElementCount::isKnownLT(OtherCount, RetCount)) { 2203 Value *InverseOtherOp = Builder.CreateFNeg(OtherOp); 2204 replaceOperand(*II, NegatedOpArg, OpNotNeg); 2205 replaceOperand(*II, OtherOpArg, InverseOtherOp); 2206 return II; 2207 } 2208 // (-A) * B -> -(A * B), if it is cheaper to negate the result 2209 if (ElementCount::isKnownGT(NegatedCount, RetCount)) { 2210 SmallVector<Value *, 5> NewArgs(II->args()); 2211 NewArgs[NegatedOpArg] = OpNotNeg; 2212 Instruction *NewMul = 2213 Builder.CreateIntrinsic(II->getType(), IID, NewArgs, II); 2214 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(NewMul, II)); 2215 } 2216 break; 2217 } 2218 case Intrinsic::fmuladd: { 2219 // Canonicalize fast fmuladd to the separate fmul + fadd. 2220 if (II->isFast()) { 2221 BuilderTy::FastMathFlagGuard Guard(Builder); 2222 Builder.setFastMathFlags(II->getFastMathFlags()); 2223 Value *Mul = Builder.CreateFMul(II->getArgOperand(0), 2224 II->getArgOperand(1)); 2225 Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2)); 2226 Add->takeName(II); 2227 return replaceInstUsesWith(*II, Add); 2228 } 2229 2230 // Try to simplify the underlying FMul. 
2231 if (Value *V = simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1), 2232 II->getFastMathFlags(), 2233 SQ.getWithInstruction(II))) { 2234 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2)); 2235 FAdd->copyFastMathFlags(II); 2236 return FAdd; 2237 } 2238 2239 [[fallthrough]]; 2240 } 2241 case Intrinsic::fma: { 2242 // fma fneg(x), fneg(y), z -> fma x, y, z 2243 Value *Src0 = II->getArgOperand(0); 2244 Value *Src1 = II->getArgOperand(1); 2245 Value *X, *Y; 2246 if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) { 2247 replaceOperand(*II, 0, X); 2248 replaceOperand(*II, 1, Y); 2249 return II; 2250 } 2251 2252 // fma fabs(x), fabs(x), z -> fma x, x, z 2253 if (match(Src0, m_FAbs(m_Value(X))) && 2254 match(Src1, m_FAbs(m_Specific(X)))) { 2255 replaceOperand(*II, 0, X); 2256 replaceOperand(*II, 1, X); 2257 return II; 2258 } 2259 2260 // Try to simplify the underlying FMul. We can only apply simplifications 2261 // that do not require rounding. 2262 if (Value *V = simplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1), 2263 II->getFastMathFlags(), 2264 SQ.getWithInstruction(II))) { 2265 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2)); 2266 FAdd->copyFastMathFlags(II); 2267 return FAdd; 2268 } 2269 2270 // fma x, y, 0 -> fmul x, y 2271 // This is always valid for -0.0, but requires nsz for +0.0 as 2272 // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own. 2273 if (match(II->getArgOperand(2), m_NegZeroFP()) || 2274 (match(II->getArgOperand(2), m_PosZeroFP()) && 2275 II->getFastMathFlags().noSignedZeros())) 2276 return BinaryOperator::CreateFMulFMF(Src0, Src1, II); 2277 2278 break; 2279 } 2280 case Intrinsic::copysign: { 2281 Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1); 2282 if (SignBitMustBeZero(Sign, DL, &TLI)) { 2283 // If we know that the sign argument is positive, reduce to FABS: 2284 // copysign Mag, +Sign --> fabs Mag 2285 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II); 2286 return replaceInstUsesWith(*II, Fabs); 2287 } 2288 // TODO: There should be a ValueTracking sibling like SignBitMustBeOne. 2289 const APFloat *C; 2290 if (match(Sign, m_APFloat(C)) && C->isNegative()) { 2291 // If we know that the sign argument is negative, reduce to FNABS: 2292 // copysign Mag, -Sign --> fneg (fabs Mag) 2293 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II); 2294 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II)); 2295 } 2296 2297 // Propagate sign argument through nested calls: 2298 // copysign Mag, (copysign ?, X) --> copysign Mag, X 2299 Value *X; 2300 if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X)))) 2301 return replaceOperand(*II, 1, X); 2302 2303 // Peek through changes of magnitude's sign-bit. 
This call rewrites those: 2304 // copysign (fabs X), Sign --> copysign X, Sign 2305 // copysign (fneg X), Sign --> copysign X, Sign 2306 if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X)))) 2307 return replaceOperand(*II, 0, X); 2308 2309 break; 2310 } 2311 case Intrinsic::fabs: { 2312 Value *Cond, *TVal, *FVal; 2313 if (match(II->getArgOperand(0), 2314 m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) { 2315 // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF 2316 if (isa<Constant>(TVal) && isa<Constant>(FVal)) { 2317 CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal}); 2318 CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal}); 2319 return SelectInst::Create(Cond, AbsT, AbsF); 2320 } 2321 // fabs (select Cond, -FVal, FVal) --> fabs FVal 2322 if (match(TVal, m_FNeg(m_Specific(FVal)))) 2323 return replaceOperand(*II, 0, FVal); 2324 // fabs (select Cond, TVal, -TVal) --> fabs TVal 2325 if (match(FVal, m_FNeg(m_Specific(TVal)))) 2326 return replaceOperand(*II, 0, TVal); 2327 } 2328 2329 Value *Magnitude, *Sign; 2330 if (match(II->getArgOperand(0), 2331 m_CopySign(m_Value(Magnitude), m_Value(Sign)))) { 2332 // fabs (copysign x, y) -> (fabs x) 2333 CallInst *AbsSign = 2334 Builder.CreateCall(II->getCalledFunction(), {Magnitude}); 2335 AbsSign->copyFastMathFlags(II); 2336 return replaceInstUsesWith(*II, AbsSign); 2337 } 2338 2339 [[fallthrough]]; 2340 } 2341 case Intrinsic::ceil: 2342 case Intrinsic::floor: 2343 case Intrinsic::round: 2344 case Intrinsic::roundeven: 2345 case Intrinsic::nearbyint: 2346 case Intrinsic::rint: 2347 case Intrinsic::trunc: { 2348 Value *ExtSrc; 2349 if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) { 2350 // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x) 2351 Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II); 2352 return new FPExtInst(NarrowII, II->getType()); 2353 } 2354 break; 2355 } 2356 case Intrinsic::cos: 2357 case Intrinsic::amdgcn_cos: { 2358 Value *X; 2359 Value *Src = II->getArgOperand(0); 2360 if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) { 2361 // cos(-x) -> cos(x) 2362 // cos(fabs(x)) -> cos(x) 2363 return replaceOperand(*II, 0, X); 2364 } 2365 break; 2366 } 2367 case Intrinsic::sin: { 2368 Value *X; 2369 if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) { 2370 // sin(-x) --> -sin(x) 2371 Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II); 2372 Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin); 2373 FNeg->copyFastMathFlags(II); 2374 return FNeg; 2375 } 2376 break; 2377 } 2378 case Intrinsic::ldexp: { 2379 // ldexp(ldexp(x, a), b) -> ldexp(x, a + b) 2380 // 2381 // The danger is if the first ldexp would overflow to infinity or underflow 2382 // to zero, but the combined exponent avoids it. We ignore this with 2383 // reassoc. 2384 // 2385 // It's also safe to fold if we know both exponents are >= 0 or <= 0 since 2386 // it would just double down on the overflow/underflow which would occur 2387 // anyway. 2388 // 2389 // TODO: Could do better if we had range tracking for the input value 2390 // exponent. Also could broaden sign check to cover == 0 case. 
2391 Value *Src = II->getArgOperand(0); 2392 Value *Exp = II->getArgOperand(1); 2393 Value *InnerSrc; 2394 Value *InnerExp; 2395 if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ldexp>( 2396 m_Value(InnerSrc), m_Value(InnerExp)))) && 2397 Exp->getType() == InnerExp->getType()) { 2398 FastMathFlags FMF = II->getFastMathFlags(); 2399 FastMathFlags InnerFlags = cast<FPMathOperator>(Src)->getFastMathFlags(); 2400 2401 if ((FMF.allowReassoc() && InnerFlags.allowReassoc()) || 2402 signBitMustBeTheSame(Exp, InnerExp, II, DL, &AC, &DT)) { 2403 // TODO: Add nsw/nuw probably safe if integer type exceeds exponent 2404 // width. 2405 Value *NewExp = Builder.CreateAdd(InnerExp, Exp); 2406 II->setArgOperand(1, NewExp); 2407 II->setFastMathFlags(InnerFlags); // Or the inner flags. 2408 return replaceOperand(*II, 0, InnerSrc); 2409 } 2410 } 2411 2412 break; 2413 } 2414 case Intrinsic::ptrauth_auth: 2415 case Intrinsic::ptrauth_resign: { 2416 // (sign|resign) + (auth|resign) can be folded by omitting the middle 2417 // sign+auth component if the key and discriminator match. 2418 bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign; 2419 Value *Key = II->getArgOperand(1); 2420 Value *Disc = II->getArgOperand(2); 2421 2422 // AuthKey will be the key we need to end up authenticating against in 2423 // whatever we replace this sequence with. 2424 Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr; 2425 if (auto CI = dyn_cast<CallBase>(II->getArgOperand(0))) { 2426 BasePtr = CI->getArgOperand(0); 2427 if (CI->getIntrinsicID() == Intrinsic::ptrauth_sign) { 2428 if (CI->getArgOperand(1) != Key || CI->getArgOperand(2) != Disc) 2429 break; 2430 } else if (CI->getIntrinsicID() == Intrinsic::ptrauth_resign) { 2431 if (CI->getArgOperand(3) != Key || CI->getArgOperand(4) != Disc) 2432 break; 2433 AuthKey = CI->getArgOperand(1); 2434 AuthDisc = CI->getArgOperand(2); 2435 } else 2436 break; 2437 } else 2438 break; 2439 2440 unsigned NewIntrin; 2441 if (AuthKey && NeedSign) { 2442 // resign(0,1) + resign(1,2) = resign(0, 2) 2443 NewIntrin = Intrinsic::ptrauth_resign; 2444 } else if (AuthKey) { 2445 // resign(0,1) + auth(1) = auth(0) 2446 NewIntrin = Intrinsic::ptrauth_auth; 2447 } else if (NeedSign) { 2448 // sign(0) + resign(0, 1) = sign(1) 2449 NewIntrin = Intrinsic::ptrauth_sign; 2450 } else { 2451 // sign(0) + auth(0) = nop 2452 replaceInstUsesWith(*II, BasePtr); 2453 eraseInstFromFunction(*II); 2454 return nullptr; 2455 } 2456 2457 SmallVector<Value *, 4> CallArgs; 2458 CallArgs.push_back(BasePtr); 2459 if (AuthKey) { 2460 CallArgs.push_back(AuthKey); 2461 CallArgs.push_back(AuthDisc); 2462 } 2463 2464 if (NeedSign) { 2465 CallArgs.push_back(II->getArgOperand(3)); 2466 CallArgs.push_back(II->getArgOperand(4)); 2467 } 2468 2469 Function *NewFn = Intrinsic::getDeclaration(II->getModule(), NewIntrin); 2470 return CallInst::Create(NewFn, CallArgs); 2471 } 2472 case Intrinsic::arm_neon_vtbl1: 2473 case Intrinsic::aarch64_neon_tbl1: 2474 if (Value *V = simplifyNeonTbl1(*II, Builder)) 2475 return replaceInstUsesWith(*II, V); 2476 break; 2477 2478 case Intrinsic::arm_neon_vmulls: 2479 case Intrinsic::arm_neon_vmullu: 2480 case Intrinsic::aarch64_neon_smull: 2481 case Intrinsic::aarch64_neon_umull: { 2482 Value *Arg0 = II->getArgOperand(0); 2483 Value *Arg1 = II->getArgOperand(1); 2484 2485 // Handle mul by zero first: 2486 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) { 2487 return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType())); 2488 } 2489 2490 // Check for 
constant LHS & RHS - in this case we just simplify.
2491 bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
2492 IID == Intrinsic::aarch64_neon_umull);
2493 VectorType *NewVT = cast<VectorType>(II->getType());
2494 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
2495 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
2496 CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
2497 CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
2498
2499 return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
2500 }
2501
2502 // Couldn't simplify - canonicalize constant to the RHS.
2503 std::swap(Arg0, Arg1);
2504 }
2505
2506 // Handle mul by one:
2507 if (Constant *CV1 = dyn_cast<Constant>(Arg1))
2508 if (ConstantInt *Splat =
2509 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
2510 if (Splat->isOne())
2511 return CastInst::CreateIntegerCast(Arg0, II->getType(),
2512 /*isSigned=*/!Zext);
2513
2514 break;
2515 }
2516 case Intrinsic::arm_neon_aesd:
2517 case Intrinsic::arm_neon_aese:
2518 case Intrinsic::aarch64_crypto_aesd:
2519 case Intrinsic::aarch64_crypto_aese: {
2520 Value *DataArg = II->getArgOperand(0);
2521 Value *KeyArg = II->getArgOperand(1);
2522
2523 // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR.
2524 Value *Data, *Key;
2525 if (match(KeyArg, m_ZeroInt()) &&
2526 match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
2527 replaceOperand(*II, 0, Data);
2528 replaceOperand(*II, 1, Key);
2529 return II;
2530 }
2531 break;
2532 }
2533 case Intrinsic::hexagon_V6_vandvrt:
2534 case Intrinsic::hexagon_V6_vandvrt_128B: {
2535 // Simplify Q -> V -> Q conversion.
2536 if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
2537 Intrinsic::ID ID0 = Op0->getIntrinsicID();
2538 if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
2539 ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
2540 break;
2541 Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
2542 uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue();
2543 uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue();
2544 // Check if every byte has common bits in Bytes and Mask.
2545 uint64_t C = Bytes1 & Mask1;
2546 if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
2547 return replaceInstUsesWith(*II, Op0->getArgOperand(0));
2548 }
2549 break;
2550 }
2551 case Intrinsic::stackrestore: {
2552 enum class ClassifyResult {
2553 None,
2554 Alloca,
2555 StackRestore,
2556 CallWithSideEffects,
2557 };
2558 auto Classify = [](const Instruction *I) {
2559 if (isa<AllocaInst>(I))
2560 return ClassifyResult::Alloca;
2561
2562 if (auto *CI = dyn_cast<CallInst>(I)) {
2563 if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
2564 if (II->getIntrinsicID() == Intrinsic::stackrestore)
2565 return ClassifyResult::StackRestore;
2566
2567 if (II->mayHaveSideEffects())
2568 return ClassifyResult::CallWithSideEffects;
2569 } else {
2570 // Consider all non-intrinsic calls to have side effects.
2571 return ClassifyResult::CallWithSideEffects;
2572 }
2573 }
2574
2575 return ClassifyResult::None;
2576 };
2577
2578 // If the stacksave and the stackrestore are in the same BB, and there is
2579 // no intervening call, alloca, or stackrestore of a different stacksave,
2580 // remove the restore. This can happen when variable allocas are DCE'd.
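// e.g. (illustrative, after a variable-length alloca was DCE'd):
//   %ss = call ptr @llvm.stacksave()
//   ...                                   ; only ClassifyResult::None here
//   call void @llvm.stackrestore(ptr %ss) ; removable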
2581 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
2582 if (SS->getIntrinsicID() == Intrinsic::stacksave &&
2583 SS->getParent() == II->getParent()) {
2584 BasicBlock::iterator BI(SS);
2585 bool CannotRemove = false;
2586 for (++BI; &*BI != II; ++BI) {
2587 switch (Classify(&*BI)) {
2588 case ClassifyResult::None:
2589 // So far so good, look at next instructions.
2590 break;
2591
2592 case ClassifyResult::StackRestore:
2593 // If we found an intervening stackrestore for a different
2594 // stacksave, we can't remove the stackrestore. Otherwise, continue.
2595 if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
2596 CannotRemove = true;
2597 break;
2598
2599 case ClassifyResult::Alloca:
2600 case ClassifyResult::CallWithSideEffects:
2601 // If we found an alloca, a non-intrinsic call, or an intrinsic
2602 // call with side effects, we can't remove the stackrestore.
2603 CannotRemove = true;
2604 break;
2605 }
2606 if (CannotRemove)
2607 break;
2608 }
2609
2610 if (!CannotRemove)
2611 return eraseInstFromFunction(CI);
2612 }
2613 }
2614
2615 // Scan down this block to see if there is another stack restore in the
2616 // same block without an intervening call/alloca.
2617 BasicBlock::iterator BI(II);
2618 Instruction *TI = II->getParent()->getTerminator();
2619 bool CannotRemove = false;
2620 for (++BI; &*BI != TI; ++BI) {
2621 switch (Classify(&*BI)) {
2622 case ClassifyResult::None:
2623 // So far so good, look at next instructions.
2624 break;
2625
2626 case ClassifyResult::StackRestore:
2627 // If there is a stackrestore below this one, remove this one.
2628 return eraseInstFromFunction(CI);
2629
2630 case ClassifyResult::Alloca:
2631 case ClassifyResult::CallWithSideEffects:
2632 // If we found an alloca, a non-intrinsic call, or an intrinsic call
2633 // with side effects (such as llvm.stacksave and llvm.read_register),
2634 // we can't remove the stack restore.
2635 CannotRemove = true;
2636 break;
2637 }
2638 if (CannotRemove)
2639 break;
2640 }
2641
2642 // If the stack restore is in a return, resume, or unwind block and if there
2643 // are no allocas or calls between the restore and the return, nuke the
2644 // restore.
2645 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
2646 return eraseInstFromFunction(CI);
2647 break;
2648 }
2649 case Intrinsic::lifetime_end:
2650 // ASan needs to poison memory to detect invalid accesses, which is
2651 // possible even for an empty lifetime range.
2652 if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
2653 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
2654 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
2655 break;
2656
2657 if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) {
2658 return I.getIntrinsicID() == Intrinsic::lifetime_start;
2659 }))
2660 return nullptr;
2661 break;
2662 case Intrinsic::assume: {
2663 Value *IIOperand = II->getArgOperand(0);
2664 SmallVector<OperandBundleDef, 4> OpBundles;
2665 II->getOperandBundlesAsDefs(OpBundles);
2666
2667 /// This removes the boolean condition from the assume given as the
2668 /// argument and removes the assume if it becomes useless.
2669 /// It always returns nullptr so it can be used as a return value.
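// e.g. @llvm.assume(i1 %c) [ "align"(ptr %p, i64 16) ] becomes
// @llvm.assume(i1 true) [ "align"(ptr %p, i64 16) ]; an assume left with
// no operand bundles at all is erased instead (the bundle shown is
// illustrative).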
2670 auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * { 2671 assert(isa<AssumeInst>(Assume)); 2672 if (isAssumeWithEmptyBundle(*cast<AssumeInst>(II))) 2673 return eraseInstFromFunction(CI); 2674 replaceUse(II->getOperandUse(0), ConstantInt::getTrue(II->getContext())); 2675 return nullptr; 2676 }; 2677 // Remove an assume if it is followed by an identical assume. 2678 // TODO: Do we need this? Unless there are conflicting assumptions, the 2679 // computeKnownBits(IIOperand) below here eliminates redundant assumes. 2680 Instruction *Next = II->getNextNonDebugInstruction(); 2681 if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand)))) 2682 return RemoveConditionFromAssume(Next); 2683 2684 // Canonicalize assume(a && b) -> assume(a); assume(b); 2685 // Note: New assumption intrinsics created here are registered by 2686 // the InstCombineIRInserter object. 2687 FunctionType *AssumeIntrinsicTy = II->getFunctionType(); 2688 Value *AssumeIntrinsic = II->getCalledOperand(); 2689 Value *A, *B; 2690 if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) { 2691 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles, 2692 II->getName()); 2693 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName()); 2694 return eraseInstFromFunction(*II); 2695 } 2696 // assume(!(a || b)) -> assume(!a); assume(!b); 2697 if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) { 2698 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, 2699 Builder.CreateNot(A), OpBundles, II->getName()); 2700 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, 2701 Builder.CreateNot(B), II->getName()); 2702 return eraseInstFromFunction(*II); 2703 } 2704 2705 // assume( (load addr) != null ) -> add 'nonnull' metadata to load 2706 // (if assume is valid at the load) 2707 CmpInst::Predicate Pred; 2708 Instruction *LHS; 2709 if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) && 2710 Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load && 2711 LHS->getType()->isPointerTy() && 2712 isValidAssumeForContext(II, LHS, &DT)) { 2713 MDNode *MD = MDNode::get(II->getContext(), std::nullopt); 2714 LHS->setMetadata(LLVMContext::MD_nonnull, MD); 2715 LHS->setMetadata(LLVMContext::MD_noundef, MD); 2716 return RemoveConditionFromAssume(II); 2717 2718 // TODO: apply nonnull return attributes to calls and invokes 2719 // TODO: apply range metadata for range check patterns? 2720 } 2721 2722 // Separate storage assumptions apply to the underlying allocations, not any 2723 // particular pointer within them. When evaluating the hints for AA purposes 2724 // we getUnderlyingObject them; by precomputing the answers here we can 2725 // avoid having to do so repeatedly there. 2726 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) { 2727 OperandBundleUse OBU = II->getOperandBundleAt(Idx); 2728 if (OBU.getTagName() == "separate_storage") { 2729 assert(OBU.Inputs.size() == 2); 2730 auto MaybeSimplifyHint = [&](const Use &U) { 2731 Value *Hint = U.get(); 2732 // Not having a limit is safe because InstCombine removes unreachable 2733 // code. 
2734 Value *UnderlyingObject = getUnderlyingObject(Hint, /*MaxLookup*/ 0);
2735 if (Hint != UnderlyingObject)
2736 replaceUse(const_cast<Use &>(U), UnderlyingObject);
2737 };
2738 MaybeSimplifyHint(OBU.Inputs[0]);
2739 MaybeSimplifyHint(OBU.Inputs[1]);
2740 }
2741 }
2742
2743 // Convert nonnull assume like:
2744 // %A = icmp ne i32* %PTR, null
2745 // call void @llvm.assume(i1 %A)
2746 // into
2747 // call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
2748 if (EnableKnowledgeRetention &&
2749 match(IIOperand, m_Cmp(Pred, m_Value(A), m_Zero())) &&
2750 Pred == CmpInst::ICMP_NE && A->getType()->isPointerTy()) {
2751 if (auto *Replacement = buildAssumeFromKnowledge(
2752 {RetainedKnowledge{Attribute::NonNull, 0, A}}, Next, &AC, &DT)) {
2753
2754 Replacement->insertBefore(Next);
2755 AC.registerAssumption(Replacement);
2756 return RemoveConditionFromAssume(II);
2757 }
2758 }
2759
2760 // Convert alignment assume like:
2761 // %B = ptrtoint i32* %A to i64
2762 // %C = and i64 %B, Constant
2763 // %D = icmp eq i64 %C, 0
2764 // call void @llvm.assume(i1 %D)
2765 // into
2766 // call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 Constant + 1)]
2767 uint64_t AlignMask;
2768 if (EnableKnowledgeRetention &&
2769 match(IIOperand,
2770 m_Cmp(Pred, m_And(m_Value(A), m_ConstantInt(AlignMask)),
2771 m_Zero())) &&
2772 Pred == CmpInst::ICMP_EQ) {
2773 if (isPowerOf2_64(AlignMask + 1)) {
2774 uint64_t Offset = 0;
2775 match(A, m_Add(m_Value(A), m_ConstantInt(Offset)));
2776 if (match(A, m_PtrToInt(m_Value(A)))) {
2777 /// Note: this doesn't preserve the offset information but merges
2778 /// offset and alignment.
2779 /// TODO: we can generate a GEP instead of merging the alignment with
2780 /// the offset.
2781 RetainedKnowledge RK{Attribute::Alignment,
2782 (unsigned)MinAlign(Offset, AlignMask + 1), A};
2783 if (auto *Replacement =
2784 buildAssumeFromKnowledge(RK, Next, &AC, &DT)) {
2785
2786 Replacement->insertAfter(II);
2787 AC.registerAssumption(Replacement);
2788 }
2789 return RemoveConditionFromAssume(II);
2790 }
2791 }
2792 }
2793
2794 /// Canonicalize Knowledge in operand bundles.
2795 if (EnableKnowledgeRetention && II->hasOperandBundles()) {
2796 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
2797 auto &BOI = II->bundle_op_info_begin()[Idx];
2798 RetainedKnowledge RK =
2799 llvm::getKnowledgeFromBundle(cast<AssumeInst>(*II), BOI);
2800 if (BOI.End - BOI.Begin > 2)
2801 continue; // Prevent reducing knowledge in an align bundle with offset,
2802 // since extracting a RetainedKnowledge from it loses the
2803 // offset information.
2804 RetainedKnowledge CanonRK =
2805 llvm::simplifyRetainedKnowledge(cast<AssumeInst>(II), RK,
2806 &getAssumptionCache(),
2807 &getDominatorTree());
2808 if (CanonRK == RK)
2809 continue;
2810 if (!CanonRK) {
2811 if (BOI.End - BOI.Begin > 0) {
2812 Worklist.pushValue(II->op_begin()[BOI.Begin]);
2813 Value::dropDroppableUse(II->op_begin()[BOI.Begin]);
2814 }
2815 continue;
2816 }
2817 assert(RK.AttrKind == CanonRK.AttrKind);
2818 if (BOI.End - BOI.Begin > 0)
2819 II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
2820 if (BOI.End - BOI.Begin > 1)
2821 II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
2822 Type::getInt64Ty(II->getContext()), CanonRK.ArgValue));
2823 if (RK.WasOn)
2824 Worklist.pushValue(RK.WasOn);
2825 return II;
2826 }
2827 }
2828
2829 // If there is a dominating assume with the same condition as this one,
2830 // then this one is redundant, and should be removed.
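// e.g. for two back-to-back calls to @llvm.assume(i1 %c), computeKnownBits
// on %c at the second assume sees the first one through the assumption
// cache and reports "known true", so the duplicate is erased (a sketch of
// the common case).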
2831 KnownBits Known(1);
2832 computeKnownBits(IIOperand, Known, 0, II);
2833 if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II)))
2834 return eraseInstFromFunction(*II);
2835
2836 // assume(false) is unreachable.
2837 if (match(IIOperand, m_CombineOr(m_Zero(), m_Undef()))) {
2838 CreateNonTerminatorUnreachable(II);
2839 return eraseInstFromFunction(*II);
2840 }
2841
2842 // Update the cache of affected values for this assumption (we might be
2843 // here because we just simplified the condition).
2844 AC.updateAffectedValues(cast<AssumeInst>(II));
2845 break;
2846 }
2847 case Intrinsic::experimental_guard: {
2848 // Is this guard followed by another guard? We scan forward over a small
2849 // fixed window of instructions to handle common cases with conditions
2850 // computed between guards.
2851 Instruction *NextInst = II->getNextNonDebugInstruction();
2852 for (unsigned i = 0; i < GuardWideningWindow; i++) {
2853 // Note: Using the context-free form to avoid compile-time blow-up.
2854 if (!isSafeToSpeculativelyExecute(NextInst))
2855 break;
2856 NextInst = NextInst->getNextNonDebugInstruction();
2857 }
2858 Value *NextCond = nullptr;
2859 if (match(NextInst,
2860 m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
2861 Value *CurrCond = II->getArgOperand(0);
2862
2863 // Remove a guard that is immediately preceded by an identical guard.
2864 // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
2865 if (CurrCond != NextCond) {
2866 Instruction *MoveI = II->getNextNonDebugInstruction();
2867 while (MoveI != NextInst) {
2868 auto *Temp = MoveI;
2869 MoveI = MoveI->getNextNonDebugInstruction();
2870 Temp->moveBefore(II);
2871 }
2872 replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
2873 }
2874 eraseInstFromFunction(*NextInst);
2875 return II;
2876 }
2877 break;
2878 }
2879 case Intrinsic::vector_insert: {
2880 Value *Vec = II->getArgOperand(0);
2881 Value *SubVec = II->getArgOperand(1);
2882 Value *Idx = II->getArgOperand(2);
2883 auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
2884 auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
2885 auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());
2886
2887 // Only canonicalize if the destination vector, Vec, and SubVec are all
2888 // fixed vectors.
2889 if (DstTy && VecTy && SubVecTy) {
2890 unsigned DstNumElts = DstTy->getNumElements();
2891 unsigned VecNumElts = VecTy->getNumElements();
2892 unsigned SubVecNumElts = SubVecTy->getNumElements();
2893 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
2894
2895 // An insert that entirely overwrites Vec with SubVec is a nop.
2896 if (VecNumElts == SubVecNumElts)
2897 return replaceInstUsesWith(CI, SubVec);
2898
2899 // Widen SubVec into a vector of the same width as Vec, since
2900 // shufflevector requires the two input vectors to be the same width.
2901 // Elements beyond the bounds of SubVec within the widened vector are
2902 // undefined.
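// e.g. inserting <2 x i32> %sub into <4 x i32> %vec at index 2
// (illustrative):
//   %widen = shufflevector <2 x i32> %sub, <2 x i32> poison,
//                          <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
//   %res   = shufflevector <4 x i32> %vec, <4 x i32> %widen,
//                          <4 x i32> <i32 0, i32 1, i32 4, i32 5>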
2903 SmallVector<int, 8> WidenMask;
2904 unsigned i;
2905 for (i = 0; i != SubVecNumElts; ++i)
2906 WidenMask.push_back(i);
2907 for (; i != VecNumElts; ++i)
2908 WidenMask.push_back(PoisonMaskElem);
2909
2910 Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);
2911
2912 SmallVector<int, 8> Mask;
2913 for (unsigned i = 0; i != IdxN; ++i)
2914 Mask.push_back(i);
2915 for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
2916 Mask.push_back(i);
2917 for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
2918 Mask.push_back(i);
2919
2920 Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
2921 return replaceInstUsesWith(CI, Shuffle);
2922 }
2923 break;
2924 }
2925 case Intrinsic::vector_extract: {
2926 Value *Vec = II->getArgOperand(0);
2927 Value *Idx = II->getArgOperand(1);
2928
2929 Type *ReturnType = II->getType();
2930 // (extract_vector (insert_vector InsertTuple, InsertValue, InsertIdx),
2931 // ExtractIdx)
2932 unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue();
2933 Value *InsertTuple, *InsertIdx, *InsertValue;
2934 if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(m_Value(InsertTuple),
2935 m_Value(InsertValue),
2936 m_Value(InsertIdx))) &&
2937 InsertValue->getType() == ReturnType) {
2938 unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();
2939 // Case where we get the same index right after setting it.
2940 // extract.vector(insert.vector(InsertTuple, InsertValue, Idx), Idx) -->
2941 // InsertValue
2942 if (ExtractIdx == Index)
2943 return replaceInstUsesWith(CI, InsertValue);
2944 // If we are getting a different index than what was set in the
2945 // insert.vector intrinsic, we can just set the input tuple to the one up
2946 // in the chain. extract.vector(insert.vector(InsertTuple, InsertValue,
2947 // InsertIndex), ExtractIndex)
2948 // --> extract.vector(InsertTuple, ExtractIndex)
2949 else
2950 return replaceOperand(CI, 0, InsertTuple);
2951 }
2952
2953 auto *DstTy = dyn_cast<FixedVectorType>(ReturnType);
2954 auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
2955
2956 // Only canonicalize if the destination vector and Vec are fixed
2957 // vectors.
2958 if (DstTy && VecTy) {
2959 unsigned DstNumElts = DstTy->getNumElements();
2960 unsigned VecNumElts = VecTy->getNumElements();
2961 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
2962
2963 // Extracting the entirety of Vec is a nop.
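// Otherwise the extract becomes a single source shuffle (built below),
// e.g. extracting <2 x i32> at index 2 from <4 x i32> %v (illustrative):
//   %res = shufflevector <4 x i32> %v, <4 x i32> poison,
//                        <2 x i32> <i32 2, i32 3>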
2964 if (VecNumElts == DstNumElts) {
2965 replaceInstUsesWith(CI, Vec);
2966 return eraseInstFromFunction(CI);
2967 }
2968
2969 SmallVector<int, 8> Mask;
2970 for (unsigned i = 0; i != DstNumElts; ++i)
2971 Mask.push_back(IdxN + i);
2972
2973 Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);
2974 return replaceInstUsesWith(CI, Shuffle);
2975 }
2976 break;
2977 }
2978 case Intrinsic::experimental_vector_reverse: {
2979 Value *BO0, *BO1, *X, *Y;
2980 Value *Vec = II->getArgOperand(0);
2981 if (match(Vec, m_OneUse(m_BinOp(m_Value(BO0), m_Value(BO1))))) {
2982 auto *OldBinOp = cast<BinaryOperator>(Vec);
2983 if (match(BO0, m_VecReverse(m_Value(X)))) {
2984 // rev(binop rev(X), rev(Y)) --> binop X, Y
2985 if (match(BO1, m_VecReverse(m_Value(Y))))
2986 return replaceInstUsesWith(CI,
2987 BinaryOperator::CreateWithCopiedFlags(
2988 OldBinOp->getOpcode(), X, Y, OldBinOp,
2989 OldBinOp->getName(), II));
2990 // rev(binop rev(X), BO1Splat) --> binop X, BO1Splat
2991 if (isSplatValue(BO1))
2992 return replaceInstUsesWith(CI,
2993 BinaryOperator::CreateWithCopiedFlags(
2994 OldBinOp->getOpcode(), X, BO1,
2995 OldBinOp, OldBinOp->getName(), II));
2996 }
2997 // rev(binop BO0Splat, rev(Y)) --> binop BO0Splat, Y
2998 if (match(BO1, m_VecReverse(m_Value(Y))) && isSplatValue(BO0))
2999 return replaceInstUsesWith(CI, BinaryOperator::CreateWithCopiedFlags(
3000 OldBinOp->getOpcode(), BO0, Y,
3001 OldBinOp, OldBinOp->getName(), II));
3002 }
3003 // rev(unop rev(X)) --> unop X
3004 if (match(Vec, m_OneUse(m_UnOp(m_VecReverse(m_Value(X)))))) {
3005 auto *OldUnOp = cast<UnaryOperator>(Vec);
3006 auto *NewUnOp = UnaryOperator::CreateWithCopiedFlags(
3007 OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(), II);
3008 return replaceInstUsesWith(CI, NewUnOp);
3009 }
3010 break;
3011 }
3012 case Intrinsic::vector_reduce_or:
3013 case Intrinsic::vector_reduce_and: {
3014 // Canonicalize logical or/and reductions:
3015 // Or reduction for i1 is represented as:
3016 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3017 // %res = cmp ne iReduxWidth %val, 0
3018 // And reduction for i1 is represented as:
3019 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3020 // %res = cmp eq iReduxWidth %val, -1 (i.e. all ones)
3021 Value *Arg = II->getArgOperand(0);
3022 Value *Vect;
3023 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
3024 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
3025 if (FTy->getElementType() == Builder.getInt1Ty()) {
3026 Value *Res = Builder.CreateBitCast(
3027 Vect, Builder.getIntNTy(FTy->getNumElements()));
3028 if (IID == Intrinsic::vector_reduce_and) {
3029 Res = Builder.CreateICmpEQ(
3030 Res, ConstantInt::getAllOnesValue(Res->getType()));
3031 } else {
3032 assert(IID == Intrinsic::vector_reduce_or &&
3033 "Expected or reduction.");
3034 Res = Builder.CreateIsNotNull(Res);
3035 }
3036 if (Arg != Vect)
3037 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
3038 II->getType());
3039 return replaceInstUsesWith(CI, Res);
3040 }
3041 }
3042 [[fallthrough]];
3043 }
3044 case Intrinsic::vector_reduce_add: {
3045 if (IID == Intrinsic::vector_reduce_add) {
3046 // Convert vector_reduce_add(ZExt(<n x i1>)) to
3047 // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
3048 // Convert vector_reduce_add(SExt(<n x i1>)) to
3049 // -ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
3050 // Convert vector_reduce_add(<n x i1>) to
3051 // Trunc(ctpop(bitcast <n x i1> to in)).
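// e.g. vector_reduce_add(zext <8 x i1> %m to <8 x i32>) (illustrative):
//   %bc = bitcast <8 x i1> %m to i8
//   %ct = call i8 @llvm.ctpop.i8(i8 %bc)
//   %r  = zext i8 %ct to i32
// For the SExt form, the same ctpop result is negated instead.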
3052 Value *Arg = II->getArgOperand(0); 3053 Value *Vect; 3054 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3055 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3056 if (FTy->getElementType() == Builder.getInt1Ty()) { 3057 Value *V = Builder.CreateBitCast( 3058 Vect, Builder.getIntNTy(FTy->getNumElements())); 3059 Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V); 3060 if (Res->getType() != II->getType()) 3061 Res = Builder.CreateZExtOrTrunc(Res, II->getType()); 3062 if (Arg != Vect && 3063 cast<Instruction>(Arg)->getOpcode() == Instruction::SExt) 3064 Res = Builder.CreateNeg(Res); 3065 return replaceInstUsesWith(CI, Res); 3066 } 3067 } 3068 } 3069 [[fallthrough]]; 3070 } 3071 case Intrinsic::vector_reduce_xor: { 3072 if (IID == Intrinsic::vector_reduce_xor) { 3073 // Exclusive disjunction reduction over the vector with 3074 // (potentially-extended) i1 element type is actually a 3075 // (potentially-extended) arithmetic `add` reduction over the original 3076 // non-extended value: 3077 // vector_reduce_xor(?ext(<n x i1>)) 3078 // --> 3079 // ?ext(vector_reduce_add(<n x i1>)) 3080 Value *Arg = II->getArgOperand(0); 3081 Value *Vect; 3082 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3083 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3084 if (FTy->getElementType() == Builder.getInt1Ty()) { 3085 Value *Res = Builder.CreateAddReduce(Vect); 3086 if (Arg != Vect) 3087 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 3088 II->getType()); 3089 return replaceInstUsesWith(CI, Res); 3090 } 3091 } 3092 } 3093 [[fallthrough]]; 3094 } 3095 case Intrinsic::vector_reduce_mul: { 3096 if (IID == Intrinsic::vector_reduce_mul) { 3097 // Multiplicative reduction over the vector with (potentially-extended) 3098 // i1 element type is actually a (potentially zero-extended) 3099 // logical `and` reduction over the original non-extended value: 3100 // vector_reduce_mul(?ext(<n x i1>)) 3101 // --> 3102 // zext(vector_reduce_and(<n x i1>)) 3103 Value *Arg = II->getArgOperand(0); 3104 Value *Vect; 3105 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3106 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3107 if (FTy->getElementType() == Builder.getInt1Ty()) { 3108 Value *Res = Builder.CreateAndReduce(Vect); 3109 if (Res->getType() != II->getType()) 3110 Res = Builder.CreateZExt(Res, II->getType()); 3111 return replaceInstUsesWith(CI, Res); 3112 } 3113 } 3114 } 3115 [[fallthrough]]; 3116 } 3117 case Intrinsic::vector_reduce_umin: 3118 case Intrinsic::vector_reduce_umax: { 3119 if (IID == Intrinsic::vector_reduce_umin || 3120 IID == Intrinsic::vector_reduce_umax) { 3121 // UMin/UMax reduction over the vector with (potentially-extended) 3122 // i1 element type is actually a (potentially-extended) 3123 // logical `and`/`or` reduction over the original non-extended value: 3124 // vector_reduce_u{min,max}(?ext(<n x i1>)) 3125 // --> 3126 // ?ext(vector_reduce_{and,or}(<n x i1>)) 3127 Value *Arg = II->getArgOperand(0); 3128 Value *Vect; 3129 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3130 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3131 if (FTy->getElementType() == Builder.getInt1Ty()) { 3132 Value *Res = IID == Intrinsic::vector_reduce_umin 3133 ? 
Builder.CreateAndReduce(Vect)
3134 : Builder.CreateOrReduce(Vect);
3135 if (Arg != Vect)
3136 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
3137 II->getType());
3138 return replaceInstUsesWith(CI, Res);
3139 }
3140 }
3141 }
3142 [[fallthrough]];
3143 }
3144 case Intrinsic::vector_reduce_smin:
3145 case Intrinsic::vector_reduce_smax: {
3146 if (IID == Intrinsic::vector_reduce_smin ||
3147 IID == Intrinsic::vector_reduce_smax) {
3148 // SMin/SMax reduction over the vector with (potentially-extended)
3149 // i1 element type is actually a (potentially-extended)
3150 // logical `and`/`or` reduction over the original non-extended value:
3151 // vector_reduce_s{min,max}(<n x i1>)
3152 // -->
3153 // vector_reduce_{or,and}(<n x i1>)
3154 // and
3155 // vector_reduce_s{min,max}(sext(<n x i1>))
3156 // -->
3157 // sext(vector_reduce_{or,and}(<n x i1>))
3158 // and
3159 // vector_reduce_s{min,max}(zext(<n x i1>))
3160 // -->
3161 // zext(vector_reduce_{and,or}(<n x i1>))
3162 Value *Arg = II->getArgOperand(0);
3163 Value *Vect;
3164 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
3165 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
3166 if (FTy->getElementType() == Builder.getInt1Ty()) {
3167 Instruction::CastOps ExtOpc = Instruction::CastOps::CastOpsEnd;
3168 if (Arg != Vect)
3169 ExtOpc = cast<CastInst>(Arg)->getOpcode();
3170 Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
3171 (ExtOpc == Instruction::CastOps::ZExt))
3172 ? Builder.CreateAndReduce(Vect)
3173 : Builder.CreateOrReduce(Vect);
3174 if (Arg != Vect)
3175 Res = Builder.CreateCast(ExtOpc, Res, II->getType());
3176 return replaceInstUsesWith(CI, Res);
3177 }
3178 }
3179 }
3180 [[fallthrough]];
3181 }
3182 case Intrinsic::vector_reduce_fmax:
3183 case Intrinsic::vector_reduce_fmin:
3184 case Intrinsic::vector_reduce_fadd:
3185 case Intrinsic::vector_reduce_fmul: {
3186 bool CanBeReassociated = (IID != Intrinsic::vector_reduce_fadd &&
3187 IID != Intrinsic::vector_reduce_fmul) ||
3188 II->hasAllowReassoc();
3189 const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
3190 IID == Intrinsic::vector_reduce_fmul)
3191 ? 1
3192 : 0;
3193 Value *Arg = II->getArgOperand(ArgIdx);
3194 Value *V;
3195 ArrayRef<int> Mask;
3196 if (!isa<FixedVectorType>(Arg->getType()) || !CanBeReassociated ||
3197 !match(Arg, m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))) ||
3198 !cast<ShuffleVectorInst>(Arg)->isSingleSource())
3199 break;
3200 int Sz = Mask.size();
3201 SmallBitVector UsedIndices(Sz);
3202 for (int Idx : Mask) {
3203 if (Idx == PoisonMaskElem || UsedIndices.test(Idx))
3204 break;
3205 UsedIndices.set(Idx);
3206 }
3207 // We can remove the shuffle iff it merely reorders the elements: no
3208 // repeats, no poison elements, and no other changes.
3209 if (UsedIndices.all()) {
3210 replaceUse(II->getOperandUse(ArgIdx), V);
3211 return nullptr;
3212 }
3213 break;
3214 }
3215 case Intrinsic::is_fpclass: {
3216 if (Instruction *I = foldIntrinsicIsFPClass(*II))
3217 return I;
3218 break;
3219 }
3220 default: {
3221 // Handle target-specific intrinsics.
3222 std::optional<Instruction *> V = targetInstCombineIntrinsic(*II);
3223 if (V)
3224 return *V;
3225 break;
3226 }
3227 }
3228
3229 // Try to fold the intrinsic into select operands. This is legal if:
3230 // * The intrinsic is speculatable.
3231 // * The select condition is not a vector, or the intrinsic does not
3232 // perform cross-lane operations.
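// For instance (illustrative): ctpop(select %c, i32 2, i32 8) can fold to
// select %c, i32 1, i32 1, since ctpop of each constant arm simplifies.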
3233 switch (IID) {
3234 case Intrinsic::ctlz:
3235 case Intrinsic::cttz:
3236 case Intrinsic::ctpop:
3237 case Intrinsic::umin:
3238 case Intrinsic::umax:
3239 case Intrinsic::smin:
3240 case Intrinsic::smax:
3241 case Intrinsic::usub_sat:
3242 case Intrinsic::uadd_sat:
3243 case Intrinsic::ssub_sat:
3244 case Intrinsic::sadd_sat:
3245 for (Value *Op : II->args())
3246 if (auto *Sel = dyn_cast<SelectInst>(Op))
3247 if (Instruction *R = FoldOpIntoSelect(*II, Sel))
3248 return R;
3249 [[fallthrough]];
3250 default:
3251 break;
3252 }
3253
3254 if (Instruction *Shuf = foldShuffledIntrinsicOperands(II, Builder))
3255 return Shuf;
3256
3257 // Some intrinsics (like experimental_gc_statepoint) can be used in invoke
3258 // context as well, so they are handled in visitCallBase; defer to it here.
3259 return visitCallBase(*II);
3260 }
3261
3262 // Fence instruction simplification
3263 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
3264 auto *NFI = dyn_cast<FenceInst>(FI.getNextNonDebugInstruction());
3265 // This check is solely here to handle arbitrary target-dependent syncscopes.
3266 // TODO: Remove this check if it turns out not to matter in practice.
3267 if (NFI && FI.isIdenticalTo(NFI))
3268 return eraseInstFromFunction(FI);
3269
3270 // Returns true if FI1 is an identical or stronger fence than FI2.
3271 auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) {
3272 auto FI1SyncScope = FI1->getSyncScopeID();
3273 // Only consider fences in the same scope, where the scope is either
3274 // system-wide or single-thread.
3275 if (FI1SyncScope != FI2->getSyncScopeID() ||
3276 (FI1SyncScope != SyncScope::System &&
3277 FI1SyncScope != SyncScope::SingleThread))
3278 return false;
3279
3280 return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering());
3281 };
3282 if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
3283 return eraseInstFromFunction(FI);
3284
3285 if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNonDebugInstruction()))
3286 if (isIdenticalOrStrongerFence(PFI, &FI))
3287 return eraseInstFromFunction(FI);
3288 return nullptr;
3289 }
3290
3291 // InvokeInst simplification
3292 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
3293 return visitCallBase(II);
3294 }
3295
3296 // CallBrInst simplification
3297 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
3298 return visitCallBase(CBI);
3299 }
3300
3301 Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
3302 if (!CI->getCalledFunction()) return nullptr;
3303
3304 // Skip optimizing notail and musttail calls so
3305 // LibCallSimplifier::optimizeCall doesn't have to preserve those invariants.
3306 // LibCallSimplifier::optimizeCall should try to preserve tail calls though.
3307 if (CI->isMustTailCall() || CI->isNoTailCall())
3308 return nullptr;
3309
3310 auto InstCombineRAUW = [this](Instruction *From, Value *With) {
3311 replaceInstUsesWith(*From, With);
3312 };
3313 auto InstCombineErase = [this](Instruction *I) {
3314 eraseInstFromFunction(*I);
3315 };
3316 LibCallSimplifier Simplifier(DL, &TLI, &AC, ORE, BFI, PSI, InstCombineRAUW,
3317 InstCombineErase);
3318 if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
3319 ++NumSimplified;
3320 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
3321 }
3322
3323 return nullptr;
3324 }
3325
3326 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
3327 // Strip off at most one level of pointer casts, looking for an alloca. This
3328 // is good enough in practice and simpler than handling any number of casts.
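// The pattern being recognized is roughly (illustrative IR, names
// hypothetical):
//   %tramp = alloca [N x i8]
//   call void @llvm.init.trampoline(ptr %tramp, ptr @f, ptr %nest)
//   %fp = call ptr @llvm.adjust.trampoline(ptr %tramp)
//   call void %fp(...)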
3328 Value *Underlying = TrampMem->stripPointerCasts();
3329 if (Underlying != TrampMem &&
3330 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
3331 return nullptr;
3332 if (!isa<AllocaInst>(Underlying))
3333 return nullptr;
3334
3335 IntrinsicInst *InitTrampoline = nullptr;
3336 for (User *U : TrampMem->users()) {
3337 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3338 if (!II)
3339 return nullptr;
3340 if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
3341 if (InitTrampoline)
3342 // More than one init_trampoline writes to this value. Give up.
3343 return nullptr;
3344 InitTrampoline = II;
3345 continue;
3346 }
3347 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
3348 // Allow any number of calls to adjust.trampoline.
3349 continue;
3350 return nullptr;
3351 }
3352
3353 // No call to init.trampoline found.
3354 if (!InitTrampoline)
3355 return nullptr;
3356
3357 // Check that the alloca is being used in the expected way.
3358 if (InitTrampoline->getOperand(0) != TrampMem)
3359 return nullptr;
3360
3361 return InitTrampoline;
3362 }
3363
3364 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
3365 Value *TrampMem) {
3366 // Visit all the previous instructions in the basic block, and try to find
3367 // an init.trampoline which has a direct path to the adjust.trampoline.
3368 for (BasicBlock::iterator I = AdjustTramp->getIterator(),
3369 E = AdjustTramp->getParent()->begin();
3370 I != E;) {
3371 Instruction *Inst = &*--I;
3372 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
3373 if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
3374 II->getOperand(0) == TrampMem)
3375 return II;
3376 if (Inst->mayWriteToMemory())
3377 return nullptr;
3378 }
3379 return nullptr;
3380 }
3381
3382 // Given a call to llvm.adjust.trampoline, find and return the corresponding
3383 // call to llvm.init.trampoline if the call to the trampoline can be optimized
3384 // to a direct call to a function. Otherwise return nullptr.
3385 static IntrinsicInst *findInitTrampoline(Value *Callee) {
3386 Callee = Callee->stripPointerCasts();
3387 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
3388 if (!AdjustTramp ||
3389 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
3390 return nullptr;
3391
3392 Value *TrampMem = AdjustTramp->getOperand(0);
3393
3394 if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
3395 return IT;
3396 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
3397 return IT;
3398 return nullptr;
3399 }
3400
3401 bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
3402 const TargetLibraryInfo *TLI) {
3403 // Note: We only handle cases which can't be driven from generic attributes
3404 // here. So, for example, nonnull and noalias (which are common properties
3405 // of some allocation functions) are expected to be handled via annotation
3406 // of the respective allocator declaration with generic attributes.
3407 bool Changed = false;
3408
3409 if (!Call.getType()->isPointerTy())
3410 return Changed;
3411
3412 std::optional<APInt> Size = getAllocSize(&Call, TLI);
3413 if (Size && *Size != 0) {
3414 // TODO: We really should just emit deref_or_null here and then
3415 // let the generic inference code combine that with nonnull.
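// For example (illustrative): a call to malloc(i64 16) whose result is
// already known nonnull gains dereferenceable(16); otherwise it gains
// dereferenceable_or_null(16).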
3416 if (Call.hasRetAttr(Attribute::NonNull)) {
3417 Changed = !Call.hasRetAttr(Attribute::Dereferenceable);
3418 Call.addRetAttr(Attribute::getWithDereferenceableBytes(
3419 Call.getContext(), Size->getLimitedValue()));
3420 } else {
3421 Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull);
3422 Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
3423 Call.getContext(), Size->getLimitedValue()));
3424 }
3425 }
3426
3427 // Add alignment attribute if alignment is a power of two constant.
3428 Value *Alignment = getAllocAlignment(&Call, TLI);
3429 if (!Alignment)
3430 return Changed;
3431
3432 ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
3433 if (AlignOpC && AlignOpC->getValue().ult(llvm::Value::MaximumAlignment)) {
3434 uint64_t AlignmentVal = AlignOpC->getZExtValue();
3435 if (llvm::isPowerOf2_64(AlignmentVal)) {
3436 Align ExistingAlign = Call.getRetAlign().valueOrOne();
3437 Align NewAlign = Align(AlignmentVal);
3438 if (NewAlign > ExistingAlign) {
3439 Call.addRetAttr(
3440 Attribute::getWithAlignment(Call.getContext(), NewAlign));
3441 Changed = true;
3442 }
3443 }
3444 }
3445 return Changed;
3446 }
3447
3448 /// Improvements for call, callbr and invoke instructions.
3449 Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
3450 bool Changed = annotateAnyAllocSite(Call, &TLI);
3451
3452 // Mark any parameters that are known to be non-null with the nonnull
3453 // attribute. This is helpful for inlining calls to functions with null
3454 // checks on their arguments.
3455 SmallVector<unsigned, 4> ArgNos;
3456 unsigned ArgNo = 0;
3457
3458 for (Value *V : Call.args()) {
3459 if (V->getType()->isPointerTy() &&
3460 !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
3461 isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
3462 ArgNos.push_back(ArgNo);
3463 ArgNo++;
3464 }
3465
3466 assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly.");
3467
3468 if (!ArgNos.empty()) {
3469 AttributeList AS = Call.getAttributes();
3470 LLVMContext &Ctx = Call.getContext();
3471 AS = AS.addParamAttribute(Ctx, ArgNos,
3472 Attribute::get(Ctx, Attribute::NonNull));
3473 Call.setAttributes(AS);
3474 Changed = true;
3475 }
3476
3477 // If the callee is a pointer to a function, attempt to move any casts to the
3478 // arguments of the call/callbr/invoke.
3479 Value *Callee = Call.getCalledOperand();
3480 Function *CalleeF = dyn_cast<Function>(Callee);
3481 if ((!CalleeF || CalleeF->getFunctionType() != Call.getFunctionType()) &&
3482 transformConstExprCastCall(Call))
3483 return nullptr;
3484
3485 if (CalleeF) {
3486 // Remove the convergent attr on calls when the callee is not convergent.
3487 if (Call.isConvergent() && !CalleeF->isConvergent() &&
3488 !CalleeF->isIntrinsic()) {
3489 LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
3490 << "\n");
3491 Call.setNotConvergent();
3492 return &Call;
3493 }
3494
3495 // If the call and callee calling conventions don't match, and neither one
3496 // of the calling conventions is compatible with the C calling convention,
3497 // this call must be unreachable, as the call is undefined.
3498 if ((CalleeF->getCallingConv() != Call.getCallingConv() &&
3499 !(CalleeF->getCallingConv() == llvm::CallingConv::C &&
3500 TargetLibraryInfoImpl::isCallingConvCCompatible(&Call)) &&
3501 !(Call.getCallingConv() == llvm::CallingConv::C &&
3502 TargetLibraryInfoImpl::isCallingConvCCompatible(CalleeF))) &&
3503 // Only do this for calls to a function with a body.
A prototype may
3504 // not actually end up matching the implementation's calling conv for a
3505 // variety of reasons (e.g. it may be written in assembly).
3506 !CalleeF->isDeclaration()) {
3507 Instruction *OldCall = &Call;
3508 CreateNonTerminatorUnreachable(OldCall);
3509 // If OldCall does not return void, replace its uses with poison.
3510 // This allows ValueHandlers and custom metadata to adjust themselves.
3511 if (!OldCall->getType()->isVoidTy())
3512 replaceInstUsesWith(*OldCall, PoisonValue::get(OldCall->getType()));
3513 if (isa<CallInst>(OldCall))
3514 return eraseInstFromFunction(*OldCall);
3515
3516 // We cannot remove an invoke or a callbr, because it would change the
3517 // CFG; just change the callee to a null pointer.
3518 cast<CallBase>(OldCall)->setCalledFunction(
3519 CalleeF->getFunctionType(),
3520 Constant::getNullValue(CalleeF->getType()));
3521 return nullptr;
3522 }
3523 }
3524
3525 // Calling a null function pointer is undefined if a null address isn't
3526 // dereferenceable.
3527 if ((isa<ConstantPointerNull>(Callee) &&
3528 !NullPointerIsDefined(Call.getFunction())) ||
3529 isa<UndefValue>(Callee)) {
3530 // If Call does not return void, replace its uses with poison.
3531 // This allows ValueHandlers and custom metadata to adjust themselves.
3532 if (!Call.getType()->isVoidTy())
3533 replaceInstUsesWith(Call, PoisonValue::get(Call.getType()));
3534
3535 if (Call.isTerminator()) {
3536 // Can't remove an invoke or callbr because we cannot change the CFG.
3537 return nullptr;
3538 }
3539
3540 // This instruction is not reachable; just remove it.
3541 CreateNonTerminatorUnreachable(&Call);
3542 return eraseInstFromFunction(Call);
3543 }
3544
3545 if (IntrinsicInst *II = findInitTrampoline(Callee))
3546 return transformCallThroughTrampoline(Call, *II);
3547
3548 if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
3549 InlineAsm *IA = cast<InlineAsm>(Callee);
3550 if (!IA->canThrow()) {
3551 // Normal inline asm calls cannot throw - mark them
3552 // 'nounwind'.
3553 Call.setDoesNotThrow();
3554 Changed = true;
3555 }
3556 }
3557
3558 // Try to optimize the call if possible; we require DataLayout for most of
3559 // this. None of these calls are seen as possibly dead so go ahead and
3560 // delete the instruction now.
3561 if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
3562 Instruction *I = tryOptimizeCall(CI);
3563 // If we changed something, return the result. Otherwise let the
3564 // fallthrough checks below run.
3565 if (I) return eraseInstFromFunction(*I);
3566 }
3567
3568 if (!Call.use_empty() && !Call.isMustTailCall())
3569 if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
3570 Type *CallTy = Call.getType();
3571 Type *RetArgTy = ReturnedArg->getType();
3572 if (RetArgTy->canLosslesslyBitCastTo(CallTy))
3573 return replaceInstUsesWith(
3574 Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
3575 }
3576
3577 // Drop unnecessary kcfi operand bundles from calls that were converted
3578 // into direct calls.
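// For example (illustrative): an indirect call
//   call void %fp(...) [ "kcfi"(i32 1234) ]
// that has since been resolved to a direct call to @f no longer needs the
// kcfi type check, so the bundle can be dropped.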
3579 auto Bundle = Call.getOperandBundle(LLVMContext::OB_kcfi);
3580 if (Bundle && !Call.isIndirectCall()) {
3581 DEBUG_WITH_TYPE(DEBUG_TYPE "-kcfi", {
3582 if (CalleeF) {
3583 ConstantInt *FunctionType = nullptr;
3584 ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]);
3585
3586 if (MDNode *MD = CalleeF->getMetadata(LLVMContext::MD_kcfi_type))
3587 FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0));
3588
3589 if (FunctionType &&
3590 FunctionType->getZExtValue() != ExpectedType->getZExtValue())
3591 dbgs() << Call.getModule()->getName()
3592 << ": warning: kcfi: " << Call.getCaller()->getName()
3593 << ": call to " << CalleeF->getName()
3594 << " using a mismatching function pointer type\n";
3595 }
3596 });
3597
3598 return CallBase::removeOperandBundle(&Call, LLVMContext::OB_kcfi);
3599 }
3600
3601 if (isRemovableAlloc(&Call, &TLI))
3602 return visitAllocSite(Call);
3603
3604 // Handle intrinsics which can be used in both call and invoke context.
3605 switch (Call.getIntrinsicID()) {
3606 case Intrinsic::experimental_gc_statepoint: {
3607 GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call);
3608 SmallPtrSet<Value *, 32> LiveGcValues;
3609 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
3610 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
3611
3612 // Remove the relocation if unused.
3613 if (GCR.use_empty()) {
3614 eraseInstFromFunction(GCR);
3615 continue;
3616 }
3617
3618 Value *DerivedPtr = GCR.getDerivedPtr();
3619 Value *BasePtr = GCR.getBasePtr();
3620
3621 // Undef is undef, even after relocation.
3622 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
3623 replaceInstUsesWith(GCR, UndefValue::get(GCR.getType()));
3624 eraseInstFromFunction(GCR);
3625 continue;
3626 }
3627
3628 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
3629 // The relocation of null will be null for almost any collector.
3630 // TODO: provide a hook for this in GCStrategy. There might be some
3631 // weird collector this property does not hold for.
3632 if (isa<ConstantPointerNull>(DerivedPtr)) {
3633 // Use null-pointer of gc_relocate's type to replace it.
3634 replaceInstUsesWith(GCR, ConstantPointerNull::get(PT));
3635 eraseInstFromFunction(GCR);
3636 continue;
3637 }
3638
3639 // isKnownNonNull -> nonnull attribute
3640 if (!GCR.hasRetAttr(Attribute::NonNull) &&
3641 isKnownNonZero(DerivedPtr, DL, 0, &AC, &Call, &DT)) {
3642 GCR.addRetAttr(Attribute::NonNull);
3643 // We discovered a new fact, so re-check the users.
3644 Worklist.pushUsersToWorkList(GCR);
3645 }
3646 }
3647
3648 // If we have two copies of the same pointer in the statepoint argument
3649 // list, canonicalize to one. This may let us common gc.relocates.
3650 if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
3651 GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
3652 auto *OpIntTy = GCR.getOperand(2)->getType();
3653 GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
3654 }
3655
3656 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
3657 // Canonicalize on the type from the uses to the defs
3658
3659 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
3660 LiveGcValues.insert(BasePtr);
3661 LiveGcValues.insert(DerivedPtr);
3662 }
3663 std::optional<OperandBundleUse> Bundle =
3664 GCSP.getOperandBundle(LLVMContext::OB_gc_live);
3665 unsigned NumOfGCLives = LiveGcValues.size();
3666 if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
3667 break;
3668 // We can reduce the size of the gc-live bundle.
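// For example (illustrative): a statepoint carrying "gc-live"(%a, %b, %c)
// where only %a is still referenced by some gc.relocate shrinks to
// "gc-live"(%a), with the relocates' base/derived indices remapped below.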
3669 DenseMap<Value *, unsigned> Val2Idx; 3670 std::vector<Value *> NewLiveGc; 3671 for (Value *V : Bundle->Inputs) { 3672 if (Val2Idx.count(V)) 3673 continue; 3674 if (LiveGcValues.count(V)) { 3675 Val2Idx[V] = NewLiveGc.size(); 3676 NewLiveGc.push_back(V); 3677 } else 3678 Val2Idx[V] = NumOfGCLives; 3679 } 3680 // Update all gc.relocates 3681 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) { 3682 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc); 3683 Value *BasePtr = GCR.getBasePtr(); 3684 assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives && 3685 "Missed live gc for base pointer"); 3686 auto *OpIntTy1 = GCR.getOperand(1)->getType(); 3687 GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr])); 3688 Value *DerivedPtr = GCR.getDerivedPtr(); 3689 assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives && 3690 "Missed live gc for derived pointer"); 3691 auto *OpIntTy2 = GCR.getOperand(2)->getType(); 3692 GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr])); 3693 } 3694 // Create new statepoint instruction. 3695 OperandBundleDef NewBundle("gc-live", NewLiveGc); 3696 return CallBase::Create(&Call, NewBundle); 3697 } 3698 default: { break; } 3699 } 3700 3701 return Changed ? &Call : nullptr; 3702 } 3703 3704 /// If the callee is a constexpr cast of a function, attempt to move the cast to 3705 /// the arguments of the call/invoke. 3706 /// CallBrInst is not supported. 3707 bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) { 3708 auto *Callee = 3709 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts()); 3710 if (!Callee) 3711 return false; 3712 3713 assert(!isa<CallBrInst>(Call) && 3714 "CallBr's don't have a single point after a def to insert at"); 3715 3716 // If this is a call to a thunk function, don't remove the cast. Thunks are 3717 // used to transparently forward all incoming parameters and outgoing return 3718 // values, so it's important to leave the cast in place. 3719 if (Callee->hasFnAttribute("thunk")) 3720 return false; 3721 3722 // If this is a musttail call, the callee's prototype must match the caller's 3723 // prototype with the exception of pointee types. The code below doesn't 3724 // implement that, so we can't do this transform. 3725 // TODO: Do the transform if it only requires adding pointer casts. 3726 if (Call.isMustTailCall()) 3727 return false; 3728 3729 Instruction *Caller = &Call; 3730 const AttributeList &CallerPAL = Call.getAttributes(); 3731 3732 // Okay, this is a cast from a function to a different type. Unless doing so 3733 // would cause a type conversion of one of our arguments, change this call to 3734 // be a direct call with arguments casted to the appropriate types. 3735 FunctionType *FT = Callee->getFunctionType(); 3736 Type *OldRetTy = Caller->getType(); 3737 Type *NewRetTy = FT->getReturnType(); 3738 3739 // Check to see if we are changing the return type... 3740 if (OldRetTy != NewRetTy) { 3741 3742 if (NewRetTy->isStructTy()) 3743 return false; // TODO: Handle multiple return values. 3744 3745 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) { 3746 if (Callee->isDeclaration()) 3747 return false; // Cannot transform this return value. 3748 3749 if (!Caller->use_empty() && 3750 // void -> non-void is handled specially 3751 !NewRetTy->isVoidTy()) 3752 return false; // Cannot transform this return value. 
3753 }
3754
3755 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
3756 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
3757 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
3758 return false; // Attribute not compatible with transformed value.
3759 }
3760
3761 // If the callbase is an invoke instruction, and the return value is
3762 // used by a PHI node in a successor, we cannot change the return type of
3763 // the call because there is no place to put the cast instruction (without
3764 // breaking the critical edge). Bail out in this case.
3765 if (!Caller->use_empty()) {
3766 BasicBlock *PhisNotSupportedBlock = nullptr;
3767 if (auto *II = dyn_cast<InvokeInst>(Caller))
3768 PhisNotSupportedBlock = II->getNormalDest();
3769 if (PhisNotSupportedBlock)
3770 for (User *U : Caller->users())
3771 if (PHINode *PN = dyn_cast<PHINode>(U))
3772 if (PN->getParent() == PhisNotSupportedBlock)
3773 return false;
3774 }
3775 }
3776
3777 unsigned NumActualArgs = Call.arg_size();
3778 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
3779
3780 // Prevent us turning:
3781 // declare void @takes_i32_inalloca(i32* inalloca)
3782 // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
3783 //
3784 // into:
3785 // call void @takes_i32_inalloca(i32* null)
3786 //
3787 // Similarly, avoid folding away bitcasts of byval calls.
3788 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
3789 Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
3790 return false;
3791
3792 auto AI = Call.arg_begin();
3793 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
3794 Type *ParamTy = FT->getParamType(i);
3795 Type *ActTy = (*AI)->getType();
3796
3797 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
3798 return false; // Cannot transform this parameter value.
3799
3800 // Check if there are any incompatible attributes we cannot drop safely.
3801 if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i))
3802 .overlaps(AttributeFuncs::typeIncompatible(
3803 ParamTy, AttributeFuncs::ASK_UNSAFE_TO_DROP)))
3804 return false; // Attribute not compatible with transformed value.
3805
3806 if (Call.isInAllocaArgument(i) ||
3807 CallerPAL.hasParamAttr(i, Attribute::Preallocated))
3808 return false; // Cannot transform to and from inalloca/preallocated.
3809
3810 if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))
3811 return false;
3812
3813 if (CallerPAL.hasParamAttr(i, Attribute::ByVal) !=
3814 Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
3815 return false; // Cannot transform to or from byval.
3816 }
3817
3818 if (Callee->isDeclaration()) {
3819 // Do not delete arguments unless we have a function body.
3820 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
3821 return false;
3822
3823 // If the callee is just a declaration, don't change the varargsness of the
3824 // call. We don't want to introduce a varargs call where one doesn't
3825 // already exist.
3826 if (FT->isVarArg() != Call.getFunctionType()->isVarArg())
3827 return false;
3828
3829 // If both the callee and the cast type are varargs, we still have to make
3830 // sure the number of fixed parameters is the same, or we have the same
3831 // ABI issues as if we introduced a varargs call.
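// e.g. (illustrative): casting void (i32, ...)* to void (i32, i32, ...)*
// changes which arguments are fixed versus variadic, which can change how
// they are passed on some ABIs.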
3832 if (FT->isVarArg() && Call.getFunctionType()->isVarArg() &&
3833 FT->getNumParams() != Call.getFunctionType()->getNumParams())
3834 return false;
3835 }
3836
3837 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
3838 !CallerPAL.isEmpty()) {
3839 // In this case we have more arguments than the new function type, but we
3840 // won't be dropping them. Check that these extra arguments have attributes
3841 // that are compatible with being a vararg call argument.
3842 unsigned SRetIdx;
3843 if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
3844 SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams())
3845 return false;
3846 }
3847
3848 // Okay, we decided that this is a safe thing to do: go ahead and start
3849 // inserting cast instructions as necessary.
3850 SmallVector<Value *, 8> Args;
3851 SmallVector<AttributeSet, 8> ArgAttrs;
3852 Args.reserve(NumActualArgs);
3853 ArgAttrs.reserve(NumActualArgs);
3854
3855 // Get any return attributes.
3856 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
3857
3858 // If the return value is not being used, the type may not be compatible
3859 // with the existing attributes. Wipe out any problematic attributes.
3860 RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
3861
3862 LLVMContext &Ctx = Call.getContext();
3863 AI = Call.arg_begin();
3864 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
3865 Type *ParamTy = FT->getParamType(i);
3866
3867 Value *NewArg = *AI;
3868 if ((*AI)->getType() != ParamTy)
3869 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
3870 Args.push_back(NewArg);
3871
3872 // Add any parameter attributes except the ones incompatible with the new
3873 // type. Note that we made sure all incompatible ones are safe to drop.
3874 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
3875 ParamTy, AttributeFuncs::ASK_SAFE_TO_DROP);
3876 ArgAttrs.push_back(
3877 CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs));
3878 }
3879
3880 // If the function takes more arguments than the call was taking, add them
3881 // now.
3882 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
3883 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
3884 ArgAttrs.push_back(AttributeSet());
3885 }
3886
3887 // If the call passes more arguments than the new function type accepts, the extras must be varargs.
3888 if (FT->getNumParams() < NumActualArgs) {
3889 // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
3890 if (FT->isVarArg()) {
3891 // Add all of the arguments in their promoted form to the arg list.
3892 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
3893 Type *PTy = getPromotedType((*AI)->getType());
3894 Value *NewArg = *AI;
3895 if (PTy != (*AI)->getType()) {
3896 // Must promote to pass through va_arg area!
3897 Instruction::CastOps opcode =
3898 CastInst::getCastOpcode(*AI, false, PTy, false);
3899 NewArg = Builder.CreateCast(opcode, *AI, PTy);
3900 }
3901 Args.push_back(NewArg);
3902
3903 // Add any parameter attributes.
3904 ArgAttrs.push_back(CallerPAL.getParamAttrs(i));
3905 }
3906 }
3907 }
3908
3909 AttributeSet FnAttrs = CallerPAL.getFnAttrs();
3910
3911 if (NewRetTy->isVoidTy())
3912 Caller->setName(""); // Void type should not have a name.
3913 3914 assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) && 3915 "missing argument attributes"); 3916 AttributeList NewCallerPAL = AttributeList::get( 3917 Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs); 3918 3919 SmallVector<OperandBundleDef, 1> OpBundles; 3920 Call.getOperandBundlesAsDefs(OpBundles); 3921 3922 CallBase *NewCall; 3923 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) { 3924 NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(), 3925 II->getUnwindDest(), Args, OpBundles); 3926 } else { 3927 NewCall = Builder.CreateCall(Callee, Args, OpBundles); 3928 cast<CallInst>(NewCall)->setTailCallKind( 3929 cast<CallInst>(Caller)->getTailCallKind()); 3930 } 3931 NewCall->takeName(Caller); 3932 NewCall->setCallingConv(Call.getCallingConv()); 3933 NewCall->setAttributes(NewCallerPAL); 3934 3935 // Preserve prof metadata if any. 3936 NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof}); 3937 3938 // Insert a cast of the return type as necessary. 3939 Instruction *NC = NewCall; 3940 Value *NV = NC; 3941 if (OldRetTy != NV->getType() && !Caller->use_empty()) { 3942 if (!NV->getType()->isVoidTy()) { 3943 NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy); 3944 NC->setDebugLoc(Caller->getDebugLoc()); 3945 3946 Instruction *InsertPt = NewCall->getInsertionPointAfterDef(); 3947 assert(InsertPt && "No place to insert cast"); 3948 InsertNewInstBefore(NC, *InsertPt); 3949 Worklist.pushUsersToWorkList(*Caller); 3950 } else { 3951 NV = PoisonValue::get(Caller->getType()); 3952 } 3953 } 3954 3955 if (!Caller->use_empty()) 3956 replaceInstUsesWith(*Caller, NV); 3957 else if (Caller->hasValueHandle()) { 3958 if (OldRetTy == NV->getType()) 3959 ValueHandleBase::ValueIsRAUWd(Caller, NV); 3960 else 3961 // We cannot call ValueIsRAUWd with a different type, and the 3962 // actual tracked value will disappear. 3963 ValueHandleBase::ValueIsDeleted(Caller); 3964 } 3965 3966 eraseInstFromFunction(*Caller); 3967 return true; 3968 } 3969 3970 /// Turn a call to a function created by init_trampoline / adjust_trampoline 3971 /// intrinsic pair into a direct call to the underlying function. 3972 Instruction * 3973 InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call, 3974 IntrinsicInst &Tramp) { 3975 Value *Callee = Call.getCalledOperand(); 3976 Type *CalleeTy = Callee->getType(); 3977 FunctionType *FTy = Call.getFunctionType(); 3978 AttributeList Attrs = Call.getAttributes(); 3979 3980 // If the call already has the 'nest' attribute somewhere then give up - 3981 // otherwise 'nest' would occur twice after splicing in the chain. 3982 if (Attrs.hasAttrSomewhere(Attribute::Nest)) 3983 return nullptr; 3984 3985 Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts()); 3986 FunctionType *NestFTy = NestF->getFunctionType(); 3987 3988 AttributeList NestAttrs = NestF->getAttributes(); 3989 if (!NestAttrs.isEmpty()) { 3990 unsigned NestArgNo = 0; 3991 Type *NestTy = nullptr; 3992 AttributeSet NestAttr; 3993 3994 // Look for a parameter marked with the 'nest' attribute. 3995 for (FunctionType::param_iterator I = NestFTy->param_begin(), 3996 E = NestFTy->param_end(); 3997 I != E; ++NestArgNo, ++I) { 3998 AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo); 3999 if (AS.hasAttribute(Attribute::Nest)) { 4000 // Record the parameter type and any other attributes. 
4001 NestTy = *I; 4002 NestAttr = AS; 4003 break; 4004 } 4005 } 4006 4007 if (NestTy) { 4008 std::vector<Value*> NewArgs; 4009 std::vector<AttributeSet> NewArgAttrs; 4010 NewArgs.reserve(Call.arg_size() + 1); 4011 NewArgAttrs.reserve(Call.arg_size()); 4012 4013 // Insert the nest argument into the call argument list, which may 4014 // mean appending it. Likewise for attributes. 4015 4016 { 4017 unsigned ArgNo = 0; 4018 auto I = Call.arg_begin(), E = Call.arg_end(); 4019 do { 4020 if (ArgNo == NestArgNo) { 4021 // Add the chain argument and attributes. 4022 Value *NestVal = Tramp.getArgOperand(2); 4023 if (NestVal->getType() != NestTy) 4024 NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest"); 4025 NewArgs.push_back(NestVal); 4026 NewArgAttrs.push_back(NestAttr); 4027 } 4028 4029 if (I == E) 4030 break; 4031 4032 // Add the original argument and attributes. 4033 NewArgs.push_back(*I); 4034 NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo)); 4035 4036 ++ArgNo; 4037 ++I; 4038 } while (true); 4039 } 4040 4041 // The trampoline may have been bitcast to a bogus type (FTy). 4042 // Handle this by synthesizing a new function type, equal to FTy 4043 // with the chain parameter inserted. 4044 4045 std::vector<Type*> NewTypes; 4046 NewTypes.reserve(FTy->getNumParams()+1); 4047 4048 // Insert the chain's type into the list of parameter types, which may 4049 // mean appending it. 4050 { 4051 unsigned ArgNo = 0; 4052 FunctionType::param_iterator I = FTy->param_begin(), 4053 E = FTy->param_end(); 4054 4055 do { 4056 if (ArgNo == NestArgNo) 4057 // Add the chain's type. 4058 NewTypes.push_back(NestTy); 4059 4060 if (I == E) 4061 break; 4062 4063 // Add the original type. 4064 NewTypes.push_back(*I); 4065 4066 ++ArgNo; 4067 ++I; 4068 } while (true); 4069 } 4070 4071 // Replace the trampoline call with a direct call. Let the generic 4072 // code sort out any function type mismatches. 4073 FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes, 4074 FTy->isVarArg()); 4075 Constant *NewCallee = 4076 NestF->getType() == PointerType::getUnqual(NewFTy) ? 
4077 NestF : ConstantExpr::getBitCast(NestF, 4078 PointerType::getUnqual(NewFTy)); 4079 AttributeList NewPAL = 4080 AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(), 4081 Attrs.getRetAttrs(), NewArgAttrs); 4082 4083 SmallVector<OperandBundleDef, 1> OpBundles; 4084 Call.getOperandBundlesAsDefs(OpBundles); 4085 4086 Instruction *NewCaller; 4087 if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) { 4088 NewCaller = InvokeInst::Create(NewFTy, NewCallee, 4089 II->getNormalDest(), II->getUnwindDest(), 4090 NewArgs, OpBundles); 4091 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv()); 4092 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL); 4093 } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) { 4094 NewCaller = 4095 CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(), 4096 CBI->getIndirectDests(), NewArgs, OpBundles); 4097 cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv()); 4098 cast<CallBrInst>(NewCaller)->setAttributes(NewPAL); 4099 } else { 4100 NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles); 4101 cast<CallInst>(NewCaller)->setTailCallKind( 4102 cast<CallInst>(Call).getTailCallKind()); 4103 cast<CallInst>(NewCaller)->setCallingConv( 4104 cast<CallInst>(Call).getCallingConv()); 4105 cast<CallInst>(NewCaller)->setAttributes(NewPAL); 4106 } 4107 NewCaller->setDebugLoc(Call.getDebugLoc()); 4108 4109 return NewCaller; 4110 } 4111 } 4112 4113 // Replace the trampoline call with a direct call. Since there is no 'nest' 4114 // parameter, there is no need to adjust the argument list. Let the generic 4115 // code sort out any function type mismatches. 4116 Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy); 4117 Call.setCalledFunction(FTy, NewCallee); 4118 return &Call; 4119 } 4120