//===- InstCombineCalls.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall, visitInvoke, and visitCallBr functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace PatternMatch;

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window",
    cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));

namespace llvm {
/// enable preservation of attributes in assume like:
/// call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
extern cl::opt<bool> EnableKnowledgeRetention;
} // namespace llvm

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// Recognize a memcpy/memmove from a trivially otherwise unused alloca.
/// TODO: This should probably be integrated with visitAllocSites, but that
/// requires a deeper change to allow either unread or unwritten objects.
static bool hasUndefSource(AnyMemTransferInst *MI) {
  auto *Src = MI->getRawSource();
  while (isa<GetElementPtrInst>(Src) || isa<BitCastInst>(Src)) {
    if (!Src->hasOneUse())
      return false;
    Src = cast<Instruction>(Src)->getOperand(0);
  }
  return isa<AllocaInst>(Src) && Src->hasOneUse();
}

Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  MaybeAlign CopyDstAlign = MI->getDestAlign();
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  MaybeAlign CopySrcAlign = MI->getSourceAlign();
  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If the source is provably undef, the memcpy/memmove doesn't do anything
  // (unless the transfer is volatile).
  if (hasUndefSource(MI) && !MI->isVolatile()) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If the MemCpyInst length is 1/2/4/8 bytes, then replace the memcpy with a
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size & (Size - 1)))
    return nullptr; // If not 1/2/4/8 bytes, exit.

  // If this is an atomic transfer and the alignment is less than the size, we
  // would introduce an unaligned memory access, which CodeGen would later turn
  // into a libcall. That is no evident performance gain, so disable the
  // transform for now.
  if (isa<AtomicMemTransferInst>(MI))
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)
      return nullptr;

  // Use an integer load+store unless we can find something better.
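  // For example (illustrative IR, assuming a 4-byte copy with adequate
  // alignment), this turns
  //   call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 4, i1 false)
  // into roughly
  //   %v = load i32, ptr %s
  //   store i32 %v, ptr %d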
  unsigned SrcAddrSp =
      cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
      cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType *IntType = IntegerType::get(MI->getContext(), Size << 3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA tag describing our copy.
  MDNode *CopyMD = nullptr;
  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
    CopyMD = M;
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        M->getOperand(1) &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
            Size &&
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      CopyMD = cast<MDNode>(M->getOperand(2));
  }

  Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder.CreateLoad(IntType, Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(*CopySrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(*CopyDstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
  S->copyMetadata(*MI, LLVMContext::MD_DIAssignID);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // non-atomics can be volatile
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
    // atomics have to be unordered
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  MaybeAlign MemSetAlign = MI->getDestAlign();
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
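  // For example, a memset into a constant global can only be writing the bytes
  // that are already there, so the call can be dropped.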
  if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Remove memset with an undef value.
  // FIXME: This is technically incorrect because it might overwrite a poison
  // value. Change to PoisonValue once #52930 is resolved.
  if (isa<UndefValue>(MI->getValue())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  const uint64_t Len = LenC->getLimitedValue();
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = MI->getDestAlign().valueOrOne();

  // If this is an atomic memset and the alignment is less than the size, we
  // would introduce an unaligned memory access, which CodeGen would later turn
  // into a libcall. That is no evident performance gain, so disable the
  // transform for now.
  if (isa<AtomicMemSetInst>(MI))
    if (Alignment < Len)
      return nullptr;

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len * 8); // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);

    // Extract the fill value and store.
    const uint64_t Fill = FillC->getZExtValue() * 0x0101010101010101ULL;
    Constant *FillVal = ConstantInt::get(ITy, Fill);
    StoreInst *S = Builder.CreateStore(FillVal, Dest, MI->isVolatile());
    S->copyMetadata(*MI, LLVMContext::MD_DIAssignID);
    for (auto *DAI : at::getAssignmentMarkers(S)) {
      if (any_of(DAI->location_ops(), [&](Value *V) { return V == FillC; }))
        DAI->replaceVariableLocationOp(FillC, FillVal);
    }

    S->setAlignment(Alignment);
    if (isa<AtomicMemSetInst>(MI))
      S->setOrdering(AtomicOrdering::Unordered);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Narrow width by halves excluding zero/undef lanes
Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  const Align Alignment =
      cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();

  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
    LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                            "unmaskedload");
    L->copyMetadata(II);
    return L;
  }

  // If we can unconditionally load from this address, replace with a
  // load/select idiom. TODO: use DT for context sensitive query
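  // E.g. (illustrative IR): when %p is known dereferenceable,
  //   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
  //                                                  <4 x i1> %m, <4 x i32> %pt)
  // becomes
  //   %l = load <4 x i32>, ptr %p, align 4
  //   %v = select <4 x i1> %m, <4 x i32> %l, <4 x i32> %pt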
  if (isDereferenceablePointer(LoadPtr, II.getType(),
                               II.getModule()->getDataLayout(), &II, &AC)) {
    LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                             "unmaskedload");
    LI->copyMetadata(II);
    return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Narrow width by halves excluding zero/undef lanes
Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
    StoreInst *S =
        new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
    S->copyMetadata(II);
    return S;
  }

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
    return replaceOperand(II, 0, V);

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane load -> load
// * Dereferenceable address & few lanes -> scalarize speculative load/selects
// * Adjacent vector addresses -> masked.load
// * Narrow width by halves excluding zero/undef lanes
// * Vector incrementing address -> vector masked load
Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
  if (!ConstMask)
    return nullptr;

  // Vector splat address w/known mask -> scalar load
  // Fold the gather to load the source vector first lane
  // because it is reloading the same value each time
  if (ConstMask->isAllOnesValue())
    if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
      auto *VecTy = cast<VectorType>(II.getType());
      const Align Alignment =
          cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
      LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
                                              Alignment, "load.scalar");
      Value *Shuf =
          Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");
      return replaceInstUsesWith(II, cast<Instruction>(Shuf));
    }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Adjacent vector addresses -> masked.store
// * Narrow store width by halves excluding zero/undef lanes
// * Vector incrementing address -> vector masked store
Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, a scatter does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // Vector splat address -> scalar store
  if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) {
    // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr
    if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) {
      Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
      StoreInst *S =
          new StoreInst(SplatValue, SplatPtr, /*IsVolatile=*/false, Alignment);
      S->copyMetadata(II);
      return S;
    }
    // scatter(vector, splat(ptr), splat(true)) -> store extract(vector,
    // lastlane), ptr
    if (ConstMask->isAllOnesValue()) {
      Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
      VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType());
      ElementCount VF = WideLoadTy->getElementCount();
      Constant *EC =
          ConstantInt::get(Builder.getInt32Ty(), VF.getKnownMinValue());
      Value *RunTimeVF = VF.isScalable() ? Builder.CreateVScale(EC) : EC;
      Value *LastLane = Builder.CreateSub(RunTimeVF, Builder.getInt32(1));
      Value *Extract =
          Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
      StoreInst *S =
          new StoreInst(Extract, SplatPtr, /*IsVolatile=*/false, Alignment);
      S->copyMetadata(II);
      return S;
    }
  }
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
    return replaceOperand(II, 0, V);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts, UndefElts))
    return replaceOperand(II, 1, V);

  return nullptr;
}

/// This function transforms launder.invariant.group and strip.invariant.group
/// like:
/// launder(launder(%x)) -> launder(%x) (the result is not the argument)
/// launder(strip(%x)) -> launder(%x)
/// strip(strip(%x)) -> strip(%x) (the result is not the argument)
/// strip(launder(%x)) -> strip(%x)
/// This is legal because it preserves the most recent information about
/// the presence or absence of invariant.group.
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombinerImpl &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = StrippedArg;
  while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
    if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
        Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
      break;
    StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
  }
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.
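
  // Rebuild a single launder/strip of the fully stripped pointer, then cast
  // the result back to II's type if the address space or pointer type differs.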
  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
  if (Result->getType() != II.getType())
    Result = IC.Builder.CreateBitCast(Result, II.getType());

  return cast<Instruction>(Result);
}

static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  Value *Op0 = II.getArgOperand(0);
  Value *Op1 = II.getArgOperand(1);
  Value *X;
  // ctlz(bitreverse(x)) -> cttz(x)
  // cttz(bitreverse(x)) -> ctlz(x)
  if (match(Op0, m_BitReverse(m_Value(X)))) {
    Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
    Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
    return CallInst::Create(F, {X, II.getArgOperand(1)});
  }

  if (II.getType()->isIntOrIntVectorTy(1)) {
    // ctlz/cttz i1 Op0 --> not Op0
    if (match(Op1, m_Zero()))
      return BinaryOperator::CreateNot(Op0);
    // If zero is poison, then the input can be assumed to be "true", so the
    // instruction simplifies to "false".
    assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");
    return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(II.getType()));
  }

  // If the operand is a select with constant arm(s), try to hoist ctlz/cttz.
  if (auto *Sel = dyn_cast<SelectInst>(Op0))
    if (Instruction *R = IC.FoldOpIntoSelect(II, Sel))
      return R;

  if (IsTZ) {
    // cttz(-x) -> cttz(x)
    if (match(Op0, m_Neg(m_Value(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(sext(x)) -> cttz(zext(x))
    if (match(Op0, m_OneUse(m_SExt(m_Value(X))))) {
      auto *Zext = IC.Builder.CreateZExt(X, II.getType());
      auto *CttzZext =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Zext, Op1);
      return IC.replaceInstUsesWith(II, CttzZext);
    }

    // Zext doesn't change the number of trailing zeros, so narrow:
    // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsPoison' parameter is 'true'.
    if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) {
      auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X,
                                                    IC.Builder.getTrue());
      auto *ZextCttz = IC.Builder.CreateZExt(Cttz, II.getType());
      return IC.replaceInstUsesWith(II, ZextCttz);
    }

    // cttz(abs(x)) -> cttz(x)
    // cttz(nabs(x)) -> cttz(x)
    Value *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return IC.replaceOperand(II, 0, X);

    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
      return IC.replaceOperand(II, 0, X);
  }

  KnownBits Known = IC.computeKnownBits(Op0, 0, &II);

  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
  unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
                                : Known.countMaxLeadingZeros();
  unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
                                : Known.countMinLeadingZeros();

  // If all bits above (ctlz) or below (cttz) the first known one are known
  // zero, this value is constant.
  // FIXME: This should be in InstSimplify because we're replacing an
  // instruction with a constant.
  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
    return IC.replaceInstUsesWith(II, C);
  }

  // If the input to cttz/ctlz is known to be non-zero,
  // then change the 'ZeroIsPoison' parameter to 'true'
  // because we know the zero behavior can't affect the result.
  if (!Known.One.isZero() ||
      isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
                     &IC.getDominatorTree())) {
    if (!match(II.getArgOperand(1), m_One()))
      return IC.replaceOperand(II, 1, IC.Builder.getTrue());
  }

  // Add range metadata since known bits can't completely reflect what we know.
  // TODO: Handle splat vectors.
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
  if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
        ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  Type *Ty = II.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  Value *Op0 = II.getArgOperand(0);
  Value *X, *Y;

  // ctpop(bitreverse(x)) -> ctpop(x)
  // ctpop(bswap(x)) -> ctpop(x)
  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
    return IC.replaceOperand(II, 0, X);

  // ctpop(rot(x)) -> ctpop(x)
  if ((match(Op0, m_FShl(m_Value(X), m_Value(Y), m_Value())) ||
       match(Op0, m_FShr(m_Value(X), m_Value(Y), m_Value()))) &&
      X == Y)
    return IC.replaceOperand(II, 0, X);

  // ctpop(x | -x) -> bitwidth - cttz(x, false)
  if (Op0->hasOneUse() &&
      match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
    auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
    return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
  }

  // ctpop(~x & (x - 1)) -> cttz(x, false)
  if (match(Op0,
            m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    return CallInst::Create(F, {X, IC.Builder.getFalse()});
  }

  // Zext doesn't change the number of set bits, so narrow:
  // ctpop (zext X) --> zext (ctpop X)
  if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
    Value *NarrowPop = IC.Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, X);
    return CastInst::Create(Instruction::ZExt, NarrowPop, Ty);
  }

  // If the operand is a select with constant arm(s), try to hoist ctpop.
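  // E.g. ctpop (select %c, i32 2, i32 7) --> select %c, i32 1, i32 3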
  if (auto *Sel = dyn_cast<SelectInst>(Op0))
    if (Instruction *R = IC.FoldOpIntoSelect(II, Sel))
      return R;

  KnownBits Known(BitWidth);
  IC.computeKnownBits(Op0, Known, 0, &II);

  // If all bits are zero except for exactly one fixed bit, then the result
  // must be 0 or 1, and we can get that answer by shifting to LSB:
  // ctpop (X & 32) --> (X & 32) >> 5
  // TODO: Investigate removing this as it's likely unnecessary given the below
  // `isKnownToBeAPowerOfTwo` check.
  if ((~Known.Zero).isPowerOf2())
    return BinaryOperator::CreateLShr(
        Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));

  // More generally we can also handle non-constant power of 2 patterns such as
  // shl/shr(Pow2, X), (X & -X), etc... by transforming:
  // ctpop(Pow2OrZero) --> icmp ne X, 0
  if (IC.isKnownToBeAPowerOfTwo(Op0, /* OrZero */ true))
    return CastInst::Create(Instruction::ZExt,
                            IC.Builder.CreateICmp(ICmpInst::ICMP_NE, Op0,
                                                  Constant::getNullValue(Ty)),
                            Ty);

  // FIXME: Try to simplify vectors of integers.
  auto *IT = dyn_cast<IntegerType>(Ty);
  if (!IT)
    return nullptr;

  // Add range metadata since known bits can't completely reflect what we know.
  unsigned MinCount = Known.countMinPopulation();
  unsigned MaxCount = Known.countMaxPopulation();
  if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
        ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

/// Convert a table lookup to shufflevector if the mask is constant.
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
/// which case we could lower the shufflevector with rev64 instructions
/// as it's actually a byte reverse.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<FixedVectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  int Indexes[8];

  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if ((unsigned)Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *V1 = II.getArgOperand(0);
  auto *V2 = Constant::getNullValue(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ArrayRef(Indexes));
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  assert(E.arg_size() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

// Remove trivially empty start/end intrinsic ranges, i.e. a start
// immediately followed by an end (ignoring debuginfo or other
// start/end intrinsics in between). As this handles only the most trivial
// cases, tracking the nesting level is not needed:
//
//   call @llvm.foo.start(i1 0)
//   call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
//   call @llvm.foo.end(i1 0)
//   call @llvm.foo.end(i1 0) ; &I
static bool
removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
                          std::function<bool(const IntrinsicInst &)> IsStart) {
  // We start from the end intrinsic and scan backwards, so that InstCombine
  // has already processed (and potentially removed) all the instructions
  // before the end intrinsic.
  BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
      if (I->isDebugOrPseudoInst() ||
          I->getIntrinsicID() == EndI.getIntrinsicID())
        continue;
      if (IsStart(*I)) {
        if (haveSameOperands(EndI, *I, EndI.arg_size())) {
          IC.eraseInstFromFunction(*I);
          IC.eraseInstFromFunction(EndI);
          return true;
        }
        // Skip start intrinsics that don't pair with this end intrinsic.
        continue;
      }
    }
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
  removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
    return I.getIntrinsicID() == Intrinsic::vastart ||
           I.getIntrinsicID() == Intrinsic::vacopy;
  });
  return nullptr;
}

static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
  assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);
    return &Call;
  }
  return nullptr;
}

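// E.g. canonicalizeConstantArg0ToArg1 rewrites a commutative call such as
// umax(i32 42, %x) into umax(%x, i32 42), so later folds only need to match a
// constant in the second operand.
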
/// Creates a result tuple for an overflow intrinsic \p II with a given
/// \p Result and a constant \p Overflow value.
static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
                                        Constant *Overflow) {
  Constant *V[] = {PoisonValue::get(Result->getType()), Overflow};
  StructType *ST = cast<StructType>(II->getType());
  Constant *Struct = ConstantStruct::get(ST, V);
  return InsertValueInst::Create(Struct, Result, 0);
}

Instruction *
InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
  WithOverflowInst *WO = cast<WithOverflowInst>(II);
  Value *OperationResult = nullptr;
  Constant *OverflowResult = nullptr;
  if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
                            WO->getRHS(), *WO, OperationResult, OverflowResult))
    return createOverflowTuple(WO, OperationResult, OverflowResult);
  return nullptr;
}

static std::optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
                                        const DataLayout &DL,
                                        AssumptionCache *AC,
                                        DominatorTree *DT) {
  KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT);
  if (Known.isNonNegative())
    return false;
  if (Known.isNegative())
    return true;

  Value *X, *Y;
  if (match(Op, m_NSWSub(m_Value(X), m_Value(Y))))
    return isImpliedByDomCondition(ICmpInst::ICMP_SLT, X, Y, CxtI, DL);

  return isImpliedByDomCondition(
      ICmpInst::ICMP_SLT, Op, Constant::getNullValue(Op->getType()), CxtI, DL);
}

/// Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0. This
/// can trigger other combines.
static Instruction *moveAddAfterMinMax(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
          MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
         "Expected a min or max intrinsic");

  // TODO: Match vectors with undef elements, but undef may not propagate.
  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C0)))) ||
      !match(Op1, m_APInt(C1)))
    return nullptr;

  // Check for necessary no-wrap and overflow constraints.
  bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
  auto *Add = cast<BinaryOperator>(Op0);
  if ((IsSigned && !Add->hasNoSignedWrap()) ||
      (!IsSigned && !Add->hasNoUnsignedWrap()))
    return nullptr;

  // If the constant difference overflows, then instsimplify should reduce the
  // min/max to the add or C1.
  bool Overflow;
  APInt CDiff =
      IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
  assert(!Overflow && "Expected simplify of min/max");

  // min/max (add X, C0), C1 --> add (min/max X, C1 - C0), C0
  // Note: the "mismatched" no-overflow setting does not propagate.
  Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
  Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
  return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                  : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
}

/// Match a sadd_sat or ssub_sat which is using min/max to clamp the value.
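/// E.g. (illustrative, i8 saturation performed in i32):
///   smax(smin(add(sext(A), sext(B)), 127), -128)
/// becomes a sext back to i32 of an i8 sadd.sat of the narrowed operands.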
Instruction *InstCombinerImpl::matchSAddSubSat(IntrinsicInst &MinMax1) {
  Type *Ty = MinMax1.getType();

  // We are looking for a tree of:
  // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B))))
  // Where the min and max could be reversed
  Instruction *MinMax2;
  BinaryOperator *AddSub;
  const APInt *MinValue, *MaxValue;
  if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) {
    if (!match(MinMax2, m_SMax(m_BinOp(AddSub), m_APInt(MinValue))))
      return nullptr;
  } else if (match(&MinMax1,
                   m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) {
    if (!match(MinMax2, m_SMin(m_BinOp(AddSub), m_APInt(MaxValue))))
      return nullptr;
  } else
    return nullptr;

  // Check that the constants clamp a saturate, and that the new type would be
  // sensible to convert to.
  if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
    return nullptr;
  // In what bitwidth can this be treated as saturating arithmetic?
  unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
  // FIXME: This isn't quite right for vectors, but using the scalar type is a
  // good first approximation for what should be done there.
  if (!shouldChangeType(Ty->getScalarType()->getIntegerBitWidth(), NewBitWidth))
    return nullptr;

  // Also make sure that the inner min/max and the add/sub have one use.
  if (!MinMax2->hasOneUse() || !AddSub->hasOneUse())
    return nullptr;

  // Create the new type (which can be a vector type)
  Type *NewTy = Ty->getWithNewBitWidth(NewBitWidth);

  Intrinsic::ID IntrinsicID;
  if (AddSub->getOpcode() == Instruction::Add)
    IntrinsicID = Intrinsic::sadd_sat;
  else if (AddSub->getOpcode() == Instruction::Sub)
    IntrinsicID = Intrinsic::ssub_sat;
  else
    return nullptr;

  // The two operands of the add/sub must be nsw-truncatable to the NewTy. This
  // is usually achieved via a sext from a smaller type.
  if (ComputeMaxSignificantBits(AddSub->getOperand(0), 0, AddSub) >
          NewBitWidth ||
      ComputeMaxSignificantBits(AddSub->getOperand(1), 0, AddSub) > NewBitWidth)
    return nullptr;

  // Finally create and return the sat intrinsic, truncated to the new type
  Function *F =
      Intrinsic::getDeclaration(MinMax1.getModule(), IntrinsicID, NewTy);
  Value *AT = Builder.CreateTrunc(AddSub->getOperand(0), NewTy);
  Value *BT = Builder.CreateTrunc(AddSub->getOperand(1), NewTy);
  Value *Sat = Builder.CreateCall(F, {AT, BT});
  return CastInst::Create(Instruction::SExt, Sat, Ty);
}

/// If we have a clamp pattern like max (min X, 42), 41 -- where the output
/// can only be one of two possible constant values -- turn that into a select
/// of constants.
static Instruction *foldClampRangeOfTwo(IntrinsicInst *II,
                                        InstCombiner::BuilderTy &Builder) {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(I1, m_APInt(C1)) || !I0->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
    if (match(I0, m_SMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_SGT;
    break;
  case Intrinsic::smin:
    if (match(I0, m_SMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_SLT;
    break;
  case Intrinsic::umax:
    if (match(I0, m_UMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_UGT;
    break;
  case Intrinsic::umin:
    if (match(I0, m_UMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_ULT;
    break;
  default:
    llvm_unreachable("Expected min/max intrinsic");
  }
  if (Pred == CmpInst::BAD_ICMP_PREDICATE)
    return nullptr;

  // max (min X, 42), 41 --> X > 41 ? 42 : 41
  // min (max X, 42), 43 --> X < 43 ? 42 : 43
  Value *Cmp = Builder.CreateICmp(Pred, X, I1);
  return SelectInst::Create(Cmp, ConstantInt::get(II->getType(), *C0), I1);
}

/// If this min/max has a constant operand and an operand that is a matching
/// min/max with a constant operand, constant-fold the 2 constant operands.
static Instruction *reassociateMinMaxWithConstants(IntrinsicInst *II) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  if (!LHS || LHS->getIntrinsicID() != MinMaxID)
    return nullptr;

  Constant *C0, *C1;
  if (!match(LHS->getArgOperand(1), m_ImmConstant(C0)) ||
      !match(II->getArgOperand(1), m_ImmConstant(C1)))
    return nullptr;

  // max (max X, C0), C1 --> max X, (max C0, C1) --> max X, NewC
  ICmpInst::Predicate Pred = MinMaxIntrinsic::getPredicate(MinMaxID);
  Constant *CondC = ConstantExpr::getICmp(Pred, C0, C1);
  Constant *NewC = ConstantExpr::getSelect(CondC, C0, C1);

  Module *Mod = II->getModule();
  Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType());
  return CallInst::Create(MinMax, {LHS->getArgOperand(0), NewC});
}

/// If this min/max has a matching min/max operand with a constant, try to push
/// the constant operand into this instruction. This can enable more folds.
static Instruction *
reassociateMinMaxWithConstantInOperand(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  // Match and capture a min/max operand candidate.
  Value *X, *Y;
  Constant *C;
  Instruction *Inner;
  if (!match(II, m_c_MaxOrMin(m_OneUse(m_CombineAnd(
                                  m_Instruction(Inner),
                                  m_MaxOrMin(m_Value(X), m_ImmConstant(C)))),
                              m_Value(Y))))
    return nullptr;

  // The inner op must match. Check for constants to avoid infinite loops.
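  // (If X or Y were themselves constants, this transform could keep swapping
  // the same operands back and forth indefinitely.)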
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
  if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
      match(X, m_ImmConstant()) || match(Y, m_ImmConstant()))
    return nullptr;

  // max (max X, C), Y --> max (max X, Y), C
  Function *MinMax =
      Intrinsic::getDeclaration(II->getModule(), MinMaxID, II->getType());
  Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);
  NewInner->takeName(Inner);
  return CallInst::Create(MinMax, {NewInner, C});
}

/// Reduce a sequence of min/max intrinsics with a common operand.
static Instruction *factorizeMinMaxTree(IntrinsicInst *II) {
  // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
      RHS->getIntrinsicID() != MinMaxID ||
      (!LHS->hasOneUse() && !RHS->hasOneUse()))
    return nullptr;

  Value *A = LHS->getArgOperand(0);
  Value *B = LHS->getArgOperand(1);
  Value *C = RHS->getArgOperand(0);
  Value *D = RHS->getArgOperand(1);

  // Look for a common operand.
  Value *MinMaxOp = nullptr;
  Value *ThirdOp = nullptr;
  if (LHS->hasOneUse()) {
    // If the LHS is only used in this chain and the RHS is used outside of it,
    // reuse the RHS min/max because that will eliminate the LHS.
    if (D == A || C == A) {
      // min(min(a, b), min(c, a)) --> min(min(c, a), b)
      // min(min(a, b), min(a, d)) --> min(min(a, d), b)
      MinMaxOp = RHS;
      ThirdOp = B;
    } else if (D == B || C == B) {
      // min(min(a, b), min(c, b)) --> min(min(c, b), a)
      // min(min(a, b), min(b, d)) --> min(min(b, d), a)
      MinMaxOp = RHS;
      ThirdOp = A;
    }
  } else {
    assert(RHS->hasOneUse() && "Expected one-use operand");
    // Reuse the LHS. This will eliminate the RHS.
    if (D == A || D == B) {
      // min(min(a, b), min(c, a)) --> min(min(a, b), c)
      // min(min(a, b), min(c, b)) --> min(min(a, b), c)
      MinMaxOp = LHS;
      ThirdOp = C;
    } else if (C == A || C == B) {
      // min(min(a, b), min(a, d)) --> min(min(a, b), d)
      // min(min(a, b), min(b, d)) --> min(min(a, b), d)
      MinMaxOp = LHS;
      ThirdOp = D;
    }
  }

  if (!MinMaxOp || !ThirdOp)
    return nullptr;

  Module *Mod = II->getModule();
  Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType());
  return CallInst::Create(MinMax, {MinMaxOp, ThirdOp});
}

/// If all arguments of the intrinsic are unary shuffles with the same mask,
/// try to shuffle after the intrinsic.
static Instruction *
foldShuffledIntrinsicOperands(IntrinsicInst *II,
                              InstCombiner::BuilderTy &Builder) {
  // TODO: This should be extended to handle other intrinsics like fshl, ctpop,
  //       etc. Use llvm::isTriviallyVectorizable() and related to determine
  //       which intrinsics are safe to shuffle?
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::fma:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
    break;
  default:
    return nullptr;
  }

  Value *X;
  ArrayRef<int> Mask;
  if (!match(II->getArgOperand(0),
             m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))
    return nullptr;

  // At least 1 operand must have 1 use because we are creating 2 instructions.
  if (none_of(II->args(), [](Value *V) { return V->hasOneUse(); }))
    return nullptr;

  // See if all arguments are shuffled with the same mask.
  SmallVector<Value *, 4> NewArgs(II->arg_size());
  NewArgs[0] = X;
  Type *SrcTy = X->getType();
  for (unsigned i = 1, e = II->arg_size(); i != e; ++i) {
    if (!match(II->getArgOperand(i),
               m_Shuffle(m_Value(X), m_Undef(), m_SpecificMask(Mask))) ||
        X->getType() != SrcTy)
      return nullptr;
    NewArgs[i] = X;
  }

  // intrinsic (shuf X, M), (shuf Y, M), ... --> shuf (intrinsic X, Y, ...), M
  Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
  Value *NewIntrinsic =
      Builder.CreateIntrinsic(II->getIntrinsicID(), SrcTy, NewArgs, FPI);
  return new ShuffleVectorInst(NewIntrinsic, Mask);
}

/// CallInst simplification. This mostly only handles folding of intrinsic
/// instructions. For normal calls, it allows visitCallBase to do the heavy
/// lifting.
Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
  // Don't try to simplify calls without uses. It will not do anything useful,
  // but will result in the following folds being skipped.
  if (!CI.use_empty())
    if (Value *V = simplifyCall(&CI, SQ.getWithInstruction(&CI)))
      return replaceInstUsesWith(CI, V);

  if (Value *FreedOp = getFreedOperand(&CI, &TLI))
    return visitFree(CI, FreedOp);

  // If the caller function (i.e. us, the function that contains this CallInst)
  // is nounwind, mark the call as nounwind, even if the callee isn't.
  if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallBase(CI);

  // For atomic unordered mem intrinsics, if the length is not positive or is
  // not a multiple of the element size, then the behavior is undefined.
  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
    if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
      if (NumBytes->getSExtValue() < 0 ||
          (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
        CreateNonTerminatorUnreachable(AMI);
        assert(AMI->getType()->isVoidTy() &&
               "non void atomic unordered mem intrinsic");
        return eraseInstFromFunction(*AMI);
      }

  // Intrinsics cannot occur in an invoke or a callbr, so handle them here
  // instead of in visitCallBase.
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return eraseInstFromFunction(CI);
    }

    // No other transformations apply to volatile transfers.
    if (auto *M = dyn_cast<MemIntrinsic>(MI))
      if (M->isVolatile())
        return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getModule();
          Intrinsic::ID MemCpyID =
              isa<AtomicMemMoveInst>(MMI)
                  ? Intrinsic::memcpy_element_unordered_atomic
                  : Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return eraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemTransfer(MTI))
        return I;
    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  // For fixed width vector result intrinsics, use the generic demanded vector
  // support.
  if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
    auto VWidth = IIFVTy->getNumElements();
    APInt UndefElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
      if (V != II)
        return replaceInstUsesWith(*II, V);
      return II;
    }
  }

  if (II->isCommutative()) {
    if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
      return NewCall;
  }

  // Unused constrained FP intrinsic calls may have a declared side effect,
  // which prevents them from being removed. In some cases, however, the side
  // effect is actually absent. To detect this case, call
  // simplifyConstrainedFPCall. If it returns a replacement, the call may be
  // removed.
  if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
    if (simplifyConstrainedFPCall(&CI, SQ.getWithInstruction(&CI)))
      return eraseInstFromFunction(CI);
  }

  Intrinsic::ID IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::objectsize:
    if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false))
      return replaceInstUsesWith(CI, V);
    return nullptr;
  case Intrinsic::abs: {
    Value *IIOperand = II->getArgOperand(0);
    bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();

    // abs(-x) -> abs(x)
    // TODO: Copy nsw if it was present on the neg?
    Value *X;
    if (match(IIOperand, m_Neg(m_Value(X))))
      return replaceOperand(*II, 0, X);
    if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X)))))
      return replaceOperand(*II, 0, X);
    if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X))))
      return replaceOperand(*II, 0, X);

    if (std::optional<bool> Sign = getKnownSign(IIOperand, II, DL, &AC, &DT)) {
      // abs(x) -> x if x >= 0
      if (!*Sign)
        return replaceInstUsesWith(*II, IIOperand);

      // abs(x) -> -x if x < 0
      if (IntMinIsPoison)
        return BinaryOperator::CreateNSWNeg(IIOperand);
      return BinaryOperator::CreateNeg(IIOperand);
    }

    // abs (sext X) --> zext (abs X*)
    // Clear the IsIntMin (nsw) bit on the abs to allow narrowing.
    if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) {
      Value *NarrowAbs =
          Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());
      return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType());
    }

    // Match a complicated way to check if a number is odd/even:
    // abs (srem X, 2) --> and X, 1
    const APInt *C;
    if (match(IIOperand, m_SRem(m_Value(X), m_APInt(C))) && *C == 2)
      return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));

    break;
  }
  case Intrinsic::umin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    // umin(x, 1) == zext(x != 0)
    if (match(I1, m_One())) {
      assert(II->getType()->getScalarSizeInBits() != 1 &&
             "Expected simplify of umin with max constant");
      Value *Zero = Constant::getNullValue(I0->getType());
      Value *Cmp = Builder.CreateICmpNE(I0, Zero);
      return CastInst::Create(Instruction::ZExt, Cmp, II->getType());
    }
    [[fallthrough]];
  }
  case Intrinsic::umax: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) &&
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
    }
    Constant *C;
    if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_Constant(C)) &&
        I0->hasOneUse()) {
      Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
      if (ConstantExpr::getZExt(NarrowC, II->getType()) == C) {
        Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
        return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
      }
    }
    // If both operands of unsigned min/max are sign-extended, it is still ok
    // to narrow the operation.
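    // (sext preserves the relative unsigned order of two values of the same
    // source width: non-negatives keep small magnitudes and negatives map to
    // the high end of the wider unsigned range in both widths.)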
    [[fallthrough]];
  }
  case Intrinsic::smax:
  case Intrinsic::smin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) &&
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
    }

    Constant *C;
    if (match(I0, m_SExt(m_Value(X))) && match(I1, m_Constant(C)) &&
        I0->hasOneUse()) {
      Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
      if (ConstantExpr::getSExt(NarrowC, II->getType()) == C) {
        Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
        return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
      }
    }

    if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
      // smax (neg nsw X), (neg nsw Y) --> neg nsw (smin X, Y)
      // smin (neg nsw X), (neg nsw Y) --> neg nsw (smax X, Y)
      // TODO: Canonicalize neg after min/max if I1 is constant.
      if (match(I0, m_NSWNeg(m_Value(X))) && match(I1, m_NSWNeg(m_Value(Y))) &&
          (I0->hasOneUse() || I1->hasOneUse())) {
        Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
        Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
        return BinaryOperator::CreateNSWNeg(InvMaxMin);
      }
    }

    // If we can eliminate ~A and Y is free to invert:
    // max ~A, Y --> ~(min A, ~Y)
    //
    // Examples:
    // max ~A, ~Y --> ~(min A, Y)
    // max ~A, C --> ~(min A, ~C)
    // max ~A, (max ~Y, ~Z) --> ~min( A, (min Y, Z))
    auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
      Value *A;
      if (match(X, m_OneUse(m_Not(m_Value(A)))) &&
          !isFreeToInvert(A, A->hasOneUse()) &&
          isFreeToInvert(Y, Y->hasOneUse())) {
        Value *NotY = Builder.CreateNot(Y);
        Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
        Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
        return BinaryOperator::CreateNot(InvMaxMin);
      }
      return nullptr;
    };

    if (Instruction *I = moveNotAfterMinMax(I0, I1))
      return I;
    if (Instruction *I = moveNotAfterMinMax(I1, I0))
      return I;

    if (Instruction *I = moveAddAfterMinMax(II, Builder))
      return I;

    // smax(X, -X) --> abs(X)
    // smin(X, -X) --> -abs(X)
    // umax(X, -X) --> -abs(X)
    // umin(X, -X) --> abs(X)
    if (isKnownNegation(I0, I1)) {
      // We can choose either operand as the input to abs(), but if we can
      // eliminate the only use of a value, that's better for subsequent
      // transforms/analysis.
      if (I0->hasOneUse() && !I1->hasOneUse())
        std::swap(I0, I1);

      // This is some variant of abs(). See if we can propagate 'nsw' to the abs
      // operation and potentially its negation.
      bool IntMinIsPoison = isKnownNegation(I0, I1, /* NeedNSW */ true);
      Value *Abs = Builder.CreateBinaryIntrinsic(
          Intrinsic::abs, I0,
          ConstantInt::getBool(II->getContext(), IntMinIsPoison));

      // We don't have a "nabs" intrinsic, so negate if needed based on the
      // max/min operation.
1428 if (IID == Intrinsic::smin || IID == Intrinsic::umax) 1429 Abs = Builder.CreateNeg(Abs, "nabs", /* NUW */ false, IntMinIsPoison); 1430 return replaceInstUsesWith(CI, Abs); 1431 } 1432 1433 if (Instruction *Sel = foldClampRangeOfTwo(II, Builder)) 1434 return Sel; 1435 1436 if (Instruction *SAdd = matchSAddSubSat(*II)) 1437 return SAdd; 1438 1439 if (match(I1, m_ImmConstant())) 1440 if (auto *Sel = dyn_cast<SelectInst>(I0)) 1441 if (Instruction *R = FoldOpIntoSelect(*II, Sel)) 1442 return R; 1443 1444 if (Instruction *NewMinMax = reassociateMinMaxWithConstants(II)) 1445 return NewMinMax; 1446 1447 if (Instruction *R = reassociateMinMaxWithConstantInOperand(II, Builder)) 1448 return R; 1449 1450 if (Instruction *NewMinMax = factorizeMinMaxTree(II)) 1451 return NewMinMax; 1452 1453 break; 1454 } 1455 case Intrinsic::bitreverse: { 1456 // bitrev (zext i1 X to ?) --> X ? SignBitC : 0 1457 Value *X; 1458 if (match(II->getArgOperand(0), m_ZExt(m_Value(X))) && 1459 X->getType()->isIntOrIntVectorTy(1)) { 1460 Type *Ty = II->getType(); 1461 APInt SignBit = APInt::getSignMask(Ty->getScalarSizeInBits()); 1462 return SelectInst::Create(X, ConstantInt::get(Ty, SignBit), 1463 ConstantInt::getNullValue(Ty)); 1464 } 1465 break; 1466 } 1467 case Intrinsic::bswap: { 1468 Value *IIOperand = II->getArgOperand(0); 1469 1470 // Try to canonicalize bswap-of-logical-shift-by-8-bit-multiple as 1471 // inverse-shift-of-bswap: 1472 // bswap (shl X, Y) --> lshr (bswap X), Y 1473 // bswap (lshr X, Y) --> shl (bswap X), Y 1474 Value *X, *Y; 1475 if (match(IIOperand, m_OneUse(m_LogicalShift(m_Value(X), m_Value(Y))))) { 1476 // The transform allows undef vector elements, so try a constant match 1477 // first. If knownbits can handle that case, that clause could be removed. 1478 unsigned BitWidth = IIOperand->getType()->getScalarSizeInBits(); 1479 const APInt *C; 1480 if ((match(Y, m_APIntAllowUndef(C)) && (*C & 7) == 0) || 1481 MaskedValueIsZero(Y, APInt::getLowBitsSet(BitWidth, 3))) { 1482 Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X); 1483 BinaryOperator::BinaryOps InverseShift = 1484 cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl 1485 ? 
Instruction::LShr 1486 : Instruction::Shl; 1487 return BinaryOperator::Create(InverseShift, NewSwap, Y); 1488 } 1489 } 1490 1491 KnownBits Known = computeKnownBits(IIOperand, 0, II); 1492 uint64_t LZ = alignDown(Known.countMinLeadingZeros(), 8); 1493 uint64_t TZ = alignDown(Known.countMinTrailingZeros(), 8); 1494 unsigned BW = Known.getBitWidth(); 1495 1496 // bswap(x) -> shift(x) if x has exactly one "active byte" 1497 if (BW - LZ - TZ == 8) { 1498 assert(LZ != TZ && "active byte cannot be in the middle"); 1499 if (LZ > TZ) // -> shl(x) if the "active byte" is in the low part of x 1500 return BinaryOperator::CreateNUWShl( 1501 IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ)); 1502 // -> lshr(x) if the "active byte" is in the high part of x 1503 return BinaryOperator::CreateExactLShr( 1504 IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ)); 1505 } 1506 1507 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c)) 1508 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) { 1509 unsigned C = X->getType()->getScalarSizeInBits() - BW; 1510 Value *CV = ConstantInt::get(X->getType(), C); 1511 Value *V = Builder.CreateLShr(X, CV); 1512 return new TruncInst(V, IIOperand->getType()); 1513 } 1514 break; 1515 } 1516 case Intrinsic::masked_load: 1517 if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II)) 1518 return replaceInstUsesWith(CI, SimplifiedMaskedOp); 1519 break; 1520 case Intrinsic::masked_store: 1521 return simplifyMaskedStore(*II); 1522 case Intrinsic::masked_gather: 1523 return simplifyMaskedGather(*II); 1524 case Intrinsic::masked_scatter: 1525 return simplifyMaskedScatter(*II); 1526 case Intrinsic::launder_invariant_group: 1527 case Intrinsic::strip_invariant_group: 1528 if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this)) 1529 return replaceInstUsesWith(*II, SkippedBarrier); 1530 break; 1531 case Intrinsic::powi: 1532 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) { 1533 // 0 and 1 are handled in instsimplify 1534 // powi(x, -1) -> 1/x 1535 if (Power->isMinusOne()) 1536 return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0), 1537 II->getArgOperand(0), II); 1538 // powi(x, 2) -> x*x 1539 if (Power->equalsInt(2)) 1540 return BinaryOperator::CreateFMulFMF(II->getArgOperand(0), 1541 II->getArgOperand(0), II); 1542 1543 if (!Power->getValue()[0]) { 1544 Value *X; 1545 // If power is even: 1546 // powi(-x, p) -> powi(x, p) 1547 // powi(fabs(x), p) -> powi(x, p) 1548 // powi(copysign(x, y), p) -> powi(x, p) 1549 if (match(II->getArgOperand(0), m_FNeg(m_Value(X))) || 1550 match(II->getArgOperand(0), m_FAbs(m_Value(X))) || 1551 match(II->getArgOperand(0), 1552 m_Intrinsic<Intrinsic::copysign>(m_Value(X), m_Value()))) 1553 return replaceOperand(*II, 0, X); 1554 } 1555 } 1556 break; 1557 1558 case Intrinsic::cttz: 1559 case Intrinsic::ctlz: 1560 if (auto *I = foldCttzCtlz(*II, *this)) 1561 return I; 1562 break; 1563 1564 case Intrinsic::ctpop: 1565 if (auto *I = foldCtpop(*II, *this)) 1566 return I; 1567 break; 1568 1569 case Intrinsic::fshl: 1570 case Intrinsic::fshr: { 1571 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1); 1572 Type *Ty = II->getType(); 1573 unsigned BitWidth = Ty->getScalarSizeInBits(); 1574 Constant *ShAmtC; 1575 if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC))) { 1576 // Canonicalize a shift amount constant operand to modulo the bit-width. 
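      // E.g. (illustrative): fshl i32 %x, i32 %y, i32 33
      // becomes fshl i32 %x, i32 %y, i32 1, since funnel-shift amounts are
      // interpreted modulo the bit width.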
1577 Constant *WidthC = ConstantInt::get(Ty, BitWidth); 1578 Constant *ModuloC = 1579 ConstantFoldBinaryOpOperands(Instruction::URem, ShAmtC, WidthC, DL); 1580 if (!ModuloC) 1581 return nullptr; 1582 if (ModuloC != ShAmtC) 1583 return replaceOperand(*II, 2, ModuloC); 1584 1585 assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) == 1586 ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) && 1587 "Shift amount expected to be modulo bitwidth"); 1588 1589 // Canonicalize funnel shift right by constant to funnel shift left. This 1590 // is not entirely arbitrary. For historical reasons, the backend may 1591 // recognize rotate left patterns but miss rotate right patterns. 1592 if (IID == Intrinsic::fshr) { 1593 // fshr X, Y, C --> fshl X, Y, (BitWidth - C) 1594 Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC); 1595 Module *Mod = II->getModule(); 1596 Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty); 1597 return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC }); 1598 } 1599 assert(IID == Intrinsic::fshl && 1600 "All funnel shifts by simple constants should go left"); 1601 1602 // fshl(X, 0, C) --> shl X, C 1603 // fshl(X, undef, C) --> shl X, C 1604 if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef())) 1605 return BinaryOperator::CreateShl(Op0, ShAmtC); 1606 1607 // fshl(0, X, C) --> lshr X, (BW-C) 1608 // fshl(undef, X, C) --> lshr X, (BW-C) 1609 if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef())) 1610 return BinaryOperator::CreateLShr(Op1, 1611 ConstantExpr::getSub(WidthC, ShAmtC)); 1612 1613 // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form) 1614 if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) { 1615 Module *Mod = II->getModule(); 1616 Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty); 1617 return CallInst::Create(Bswap, { Op0 }); 1618 } 1619 } 1620 1621 // Left or right might be masked. 1622 if (SimplifyDemandedInstructionBits(*II)) 1623 return &CI; 1624 1625 // The shift amount (operand 2) of a funnel shift is modulo the bitwidth, 1626 // so only the low bits of the shift amount are demanded if the bitwidth is 1627 // a power-of-2. 1628 if (!isPowerOf2_32(BitWidth)) 1629 break; 1630 APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth)); 1631 KnownBits Op2Known(BitWidth); 1632 if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known)) 1633 return &CI; 1634 break; 1635 } 1636 case Intrinsic::uadd_with_overflow: 1637 case Intrinsic::sadd_with_overflow: { 1638 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 1639 return I; 1640 1641 // Given 2 constant operands whose sum does not overflow: 1642 // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1 1643 // saddo (X +nsw C0), C1 -> saddo X, C0 + C1 1644 Value *X; 1645 const APInt *C0, *C1; 1646 Value *Arg0 = II->getArgOperand(0); 1647 Value *Arg1 = II->getArgOperand(1); 1648 bool IsSigned = IID == Intrinsic::sadd_with_overflow; 1649 bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0))) 1650 : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0))); 1651 if (HasNWAdd && match(Arg1, m_APInt(C1))) { 1652 bool Overflow; 1653 APInt NewC = 1654 IsSigned ? 
C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow); 1655 if (!Overflow) 1656 return replaceInstUsesWith( 1657 *II, Builder.CreateBinaryIntrinsic( 1658 IID, X, ConstantInt::get(Arg1->getType(), NewC))); 1659 } 1660 break; 1661 } 1662 1663 case Intrinsic::umul_with_overflow: 1664 case Intrinsic::smul_with_overflow: 1665 case Intrinsic::usub_with_overflow: 1666 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 1667 return I; 1668 break; 1669 1670 case Intrinsic::ssub_with_overflow: { 1671 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 1672 return I; 1673 1674 Constant *C; 1675 Value *Arg0 = II->getArgOperand(0); 1676 Value *Arg1 = II->getArgOperand(1); 1677 // Given a constant C that is not the minimum signed value 1678 // for an integer of a given bit width: 1679 // 1680 // ssubo X, C -> saddo X, -C 1681 if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) { 1682 Value *NegVal = ConstantExpr::getNeg(C); 1683 // Build a saddo call that is equivalent to the discovered 1684 // ssubo call. 1685 return replaceInstUsesWith( 1686 *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow, 1687 Arg0, NegVal)); 1688 } 1689 1690 break; 1691 } 1692 1693 case Intrinsic::uadd_sat: 1694 case Intrinsic::sadd_sat: 1695 case Intrinsic::usub_sat: 1696 case Intrinsic::ssub_sat: { 1697 SaturatingInst *SI = cast<SaturatingInst>(II); 1698 Type *Ty = SI->getType(); 1699 Value *Arg0 = SI->getLHS(); 1700 Value *Arg1 = SI->getRHS(); 1701 1702 // Make use of known overflow information. 1703 OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(), 1704 Arg0, Arg1, SI); 1705 switch (OR) { 1706 case OverflowResult::MayOverflow: 1707 break; 1708 case OverflowResult::NeverOverflows: 1709 if (SI->isSigned()) 1710 return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1); 1711 else 1712 return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1); 1713 case OverflowResult::AlwaysOverflowsLow: { 1714 unsigned BitWidth = Ty->getScalarSizeInBits(); 1715 APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned()); 1716 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min)); 1717 } 1718 case OverflowResult::AlwaysOverflowsHigh: { 1719 unsigned BitWidth = Ty->getScalarSizeInBits(); 1720 APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned()); 1721 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max)); 1722 } 1723 } 1724 1725 // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN 1726 Constant *C; 1727 if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) && 1728 C->isNotMinSignedValue()) { 1729 Value *NegVal = ConstantExpr::getNeg(C); 1730 return replaceInstUsesWith( 1731 *II, Builder.CreateBinaryIntrinsic( 1732 Intrinsic::sadd_sat, Arg0, NegVal)); 1733 } 1734 1735 // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2)) 1736 // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2)) 1737 // if Val and Val2 have the same sign 1738 if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) { 1739 Value *X; 1740 const APInt *Val, *Val2; 1741 APInt NewVal; 1742 bool IsUnsigned = 1743 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat; 1744 if (Other->getIntrinsicID() == IID && 1745 match(Arg1, m_APInt(Val)) && 1746 match(Other->getArgOperand(0), m_Value(X)) && 1747 match(Other->getArgOperand(1), m_APInt(Val2))) { 1748 if (IsUnsigned) 1749 NewVal = Val->uadd_sat(*Val2); 1750 else if (Val->isNonNegative() == Val2->isNonNegative()) { 1751 bool Overflow; 1752 NewVal = Val->sadd_ov(*Val2, Overflow); 1753 if (Overflow) { 1754 // Both adds together may add more than SignedMaxValue 1755 
// without saturating the final result. 1756 break; 1757 } 1758 } else { 1759 // Cannot fold saturated addition with different signs. 1760 break; 1761 } 1762 1763 return replaceInstUsesWith( 1764 *II, Builder.CreateBinaryIntrinsic( 1765 IID, X, ConstantInt::get(II->getType(), NewVal))); 1766 } 1767 } 1768 break; 1769 } 1770 1771 case Intrinsic::minnum: 1772 case Intrinsic::maxnum: 1773 case Intrinsic::minimum: 1774 case Intrinsic::maximum: { 1775 Value *Arg0 = II->getArgOperand(0); 1776 Value *Arg1 = II->getArgOperand(1); 1777 Value *X, *Y; 1778 if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) && 1779 (Arg0->hasOneUse() || Arg1->hasOneUse())) { 1780 // If both operands are negated, invert the call and negate the result: 1781 // min(-X, -Y) --> -(max(X, Y)) 1782 // max(-X, -Y) --> -(min(X, Y)) 1783 Intrinsic::ID NewIID; 1784 switch (IID) { 1785 case Intrinsic::maxnum: 1786 NewIID = Intrinsic::minnum; 1787 break; 1788 case Intrinsic::minnum: 1789 NewIID = Intrinsic::maxnum; 1790 break; 1791 case Intrinsic::maximum: 1792 NewIID = Intrinsic::minimum; 1793 break; 1794 case Intrinsic::minimum: 1795 NewIID = Intrinsic::maximum; 1796 break; 1797 default: 1798 llvm_unreachable("unexpected intrinsic ID"); 1799 } 1800 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II); 1801 Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall); 1802 FNeg->copyIRFlags(II); 1803 return FNeg; 1804 } 1805 1806 // m(m(X, C2), C1) -> m(X, C) 1807 const APFloat *C1, *C2; 1808 if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) { 1809 if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) && 1810 ((match(M->getArgOperand(0), m_Value(X)) && 1811 match(M->getArgOperand(1), m_APFloat(C2))) || 1812 (match(M->getArgOperand(1), m_Value(X)) && 1813 match(M->getArgOperand(0), m_APFloat(C2))))) { 1814 APFloat Res(0.0); 1815 switch (IID) { 1816 case Intrinsic::maxnum: 1817 Res = maxnum(*C1, *C2); 1818 break; 1819 case Intrinsic::minnum: 1820 Res = minnum(*C1, *C2); 1821 break; 1822 case Intrinsic::maximum: 1823 Res = maximum(*C1, *C2); 1824 break; 1825 case Intrinsic::minimum: 1826 Res = minimum(*C1, *C2); 1827 break; 1828 default: 1829 llvm_unreachable("unexpected intrinsic ID"); 1830 } 1831 Instruction *NewCall = Builder.CreateBinaryIntrinsic( 1832 IID, X, ConstantFP::get(Arg0->getType(), Res), II); 1833 // TODO: Conservatively intersecting FMF. If Res == C2, the transform 1834 // was a simplification (so Arg0 and its original flags could 1835 // propagate?) 1836 NewCall->andIRFlags(M); 1837 return replaceInstUsesWith(*II, NewCall); 1838 } 1839 } 1840 1841 // m((fpext X), (fpext Y)) -> fpext (m(X, Y)) 1842 if (match(Arg0, m_OneUse(m_FPExt(m_Value(X)))) && 1843 match(Arg1, m_OneUse(m_FPExt(m_Value(Y)))) && 1844 X->getType() == Y->getType()) { 1845 Value *NewCall = 1846 Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName()); 1847 return new FPExtInst(NewCall, II->getType()); 1848 } 1849 1850 // max X, -X --> fabs X 1851 // min X, -X --> -(fabs X) 1852 // TODO: Remove one-use limitation? That is obviously better for max. 1853 // It would be an extra instruction for min (fnabs), but that is 1854 // still likely better for analysis and codegen. 
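    // For example (a sketch; %n has no other use):
    //   %n = fneg float %x
    //   %m = call float @llvm.maxnum.f32(float %x, float %n)
    // becomes:
    //   %m = call float @llvm.fabs.f32(float %x)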
1855 if ((match(Arg0, m_OneUse(m_FNeg(m_Value(X)))) && Arg1 == X) || 1856 (match(Arg1, m_OneUse(m_FNeg(m_Value(X)))) && Arg0 == X)) { 1857 Value *R = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II); 1858 if (IID == Intrinsic::minimum || IID == Intrinsic::minnum) 1859 R = Builder.CreateFNegFMF(R, II); 1860 return replaceInstUsesWith(*II, R); 1861 } 1862 1863 break; 1864 } 1865 case Intrinsic::matrix_multiply: { 1866 // Optimize negation in matrix multiplication. 1867 1868 // -A * -B -> A * B 1869 Value *A, *B; 1870 if (match(II->getArgOperand(0), m_FNeg(m_Value(A))) && 1871 match(II->getArgOperand(1), m_FNeg(m_Value(B)))) { 1872 replaceOperand(*II, 0, A); 1873 replaceOperand(*II, 1, B); 1874 return II; 1875 } 1876 1877 Value *Op0 = II->getOperand(0); 1878 Value *Op1 = II->getOperand(1); 1879 Value *OpNotNeg, *NegatedOp; 1880 unsigned NegatedOpArg, OtherOpArg; 1881 if (match(Op0, m_FNeg(m_Value(OpNotNeg)))) { 1882 NegatedOp = Op0; 1883 NegatedOpArg = 0; 1884 OtherOpArg = 1; 1885 } else if (match(Op1, m_FNeg(m_Value(OpNotNeg)))) { 1886 NegatedOp = Op1; 1887 NegatedOpArg = 1; 1888 OtherOpArg = 0; 1889 } else 1890 // Multiplication doesn't have a negated operand. 1891 break; 1892 1893 // Only optimize if the negated operand has only one use. 1894 if (!NegatedOp->hasOneUse()) 1895 break; 1896 1897 Value *OtherOp = II->getOperand(OtherOpArg); 1898 VectorType *RetTy = cast<VectorType>(II->getType()); 1899 VectorType *NegatedOpTy = cast<VectorType>(NegatedOp->getType()); 1900 VectorType *OtherOpTy = cast<VectorType>(OtherOp->getType()); 1901 ElementCount NegatedCount = NegatedOpTy->getElementCount(); 1902 ElementCount OtherCount = OtherOpTy->getElementCount(); 1903 ElementCount RetCount = RetTy->getElementCount(); 1904 // (-A) * B -> A * (-B), if it is cheaper to negate B and vice versa. 1905 if (ElementCount::isKnownGT(NegatedCount, OtherCount) && 1906 ElementCount::isKnownLT(OtherCount, RetCount)) { 1907 Value *InverseOtherOp = Builder.CreateFNeg(OtherOp); 1908 replaceOperand(*II, NegatedOpArg, OpNotNeg); 1909 replaceOperand(*II, OtherOpArg, InverseOtherOp); 1910 return II; 1911 } 1912 // (-A) * B -> -(A * B), if it is cheaper to negate the result 1913 if (ElementCount::isKnownGT(NegatedCount, RetCount)) { 1914 SmallVector<Value *, 5> NewArgs(II->args()); 1915 NewArgs[NegatedOpArg] = OpNotNeg; 1916 Instruction *NewMul = 1917 Builder.CreateIntrinsic(II->getType(), IID, NewArgs, II); 1918 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(NewMul, II)); 1919 } 1920 break; 1921 } 1922 case Intrinsic::fmuladd: { 1923 // Canonicalize fast fmuladd to the separate fmul + fadd. 1924 if (II->isFast()) { 1925 BuilderTy::FastMathFlagGuard Guard(Builder); 1926 Builder.setFastMathFlags(II->getFastMathFlags()); 1927 Value *Mul = Builder.CreateFMul(II->getArgOperand(0), 1928 II->getArgOperand(1)); 1929 Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2)); 1930 Add->takeName(II); 1931 return replaceInstUsesWith(*II, Add); 1932 } 1933 1934 // Try to simplify the underlying FMul. 
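    // E.g. (hypothetical): in
    //   call float @llvm.fmuladd.f32(float %x, float 1.0, float %z)
    // the multiply simplifies to %x, so the whole call becomes
    //   fadd float %x, %z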
1935 if (Value *V = simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1), 1936 II->getFastMathFlags(), 1937 SQ.getWithInstruction(II))) { 1938 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2)); 1939 FAdd->copyFastMathFlags(II); 1940 return FAdd; 1941 } 1942 1943 [[fallthrough]]; 1944 } 1945 case Intrinsic::fma: { 1946 // fma fneg(x), fneg(y), z -> fma x, y, z 1947 Value *Src0 = II->getArgOperand(0); 1948 Value *Src1 = II->getArgOperand(1); 1949 Value *X, *Y; 1950 if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) { 1951 replaceOperand(*II, 0, X); 1952 replaceOperand(*II, 1, Y); 1953 return II; 1954 } 1955 1956 // fma fabs(x), fabs(x), z -> fma x, x, z 1957 if (match(Src0, m_FAbs(m_Value(X))) && 1958 match(Src1, m_FAbs(m_Specific(X)))) { 1959 replaceOperand(*II, 0, X); 1960 replaceOperand(*II, 1, X); 1961 return II; 1962 } 1963 1964 // Try to simplify the underlying FMul. We can only apply simplifications 1965 // that do not require rounding. 1966 if (Value *V = simplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1), 1967 II->getFastMathFlags(), 1968 SQ.getWithInstruction(II))) { 1969 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2)); 1970 FAdd->copyFastMathFlags(II); 1971 return FAdd; 1972 } 1973 1974 // fma x, y, 0 -> fmul x, y 1975 // This is always valid for -0.0, but requires nsz for +0.0 as 1976 // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own. 1977 if (match(II->getArgOperand(2), m_NegZeroFP()) || 1978 (match(II->getArgOperand(2), m_PosZeroFP()) && 1979 II->getFastMathFlags().noSignedZeros())) 1980 return BinaryOperator::CreateFMulFMF(Src0, Src1, II); 1981 1982 break; 1983 } 1984 case Intrinsic::copysign: { 1985 Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1); 1986 if (SignBitMustBeZero(Sign, &TLI)) { 1987 // If we know that the sign argument is positive, reduce to FABS: 1988 // copysign Mag, +Sign --> fabs Mag 1989 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II); 1990 return replaceInstUsesWith(*II, Fabs); 1991 } 1992 // TODO: There should be a ValueTracking sibling like SignBitMustBeOne. 1993 const APFloat *C; 1994 if (match(Sign, m_APFloat(C)) && C->isNegative()) { 1995 // If we know that the sign argument is negative, reduce to FNABS: 1996 // copysign Mag, -Sign --> fneg (fabs Mag) 1997 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II); 1998 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II)); 1999 } 2000 2001 // Propagate sign argument through nested calls: 2002 // copysign Mag, (copysign ?, X) --> copysign Mag, X 2003 Value *X; 2004 if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X)))) 2005 return replaceOperand(*II, 1, X); 2006 2007 // Peek through changes of magnitude's sign-bit. 
This call rewrites those: 2008 // copysign (fabs X), Sign --> copysign X, Sign 2009 // copysign (fneg X), Sign --> copysign X, Sign 2010 if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X)))) 2011 return replaceOperand(*II, 0, X); 2012 2013 break; 2014 } 2015 case Intrinsic::fabs: { 2016 Value *Cond, *TVal, *FVal; 2017 if (match(II->getArgOperand(0), 2018 m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) { 2019 // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF 2020 if (isa<Constant>(TVal) && isa<Constant>(FVal)) { 2021 CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal}); 2022 CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal}); 2023 return SelectInst::Create(Cond, AbsT, AbsF); 2024 } 2025 // fabs (select Cond, -FVal, FVal) --> fabs FVal 2026 if (match(TVal, m_FNeg(m_Specific(FVal)))) 2027 return replaceOperand(*II, 0, FVal); 2028 // fabs (select Cond, TVal, -TVal) --> fabs TVal 2029 if (match(FVal, m_FNeg(m_Specific(TVal)))) 2030 return replaceOperand(*II, 0, TVal); 2031 } 2032 2033 Value *Magnitude, *Sign; 2034 if (match(II->getArgOperand(0), 2035 m_CopySign(m_Value(Magnitude), m_Value(Sign)))) { 2036 // fabs (copysign x, y) -> (fabs x) 2037 CallInst *AbsSign = 2038 Builder.CreateCall(II->getCalledFunction(), {Magnitude}); 2039 AbsSign->copyFastMathFlags(II); 2040 return replaceInstUsesWith(*II, AbsSign); 2041 } 2042 2043 [[fallthrough]]; 2044 } 2045 case Intrinsic::ceil: 2046 case Intrinsic::floor: 2047 case Intrinsic::round: 2048 case Intrinsic::roundeven: 2049 case Intrinsic::nearbyint: 2050 case Intrinsic::rint: 2051 case Intrinsic::trunc: { 2052 Value *ExtSrc; 2053 if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) { 2054 // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x) 2055 Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II); 2056 return new FPExtInst(NarrowII, II->getType()); 2057 } 2058 break; 2059 } 2060 case Intrinsic::cos: 2061 case Intrinsic::amdgcn_cos: { 2062 Value *X; 2063 Value *Src = II->getArgOperand(0); 2064 if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) { 2065 // cos(-x) -> cos(x) 2066 // cos(fabs(x)) -> cos(x) 2067 return replaceOperand(*II, 0, X); 2068 } 2069 break; 2070 } 2071 case Intrinsic::sin: { 2072 Value *X; 2073 if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) { 2074 // sin(-x) --> -sin(x) 2075 Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II); 2076 Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin); 2077 FNeg->copyFastMathFlags(II); 2078 return FNeg; 2079 } 2080 break; 2081 } 2082 case Intrinsic::ptrauth_auth: 2083 case Intrinsic::ptrauth_resign: { 2084 // (sign|resign) + (auth|resign) can be folded by omitting the middle 2085 // sign+auth component if the key and discriminator match. 2086 bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign; 2087 Value *Key = II->getArgOperand(1); 2088 Value *Disc = II->getArgOperand(2); 2089 2090 // AuthKey will be the key we need to end up authenticating against in 2091 // whatever we replace this sequence with. 
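    // For instance (an illustrative sketch; key and discriminator values are
    // made up):
    //   %s = call i64 @llvm.ptrauth.sign(i64 %p, i32 4, i64 %d)
    //   %r = call i64 @llvm.ptrauth.auth(i64 %s, i32 4, i64 %d)
    // is the "sign(0) + auth(0)" case below and folds to plain %p.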
2092 Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr; 2093 if (auto CI = dyn_cast<CallBase>(II->getArgOperand(0))) { 2094 BasePtr = CI->getArgOperand(0); 2095 if (CI->getIntrinsicID() == Intrinsic::ptrauth_sign) { 2096 if (CI->getArgOperand(1) != Key || CI->getArgOperand(2) != Disc) 2097 break; 2098 } else if (CI->getIntrinsicID() == Intrinsic::ptrauth_resign) { 2099 if (CI->getArgOperand(3) != Key || CI->getArgOperand(4) != Disc) 2100 break; 2101 AuthKey = CI->getArgOperand(1); 2102 AuthDisc = CI->getArgOperand(2); 2103 } else 2104 break; 2105 } else 2106 break; 2107 2108 unsigned NewIntrin; 2109 if (AuthKey && NeedSign) { 2110 // resign(0,1) + resign(1,2) = resign(0, 2) 2111 NewIntrin = Intrinsic::ptrauth_resign; 2112 } else if (AuthKey) { 2113 // resign(0,1) + auth(1) = auth(0) 2114 NewIntrin = Intrinsic::ptrauth_auth; 2115 } else if (NeedSign) { 2116 // sign(0) + resign(0, 1) = sign(1) 2117 NewIntrin = Intrinsic::ptrauth_sign; 2118 } else { 2119 // sign(0) + auth(0) = nop 2120 replaceInstUsesWith(*II, BasePtr); 2121 eraseInstFromFunction(*II); 2122 return nullptr; 2123 } 2124 2125 SmallVector<Value *, 4> CallArgs; 2126 CallArgs.push_back(BasePtr); 2127 if (AuthKey) { 2128 CallArgs.push_back(AuthKey); 2129 CallArgs.push_back(AuthDisc); 2130 } 2131 2132 if (NeedSign) { 2133 CallArgs.push_back(II->getArgOperand(3)); 2134 CallArgs.push_back(II->getArgOperand(4)); 2135 } 2136 2137 Function *NewFn = Intrinsic::getDeclaration(II->getModule(), NewIntrin); 2138 return CallInst::Create(NewFn, CallArgs); 2139 } 2140 case Intrinsic::arm_neon_vtbl1: 2141 case Intrinsic::aarch64_neon_tbl1: 2142 if (Value *V = simplifyNeonTbl1(*II, Builder)) 2143 return replaceInstUsesWith(*II, V); 2144 break; 2145 2146 case Intrinsic::arm_neon_vmulls: 2147 case Intrinsic::arm_neon_vmullu: 2148 case Intrinsic::aarch64_neon_smull: 2149 case Intrinsic::aarch64_neon_umull: { 2150 Value *Arg0 = II->getArgOperand(0); 2151 Value *Arg1 = II->getArgOperand(1); 2152 2153 // Handle mul by zero first: 2154 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) { 2155 return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType())); 2156 } 2157 2158 // Check for constant LHS & RHS - in this case we just simplify. 2159 bool Zext = (IID == Intrinsic::arm_neon_vmullu || 2160 IID == Intrinsic::aarch64_neon_umull); 2161 VectorType *NewVT = cast<VectorType>(II->getType()); 2162 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) { 2163 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) { 2164 CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext); 2165 CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext); 2166 2167 return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1)); 2168 } 2169 2170 // Couldn't simplify - canonicalize constant to the RHS. 
2171 std::swap(Arg0, Arg1); 2172 } 2173 2174 // Handle mul by one: 2175 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) 2176 if (ConstantInt *Splat = 2177 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) 2178 if (Splat->isOne()) 2179 return CastInst::CreateIntegerCast(Arg0, II->getType(), 2180 /*isSigned=*/!Zext); 2181 2182 break; 2183 } 2184 case Intrinsic::arm_neon_aesd: 2185 case Intrinsic::arm_neon_aese: 2186 case Intrinsic::aarch64_crypto_aesd: 2187 case Intrinsic::aarch64_crypto_aese: { 2188 Value *DataArg = II->getArgOperand(0); 2189 Value *KeyArg = II->getArgOperand(1); 2190 2191 // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR 2192 Value *Data, *Key; 2193 if (match(KeyArg, m_ZeroInt()) && 2194 match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) { 2195 replaceOperand(*II, 0, Data); 2196 replaceOperand(*II, 1, Key); 2197 return II; 2198 } 2199 break; 2200 } 2201 case Intrinsic::hexagon_V6_vandvrt: 2202 case Intrinsic::hexagon_V6_vandvrt_128B: { 2203 // Simplify Q -> V -> Q conversion. 2204 if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) { 2205 Intrinsic::ID ID0 = Op0->getIntrinsicID(); 2206 if (ID0 != Intrinsic::hexagon_V6_vandqrt && 2207 ID0 != Intrinsic::hexagon_V6_vandqrt_128B) 2208 break; 2209 Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1); 2210 uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue(); 2211 uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue(); 2212 // Check if every byte has common bits in Bytes and Mask. 2213 uint64_t C = Bytes1 & Mask1; 2214 if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000)) 2215 return replaceInstUsesWith(*II, Op0->getArgOperand(0)); 2216 } 2217 break; 2218 } 2219 case Intrinsic::stackrestore: { 2220 enum class ClassifyResult { 2221 None, 2222 Alloca, 2223 StackRestore, 2224 CallWithSideEffects, 2225 }; 2226 auto Classify = [](const Instruction *I) { 2227 if (isa<AllocaInst>(I)) 2228 return ClassifyResult::Alloca; 2229 2230 if (auto *CI = dyn_cast<CallInst>(I)) { 2231 if (auto *II = dyn_cast<IntrinsicInst>(CI)) { 2232 if (II->getIntrinsicID() == Intrinsic::stackrestore) 2233 return ClassifyResult::StackRestore; 2234 2235 if (II->mayHaveSideEffects()) 2236 return ClassifyResult::CallWithSideEffects; 2237 } else { 2238 // Consider all non-intrinsic calls to be side effects 2239 return ClassifyResult::CallWithSideEffects; 2240 } 2241 } 2242 2243 return ClassifyResult::None; 2244 }; 2245 2246 // If the stacksave and the stackrestore are in the same BB, and there is 2247 // no intervening call, alloca, or stackrestore of a different stacksave, 2248 // remove the restore. This can happen when variable allocas are DCE'd. 2249 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) { 2250 if (SS->getIntrinsicID() == Intrinsic::stacksave && 2251 SS->getParent() == II->getParent()) { 2252 BasicBlock::iterator BI(SS); 2253 bool CannotRemove = false; 2254 for (++BI; &*BI != II; ++BI) { 2255 switch (Classify(&*BI)) { 2256 case ClassifyResult::None: 2257 // So far so good, look at next instructions. 2258 break; 2259 2260 case ClassifyResult::StackRestore: 2261 // If we found an intervening stackrestore for a different 2262 // stacksave, we can't remove the stackrestore. Otherwise, continue. 
2263             if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
2264               CannotRemove = true;
2265             break;
2266
2267           case ClassifyResult::Alloca:
2268           case ClassifyResult::CallWithSideEffects:
2269             // If we found an alloca, a non-intrinsic call, or an intrinsic
2270             // call with side effects, we can't remove the stackrestore.
2271             CannotRemove = true;
2272             break;
2273           }
2274           if (CannotRemove)
2275             break;
2276         }
2277
2278         if (!CannotRemove)
2279           return eraseInstFromFunction(CI);
2280       }
2281     }
2282
2283     // Scan down this block to see if there is another stack restore in the
2284     // same block without an intervening call/alloca.
2285     BasicBlock::iterator BI(II);
2286     Instruction *TI = II->getParent()->getTerminator();
2287     bool CannotRemove = false;
2288     for (++BI; &*BI != TI; ++BI) {
2289       switch (Classify(&*BI)) {
2290       case ClassifyResult::None:
2291         // So far so good, look at next instructions.
2292         break;
2293
2294       case ClassifyResult::StackRestore:
2295         // If there is a stackrestore below this one, remove this one.
2296         return eraseInstFromFunction(CI);
2297
2298       case ClassifyResult::Alloca:
2299       case ClassifyResult::CallWithSideEffects:
2300         // If we found an alloca, a non-intrinsic call, or an intrinsic call
2301         // with side effects (such as llvm.stacksave and llvm.read_register),
2302         // we can't remove the stack restore.
2303         CannotRemove = true;
2304         break;
2305       }
2306       if (CannotRemove)
2307         break;
2308     }
2309
2310     // If the stack restore is in a return, resume, or unwind block and if there
2311     // are no allocas or calls between the restore and the return, nuke the
2312     // restore.
2313     if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
2314       return eraseInstFromFunction(CI);
2315     break;
2316   }
2317   case Intrinsic::lifetime_end:
2318     // Asan needs to poison memory to detect invalid access, which is possible
2319     // even for an empty lifetime range.
2320     if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
2321         II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
2322         II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
2323       break;
2324
2325     if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) {
2326           return I.getIntrinsicID() == Intrinsic::lifetime_start;
2327         }))
2328       return nullptr;
2329     break;
2330   case Intrinsic::assume: {
2331     Value *IIOperand = II->getArgOperand(0);
2332     SmallVector<OperandBundleDef, 4> OpBundles;
2333     II->getOperandBundlesAsDefs(OpBundles);
2334
2335     /// This will remove the boolean Condition from the assume given as
2336     /// argument and remove the assume if it becomes useless.
2337     /// It always returns nullptr, for use as a return value.
2338     auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * {
2339       assert(isa<AssumeInst>(Assume));
2340       if (isAssumeWithEmptyBundle(*cast<AssumeInst>(II)))
2341         return eraseInstFromFunction(CI);
2342       replaceUse(II->getOperandUse(0), ConstantInt::getTrue(II->getContext()));
2343       return nullptr;
2344     };
2345     // Remove an assume if it is followed by an identical assume.
2346     // TODO: Do we need this? Unless there are conflicting assumptions, the
2347     // computeKnownBits(IIOperand) below here eliminates redundant assumes.
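    // E.g. (illustrative): two adjacent, identical assumes
    //   call void @llvm.assume(i1 %c)
    //   call void @llvm.assume(i1 %c)
    // collapse to a single assume here.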
2348 Instruction *Next = II->getNextNonDebugInstruction(); 2349 if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand)))) 2350 return RemoveConditionFromAssume(Next); 2351 2352 // Canonicalize assume(a && b) -> assume(a); assume(b); 2353 // Note: New assumption intrinsics created here are registered by 2354 // the InstCombineIRInserter object. 2355 FunctionType *AssumeIntrinsicTy = II->getFunctionType(); 2356 Value *AssumeIntrinsic = II->getCalledOperand(); 2357 Value *A, *B; 2358 if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) { 2359 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles, 2360 II->getName()); 2361 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName()); 2362 return eraseInstFromFunction(*II); 2363 } 2364 // assume(!(a || b)) -> assume(!a); assume(!b); 2365 if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) { 2366 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, 2367 Builder.CreateNot(A), OpBundles, II->getName()); 2368 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, 2369 Builder.CreateNot(B), II->getName()); 2370 return eraseInstFromFunction(*II); 2371 } 2372 2373 // assume( (load addr) != null ) -> add 'nonnull' metadata to load 2374 // (if assume is valid at the load) 2375 CmpInst::Predicate Pred; 2376 Instruction *LHS; 2377 if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) && 2378 Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load && 2379 LHS->getType()->isPointerTy() && 2380 isValidAssumeForContext(II, LHS, &DT)) { 2381 MDNode *MD = MDNode::get(II->getContext(), std::nullopt); 2382 LHS->setMetadata(LLVMContext::MD_nonnull, MD); 2383 return RemoveConditionFromAssume(II); 2384 2385 // TODO: apply nonnull return attributes to calls and invokes 2386 // TODO: apply range metadata for range check patterns? 2387 } 2388 2389 // Convert nonnull assume like: 2390 // %A = icmp ne i32* %PTR, null 2391 // call void @llvm.assume(i1 %A) 2392 // into 2393 // call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ] 2394 if (EnableKnowledgeRetention && 2395 match(IIOperand, m_Cmp(Pred, m_Value(A), m_Zero())) && 2396 Pred == CmpInst::ICMP_NE && A->getType()->isPointerTy()) { 2397 if (auto *Replacement = buildAssumeFromKnowledge( 2398 {RetainedKnowledge{Attribute::NonNull, 0, A}}, Next, &AC, &DT)) { 2399 2400 Replacement->insertBefore(Next); 2401 AC.registerAssumption(Replacement); 2402 return RemoveConditionFromAssume(II); 2403 } 2404 } 2405 2406 // Convert alignment assume like: 2407 // %B = ptrtoint i32* %A to i64 2408 // %C = and i64 %B, Constant 2409 // %D = icmp eq i64 %C, 0 2410 // call void @llvm.assume(i1 %D) 2411 // into 2412 // call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 Constant + 1)] 2413 uint64_t AlignMask; 2414 if (EnableKnowledgeRetention && 2415 match(IIOperand, 2416 m_Cmp(Pred, m_And(m_Value(A), m_ConstantInt(AlignMask)), 2417 m_Zero())) && 2418 Pred == CmpInst::ICMP_EQ) { 2419 if (isPowerOf2_64(AlignMask + 1)) { 2420 uint64_t Offset = 0; 2421 match(A, m_Add(m_Value(A), m_ConstantInt(Offset))); 2422 if (match(A, m_PtrToInt(m_Value(A)))) { 2423 /// Note: this doesn't preserve the offset information but merges 2424 /// offset and alignment. 2425 /// TODO: we can generate a GEP instead of merging the alignment with 2426 /// the offset. 
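        /// E.g. (illustrative, assuming a zero offset): for
        ///   %B = ptrtoint i32* %A to i64
        ///   %C = and i64 %B, 31
        ///   %D = icmp eq i64 %C, 0
        ///   call void @llvm.assume(i1 %D)
        /// the bundle retained below is "align"(i32* %A, i64 32).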
2427         RetainedKnowledge RK{Attribute::Alignment,
2428                              (unsigned)MinAlign(Offset, AlignMask + 1), A};
2429         if (auto *Replacement =
2430                 buildAssumeFromKnowledge(RK, Next, &AC, &DT)) {
2431
2432           Replacement->insertAfter(II);
2433           AC.registerAssumption(Replacement);
2434         }
2435         return RemoveConditionFromAssume(II);
2436       }
2437     }
2438   }
2439
2440     /// Canonicalize Knowledge in operand bundles.
2441     if (EnableKnowledgeRetention && II->hasOperandBundles()) {
2442       for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
2443         auto &BOI = II->bundle_op_info_begin()[Idx];
2444         RetainedKnowledge RK =
2445             llvm::getKnowledgeFromBundle(cast<AssumeInst>(*II), BOI);
2446         if (BOI.End - BOI.Begin > 2)
2447           continue; // Prevent reducing knowledge in an align with offset since
2448                     // extracting a RetainedKnowledge from them loses offset
2449                     // information
2450         RetainedKnowledge CanonRK =
2451             llvm::simplifyRetainedKnowledge(cast<AssumeInst>(II), RK,
2452                                             &getAssumptionCache(),
2453                                             &getDominatorTree());
2454         if (CanonRK == RK)
2455           continue;
2456         if (!CanonRK) {
2457           if (BOI.End - BOI.Begin > 0) {
2458             Worklist.pushValue(II->op_begin()[BOI.Begin]);
2459             Value::dropDroppableUse(II->op_begin()[BOI.Begin]);
2460           }
2461           continue;
2462         }
2463         assert(RK.AttrKind == CanonRK.AttrKind);
2464         if (BOI.End - BOI.Begin > 0)
2465           II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
2466         if (BOI.End - BOI.Begin > 1)
2467           II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
2468               Type::getInt64Ty(II->getContext()), CanonRK.ArgValue));
2469         if (RK.WasOn)
2470           Worklist.pushValue(RK.WasOn);
2471         return II;
2472       }
2473     }
2474
2475     // If there is a dominating assume with the same condition as this one,
2476     // then this one is redundant, and should be removed.
2477     KnownBits Known(1);
2478     computeKnownBits(IIOperand, Known, 0, II);
2479     if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II)))
2480       return eraseInstFromFunction(*II);
2481
2482     // Update the cache of affected values for this assumption (we might be
2483     // here because we just simplified the condition).
2484     AC.updateAffectedValues(cast<AssumeInst>(II));
2485     break;
2486   }
2487   case Intrinsic::experimental_guard: {
2488     // Is this guard followed by another guard? We scan forward over a small
2489     // fixed window of instructions to handle common cases with conditions
2490     // computed between guards.
2491     Instruction *NextInst = II->getNextNonDebugInstruction();
2492     for (unsigned i = 0; i < GuardWideningWindow; i++) {
2493       // Note: Using context-free form to avoid compile time blow up
2494       if (!isSafeToSpeculativelyExecute(NextInst))
2495         break;
2496       NextInst = NextInst->getNextNonDebugInstruction();
2497     }
2498     Value *NextCond = nullptr;
2499     if (match(NextInst,
2500               m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
2501       Value *CurrCond = II->getArgOperand(0);
2502
2503       // Remove a guard that is immediately preceded by an identical guard.
2504       // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
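      // E.g. (sketch):
      //   call void (i1, ...) @llvm.experimental.guard(i1 %a) [ "deopt"() ]
      //   call void (i1, ...) @llvm.experimental.guard(i1 %b) [ "deopt"() ]
      // is widened to a single guard on (and i1 %a, %b).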
2505       if (CurrCond != NextCond) {
2506         Instruction *MoveI = II->getNextNonDebugInstruction();
2507         while (MoveI != NextInst) {
2508           auto *Temp = MoveI;
2509           MoveI = MoveI->getNextNonDebugInstruction();
2510           Temp->moveBefore(II);
2511         }
2512         replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
2513       }
2514       eraseInstFromFunction(*NextInst);
2515       return II;
2516     }
2517     break;
2518   }
2519   case Intrinsic::vector_insert: {
2520     Value *Vec = II->getArgOperand(0);
2521     Value *SubVec = II->getArgOperand(1);
2522     Value *Idx = II->getArgOperand(2);
2523     auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
2524     auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
2525     auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());
2526
2527     // Only canonicalize if the destination vector, Vec, and SubVec are all
2528     // fixed vectors.
2529     if (DstTy && VecTy && SubVecTy) {
2530       unsigned DstNumElts = DstTy->getNumElements();
2531       unsigned VecNumElts = VecTy->getNumElements();
2532       unsigned SubVecNumElts = SubVecTy->getNumElements();
2533       unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
2534
2535       // An insert that entirely overwrites Vec with SubVec is a nop.
2536       if (VecNumElts == SubVecNumElts)
2537         return replaceInstUsesWith(CI, SubVec);
2538
2539       // Widen SubVec into a vector of the same width as Vec, since
2540       // shufflevector requires the two input vectors to be the same width.
2541       // Elements beyond the bounds of SubVec within the widened vector are
2542       // undefined.
2543       SmallVector<int, 8> WidenMask;
2544       unsigned i;
2545       for (i = 0; i != SubVecNumElts; ++i)
2546         WidenMask.push_back(i);
2547       for (; i != VecNumElts; ++i)
2548         WidenMask.push_back(UndefMaskElem);
2549
2550       Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);
2551
2552       SmallVector<int, 8> Mask;
2553       for (unsigned i = 0; i != IdxN; ++i)
2554         Mask.push_back(i);
2555       for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
2556         Mask.push_back(i);
2557       for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
2558         Mask.push_back(i);
2559
2560       Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
2561       return replaceInstUsesWith(CI, Shuffle);
2562     }
2563     break;
2564   }
2565   case Intrinsic::vector_extract: {
2566     Value *Vec = II->getArgOperand(0);
2567     Value *Idx = II->getArgOperand(1);
2568
2569     Type *ReturnType = II->getType();
2570     // (extract_vector (insert_vector InsertTuple, InsertValue, InsertIdx),
2571     // ExtractIdx)
2572     unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue();
2573     Value *InsertTuple, *InsertIdx, *InsertValue;
2574     if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(m_Value(InsertTuple),
2575                                                          m_Value(InsertValue),
2576                                                          m_Value(InsertIdx))) &&
2577         InsertValue->getType() == ReturnType) {
2578       unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();
2579       // Case where we get the same index right after setting it.
2580       // extract.vector(insert.vector(InsertTuple, InsertValue, Idx), Idx) -->
2581       // InsertValue
2582       if (ExtractIdx == Index)
2583         return replaceInstUsesWith(CI, InsertValue);
2584       // If we are getting a different index than what was set in the
2585       // insert.vector intrinsic, we can just set the input tuple to the one up
2586       // in the chain. extract.vector(insert.vector(InsertTuple, InsertValue,
2587       // InsertIndex), ExtractIndex)
2588       // --> extract.vector(InsertTuple, ExtractIndex)
2589       else
2590         return replaceOperand(CI, 0, InsertTuple);
2591     }
2592
2593     auto *DstTy = dyn_cast<FixedVectorType>(ReturnType);
2594     auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
2595
2596     // Only canonicalize if the destination vector and Vec are fixed
2597     // vectors.
2598     if (DstTy && VecTy) {
2599       unsigned DstNumElts = DstTy->getNumElements();
2600       unsigned VecNumElts = VecTy->getNumElements();
2601       unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
2602
2603       // Extracting the entirety of Vec is a nop.
2604       if (VecNumElts == DstNumElts) {
2605         replaceInstUsesWith(CI, Vec);
2606         return eraseInstFromFunction(CI);
2607       }
2608
2609       SmallVector<int, 8> Mask;
2610       for (unsigned i = 0; i != DstNumElts; ++i)
2611         Mask.push_back(IdxN + i);
2612
2613       Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);
2614       return replaceInstUsesWith(CI, Shuffle);
2615     }
2616     break;
2617   }
2618   case Intrinsic::experimental_vector_reverse: {
2619     Value *BO0, *BO1, *X, *Y;
2620     Value *Vec = II->getArgOperand(0);
2621     if (match(Vec, m_OneUse(m_BinOp(m_Value(BO0), m_Value(BO1))))) {
2622       auto *OldBinOp = cast<BinaryOperator>(Vec);
2623       if (match(BO0, m_VecReverse(m_Value(X)))) {
2624         // rev(binop rev(X), rev(Y)) --> binop X, Y
2625         if (match(BO1, m_VecReverse(m_Value(Y))))
2626           return replaceInstUsesWith(CI,
2627                                      BinaryOperator::CreateWithCopiedFlags(
2628                                          OldBinOp->getOpcode(), X, Y, OldBinOp,
2629                                          OldBinOp->getName(), II));
2630         // rev(binop rev(X), BO1Splat) --> binop X, BO1Splat
2631         if (isSplatValue(BO1))
2632           return replaceInstUsesWith(CI,
2633                                      BinaryOperator::CreateWithCopiedFlags(
2634                                          OldBinOp->getOpcode(), X, BO1,
2635                                          OldBinOp, OldBinOp->getName(), II));
2636       }
2637       // rev(binop BO0Splat, rev(Y)) --> binop BO0Splat, Y
2638       if (match(BO1, m_VecReverse(m_Value(Y))) && isSplatValue(BO0))
2639         return replaceInstUsesWith(CI, BinaryOperator::CreateWithCopiedFlags(
2640                                            OldBinOp->getOpcode(), BO0, Y,
2641                                            OldBinOp, OldBinOp->getName(), II));
2642     }
2643     // rev(unop rev(X)) --> unop X
2644     if (match(Vec, m_OneUse(m_UnOp(m_VecReverse(m_Value(X)))))) {
2645       auto *OldUnOp = cast<UnaryOperator>(Vec);
2646       auto *NewUnOp = UnaryOperator::CreateWithCopiedFlags(
2647           OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(), II);
2648       return replaceInstUsesWith(CI, NewUnOp);
2649     }
2650     break;
2651   }
2652   case Intrinsic::vector_reduce_or:
2653   case Intrinsic::vector_reduce_and: {
2654     // Canonicalize logical or/and reductions:
2655     // Or reduction for i1 is represented as:
2656     // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2657     // %res = cmp ne iReduxWidth %val, 0
2658     // And reduction for i1 is represented as:
2659     // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2660     // %res = cmp eq iReduxWidth %val, 11111
2661     Value *Arg = II->getArgOperand(0);
2662     Value *Vect;
2663     if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2664       if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2665         if (FTy->getElementType() == Builder.getInt1Ty()) {
2666           Value *Res = Builder.CreateBitCast(
2667               Vect, Builder.getIntNTy(FTy->getNumElements()));
2668           if (IID == Intrinsic::vector_reduce_and) {
2669             Res = Builder.CreateICmpEQ(
2670                 Res, ConstantInt::getAllOnesValue(Res->getType()));
2671           } else {
2672             assert(IID == Intrinsic::vector_reduce_or &&
2673                    "Expected or reduction.");
2674             Res = Builder.CreateIsNotNull(Res);
2675           }
2676           if (Arg != Vect)
2677             Res =
Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 2678 II->getType()); 2679 return replaceInstUsesWith(CI, Res); 2680 } 2681 } 2682 [[fallthrough]]; 2683 } 2684 case Intrinsic::vector_reduce_add: { 2685 if (IID == Intrinsic::vector_reduce_add) { 2686 // Convert vector_reduce_add(ZExt(<n x i1>)) to 2687 // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)). 2688 // Convert vector_reduce_add(SExt(<n x i1>)) to 2689 // -ZExtOrTrunc(ctpop(bitcast <n x i1> to in)). 2690 // Convert vector_reduce_add(<n x i1>) to 2691 // Trunc(ctpop(bitcast <n x i1> to in)). 2692 Value *Arg = II->getArgOperand(0); 2693 Value *Vect; 2694 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 2695 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 2696 if (FTy->getElementType() == Builder.getInt1Ty()) { 2697 Value *V = Builder.CreateBitCast( 2698 Vect, Builder.getIntNTy(FTy->getNumElements())); 2699 Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V); 2700 if (Res->getType() != II->getType()) 2701 Res = Builder.CreateZExtOrTrunc(Res, II->getType()); 2702 if (Arg != Vect && 2703 cast<Instruction>(Arg)->getOpcode() == Instruction::SExt) 2704 Res = Builder.CreateNeg(Res); 2705 return replaceInstUsesWith(CI, Res); 2706 } 2707 } 2708 } 2709 [[fallthrough]]; 2710 } 2711 case Intrinsic::vector_reduce_xor: { 2712 if (IID == Intrinsic::vector_reduce_xor) { 2713 // Exclusive disjunction reduction over the vector with 2714 // (potentially-extended) i1 element type is actually a 2715 // (potentially-extended) arithmetic `add` reduction over the original 2716 // non-extended value: 2717 // vector_reduce_xor(?ext(<n x i1>)) 2718 // --> 2719 // ?ext(vector_reduce_add(<n x i1>)) 2720 Value *Arg = II->getArgOperand(0); 2721 Value *Vect; 2722 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 2723 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 2724 if (FTy->getElementType() == Builder.getInt1Ty()) { 2725 Value *Res = Builder.CreateAddReduce(Vect); 2726 if (Arg != Vect) 2727 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 2728 II->getType()); 2729 return replaceInstUsesWith(CI, Res); 2730 } 2731 } 2732 } 2733 [[fallthrough]]; 2734 } 2735 case Intrinsic::vector_reduce_mul: { 2736 if (IID == Intrinsic::vector_reduce_mul) { 2737 // Multiplicative reduction over the vector with (potentially-extended) 2738 // i1 element type is actually a (potentially zero-extended) 2739 // logical `and` reduction over the original non-extended value: 2740 // vector_reduce_mul(?ext(<n x i1>)) 2741 // --> 2742 // zext(vector_reduce_and(<n x i1>)) 2743 Value *Arg = II->getArgOperand(0); 2744 Value *Vect; 2745 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 2746 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 2747 if (FTy->getElementType() == Builder.getInt1Ty()) { 2748 Value *Res = Builder.CreateAndReduce(Vect); 2749 if (Res->getType() != II->getType()) 2750 Res = Builder.CreateZExt(Res, II->getType()); 2751 return replaceInstUsesWith(CI, Res); 2752 } 2753 } 2754 } 2755 [[fallthrough]]; 2756 } 2757 case Intrinsic::vector_reduce_umin: 2758 case Intrinsic::vector_reduce_umax: { 2759 if (IID == Intrinsic::vector_reduce_umin || 2760 IID == Intrinsic::vector_reduce_umax) { 2761 // UMin/UMax reduction over the vector with (potentially-extended) 2762 // i1 element type is actually a (potentially-extended) 2763 // logical `and`/`or` reduction over the original non-extended value: 2764 // vector_reduce_u{min,max}(?ext(<n x i1>)) 2765 // --> 2766 // ?ext(vector_reduce_{and,or}(<n x i1>)) 2767 Value 
*Arg = II->getArgOperand(0); 2768 Value *Vect; 2769 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 2770 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 2771 if (FTy->getElementType() == Builder.getInt1Ty()) { 2772 Value *Res = IID == Intrinsic::vector_reduce_umin 2773 ? Builder.CreateAndReduce(Vect) 2774 : Builder.CreateOrReduce(Vect); 2775 if (Arg != Vect) 2776 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 2777 II->getType()); 2778 return replaceInstUsesWith(CI, Res); 2779 } 2780 } 2781 } 2782 [[fallthrough]]; 2783 } 2784 case Intrinsic::vector_reduce_smin: 2785 case Intrinsic::vector_reduce_smax: { 2786 if (IID == Intrinsic::vector_reduce_smin || 2787 IID == Intrinsic::vector_reduce_smax) { 2788 // SMin/SMax reduction over the vector with (potentially-extended) 2789 // i1 element type is actually a (potentially-extended) 2790 // logical `and`/`or` reduction over the original non-extended value: 2791 // vector_reduce_s{min,max}(<n x i1>) 2792 // --> 2793 // vector_reduce_{or,and}(<n x i1>) 2794 // and 2795 // vector_reduce_s{min,max}(sext(<n x i1>)) 2796 // --> 2797 // sext(vector_reduce_{or,and}(<n x i1>)) 2798 // and 2799 // vector_reduce_s{min,max}(zext(<n x i1>)) 2800 // --> 2801 // zext(vector_reduce_{and,or}(<n x i1>)) 2802 Value *Arg = II->getArgOperand(0); 2803 Value *Vect; 2804 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 2805 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 2806 if (FTy->getElementType() == Builder.getInt1Ty()) { 2807 Instruction::CastOps ExtOpc = Instruction::CastOps::CastOpsEnd; 2808 if (Arg != Vect) 2809 ExtOpc = cast<CastInst>(Arg)->getOpcode(); 2810 Value *Res = ((IID == Intrinsic::vector_reduce_smin) == 2811 (ExtOpc == Instruction::CastOps::ZExt)) 2812 ? Builder.CreateAndReduce(Vect) 2813 : Builder.CreateOrReduce(Vect); 2814 if (Arg != Vect) 2815 Res = Builder.CreateCast(ExtOpc, Res, II->getType()); 2816 return replaceInstUsesWith(CI, Res); 2817 } 2818 } 2819 } 2820 [[fallthrough]]; 2821 } 2822 case Intrinsic::vector_reduce_fmax: 2823 case Intrinsic::vector_reduce_fmin: 2824 case Intrinsic::vector_reduce_fadd: 2825 case Intrinsic::vector_reduce_fmul: { 2826 bool CanBeReassociated = (IID != Intrinsic::vector_reduce_fadd && 2827 IID != Intrinsic::vector_reduce_fmul) || 2828 II->hasAllowReassoc(); 2829 const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd || 2830 IID == Intrinsic::vector_reduce_fmul) 2831 ? 1 2832 : 0; 2833 Value *Arg = II->getArgOperand(ArgIdx); 2834 Value *V; 2835 ArrayRef<int> Mask; 2836 if (!isa<FixedVectorType>(Arg->getType()) || !CanBeReassociated || 2837 !match(Arg, m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))) || 2838 !cast<ShuffleVectorInst>(Arg)->isSingleSource()) 2839 break; 2840 int Sz = Mask.size(); 2841 SmallBitVector UsedIndices(Sz); 2842 for (int Idx : Mask) { 2843 if (Idx == UndefMaskElem || UsedIndices.test(Idx)) 2844 break; 2845 UsedIndices.set(Idx); 2846 } 2847 // Can remove shuffle iff just shuffled elements, no repeats, undefs, or 2848 // other changes. 
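    // E.g. (illustrative): for a reassoc fadd reduction over a permutation,
    //   %s = shufflevector <4 x float> %v, <4 x float> undef,
    //                      <4 x i32> <i32 3, i32 2, i32 1, i32 0>
    //   %r = call reassoc float @llvm.vector.reduce.fadd.v4f32(float %a,
    //                                                          <4 x float> %s)
    // the shuffle is bypassed and %v is reduced directly.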
2849     if (UsedIndices.all()) {
2850       replaceUse(II->getOperandUse(ArgIdx), V);
2851       return nullptr;
2852     }
2853     break;
2854   }
2855   default: {
2856     // Handle target specific intrinsics
2857     std::optional<Instruction *> V = targetInstCombineIntrinsic(*II);
2858     if (V)
2859       return *V;
2860     break;
2861   }
2862   }
2863
2864   if (Instruction *Shuf = foldShuffledIntrinsicOperands(II, Builder))
2865     return Shuf;
2866
2867   // Some intrinsics (like experimental_gc_statepoint) can be used in invoke
2868   // context, so it is handled in visitCallBase and we should trigger it.
2869   return visitCallBase(*II);
2870 }
2871
2872 // Fence instruction simplification
2873 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
2874   auto *NFI = dyn_cast<FenceInst>(FI.getNextNonDebugInstruction());
2875   // This check is solely here to handle arbitrary target-dependent syncscopes.
2876   // TODO: Can remove this if it does not matter in practice.
2877   if (NFI && FI.isIdenticalTo(NFI))
2878     return eraseInstFromFunction(FI);
2879
2880   // Returns true if FI1 is an identical or stronger fence than FI2.
2881   auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) {
2882     auto FI1SyncScope = FI1->getSyncScopeID();
2883     // Consider same scope, where scope is global or single-thread.
2884     if (FI1SyncScope != FI2->getSyncScopeID() ||
2885         (FI1SyncScope != SyncScope::System &&
2886          FI1SyncScope != SyncScope::SingleThread))
2887       return false;
2888
2889     return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering());
2890   };
2891   if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
2892     return eraseInstFromFunction(FI);
2893
2894   if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNonDebugInstruction()))
2895     if (isIdenticalOrStrongerFence(PFI, &FI))
2896       return eraseInstFromFunction(FI);
2897   return nullptr;
2898 }
2899
2900 // InvokeInst simplification
2901 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
2902   return visitCallBase(II);
2903 }
2904
2905 // CallBrInst simplification
2906 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
2907   return visitCallBase(CBI);
2908 }
2909
2910 /// If this cast does not affect the value passed through the varargs area, we
2911 /// can eliminate the use of the cast.
2912 static bool isSafeToEliminateVarargsCast(const CallBase &Call,
2913                                          const DataLayout &DL,
2914                                          const CastInst *const CI,
2915                                          const int ix) {
2916   if (!CI->isLosslessCast())
2917     return false;
2918
2919   // If this is a GC intrinsic, avoid munging types. We need types for
2920   // statepoint reconstruction in SelectionDAG.
2921   // TODO: This is probably something which should be expanded to all
2922   // intrinsics since the entire point of intrinsics is that
2923   // they are understandable by the optimizer.
2924   if (isa<GCStatepointInst>(Call) || isa<GCRelocateInst>(Call) ||
2925       isa<GCResultInst>(Call))
2926     return false;
2927
2928   // Opaque pointers are compatible with any byval types.
2929   PointerType *SrcTy = cast<PointerType>(CI->getOperand(0)->getType());
2930   if (SrcTy->isOpaque())
2931     return true;
2932
2933   // The size of ByVal or InAlloca arguments is derived from the type, so we
2934   // can't change to a type with a different size. If the size were
2935   // passed explicitly we could avoid this check.
2936   if (!Call.isPassPointeeByValueArgument(ix))
2937     return true;
2938
2939   // The transform currently only handles type replacement for byval, not other
2940   // type-carrying attributes.
  if (!Call.isByValArgument(ix))
    return false;

  Type *SrcElemTy = SrcTy->getNonOpaquePointerElementType();
  Type *DstElemTy = Call.getParamByValType(ix);
  if (!SrcElemTy->isSized() || !DstElemTy->isSized())
    return false;
  if (DL.getTypeAllocSize(SrcElemTy) != DL.getTypeAllocSize(DstElemTy))
    return false;
  return true;
}

Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
  if (!CI->getCalledFunction()) return nullptr;

  // Skip optimizing notail and musttail calls so
  // LibCallSimplifier::optimizeCall doesn't have to preserve those invariants.
  // LibCallSimplifier::optimizeCall should try to preserve tail calls though.
  if (CI->isMustTailCall() || CI->isNoTailCall())
    return nullptr;

  auto InstCombineRAUW = [this](Instruction *From, Value *With) {
    replaceInstUsesWith(*From, With);
  };
  auto InstCombineErase = [this](Instruction *I) {
    eraseInstFromFunction(*I);
  };
  LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
                               InstCombineErase);
  if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
    ++NumSimplified;
    return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
  }

  return nullptr;
}

static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca. This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  IntrinsicInst *InitTrampoline = nullptr;
  for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value. Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return nullptr;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}

static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find an
  // init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp->getIterator(),
                            E = AdjustTramp->getParent()->begin();
       I != E;) {
    Instruction *Inst = &*--I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return nullptr;
  }
  return nullptr;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function. Otherwise return NULL.
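// The pattern being matched is, illustratively:
//   %tramp = alloca [N x i8]                               ; trampoline memory
//   call void @llvm.init.trampoline(ptr %tramp, ptr @f, ptr %nval)
//   %p = call ptr @llvm.adjust.trampoline(ptr %tramp)
//   call void %p(...)
// which transformCallThroughTrampoline can then turn into a direct call to @f
// with %nval passed as the 'nest' parameter.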
static IntrinsicInst *findInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return nullptr;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return nullptr;
}

bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
                                            const TargetLibraryInfo *TLI) {
  // Note: We only handle cases which can't be driven from generic attributes
  // here. So, for example, nonnull and noalias (which are common properties
  // of some allocation functions) are expected to be handled via annotation
  // of the respective allocator declaration with generic attributes.
  bool Changed = false;

  if (!Call.getType()->isPointerTy())
    return Changed;

  std::optional<APInt> Size = getAllocSize(&Call, TLI);
  if (Size && *Size != 0) {
    // TODO: We really should just emit deref_or_null here and then
    // let the generic inference code combine that with nonnull.
    if (Call.hasRetAttr(Attribute::NonNull)) {
      Changed = !Call.hasRetAttr(Attribute::Dereferenceable);
      Call.addRetAttr(Attribute::getWithDereferenceableBytes(
          Call.getContext(), Size->getLimitedValue()));
    } else {
      Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull);
      Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
          Call.getContext(), Size->getLimitedValue()));
    }
  }

  // Add alignment attribute if alignment is a power of two constant.
  Value *Alignment = getAllocAlignment(&Call, TLI);
  if (!Alignment)
    return Changed;

  ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
  if (AlignOpC && AlignOpC->getValue().ult(llvm::Value::MaximumAlignment)) {
    uint64_t AlignmentVal = AlignOpC->getZExtValue();
    if (llvm::isPowerOf2_64(AlignmentVal)) {
      Align ExistingAlign = Call.getRetAlign().valueOrOne();
      Align NewAlign = Align(AlignmentVal);
      if (NewAlign > ExistingAlign) {
        Call.addRetAttr(
            Attribute::getWithAlignment(Call.getContext(), NewAlign));
        Changed = true;
      }
    }
  }
  return Changed;
}

/// Improvements for call, callbr and invoke instructions.
Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
  bool Changed = annotateAnyAllocSite(Call, &TLI);

  // Mark any parameters that are known to be non-null with the nonnull
  // attribute. This is helpful for inlining calls to functions with null
  // checks on their arguments.
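  // e.g. (illustrative): for
  //   %p = alloca i32
  //   call void @g(ptr %p)
  // the argument becomes "ptr nonnull %p", since an alloca in the default
  // address space cannot be null.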
  SmallVector<unsigned, 4> ArgNos;
  unsigned ArgNo = 0;

  for (Value *V : Call.args()) {
    if (V->getType()->isPointerTy() &&
        !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
        isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
      ArgNos.push_back(ArgNo);
    ArgNo++;
  }

  assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly.");

  if (!ArgNos.empty()) {
    AttributeList AS = Call.getAttributes();
    LLVMContext &Ctx = Call.getContext();
    AS = AS.addParamAttribute(Ctx, ArgNos,
                              Attribute::get(Ctx, Attribute::NonNull));
    Call.setAttributes(AS);
    Changed = true;
  }

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/callbr/invoke.
  Value *Callee = Call.getCalledOperand();
  Function *CalleeF = dyn_cast<Function>(Callee);
  if ((!CalleeF || CalleeF->getFunctionType() != Call.getFunctionType()) &&
      transformConstExprCastCall(Call))
    return nullptr;

  if (CalleeF) {
    // Remove the convergent attr on calls when the callee is not convergent.
    if (Call.isConvergent() && !CalleeF->isConvergent() &&
        !CalleeF->isIntrinsic()) {
      LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
                        << "\n");
      Call.setNotConvergent();
      return &Call;
    }

    // If the call and callee calling conventions don't match, and neither one
    // of the calling conventions is compatible with the C calling convention,
    // this call must be unreachable, as the call is undefined.
    if ((CalleeF->getCallingConv() != Call.getCallingConv() &&
         !(CalleeF->getCallingConv() == llvm::CallingConv::C &&
           TargetLibraryInfoImpl::isCallingConvCCompatible(&Call)) &&
         !(Call.getCallingConv() == llvm::CallingConv::C &&
           TargetLibraryInfoImpl::isCallingConvCCompatible(CalleeF))) &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = &Call;
      CreateNonTerminatorUnreachable(OldCall);
      // If OldCall does not return void then replaceInstUsesWith poison.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        replaceInstUsesWith(*OldCall, PoisonValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return eraseInstFromFunction(*OldCall);

      // We cannot remove an invoke or a callbr, because it would change the
      // CFG; just change the callee to a null pointer instead.
      cast<CallBase>(OldCall)->setCalledFunction(
          CalleeF->getFunctionType(),
          Constant::getNullValue(CalleeF->getType()));
      return nullptr;
    }
  }

  // Calling a null function pointer is undefined if a null address isn't
  // dereferenceable.
  if ((isa<ConstantPointerNull>(Callee) &&
       !NullPointerIsDefined(Call.getFunction())) ||
      isa<UndefValue>(Callee)) {
    // If Call does not return void then replaceInstUsesWith poison.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!Call.getType()->isVoidTy())
      replaceInstUsesWith(Call, PoisonValue::get(Call.getType()));

    if (Call.isTerminator()) {
      // Can't remove an invoke or callbr because we cannot change the CFG.
      return nullptr;
    }

    // This instruction is not reachable, just remove it.
    CreateNonTerminatorUnreachable(&Call);
    return eraseInstFromFunction(Call);
  }

  if (IntrinsicInst *II = findInitTrampoline(Callee))
    return transformCallThroughTrampoline(Call, *II);

  // TODO: Drop this transform once opaque pointer transition is done.
  FunctionType *FTy = Call.getFunctionType();
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
         I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
        replaceUse(*I, CI->getOperand(0));

        // Update the byval type to match the pointer type.
        // Not necessary for opaque pointers.
        PointerType *NewTy = cast<PointerType>(CI->getOperand(0)->getType());
        if (!NewTy->isOpaque() && Call.isByValArgument(ix)) {
          Call.removeParamAttr(ix, Attribute::ByVal);
          Call.addParamAttr(ix, Attribute::getWithByValType(
                                    Call.getContext(),
                                    NewTy->getNonOpaquePointerElementType()));
        }
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
    InlineAsm *IA = cast<InlineAsm>(Callee);
    if (!IA->canThrow()) {
      // Normal inline asm calls cannot throw - mark them
      // 'nounwind'.
      Call.setDoesNotThrow();
      Changed = true;
    }
  }

  // Try to optimize the call if possible; we require DataLayout for most of
  // this. None of these calls are seen as possibly dead, so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
    Instruction *I = tryOptimizeCall(CI);
    // If we changed something, return the result; otherwise fall through to
    // the remaining checks.
    if (I) return eraseInstFromFunction(*I);
  }

  if (!Call.use_empty() && !Call.isMustTailCall())
    if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
      Type *CallTy = Call.getType();
      Type *RetArgTy = ReturnedArg->getType();
      if (RetArgTy->canLosslesslyBitCastTo(CallTy))
        return replaceInstUsesWith(
            Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
    }

  // Drop unnecessary kcfi operand bundles from calls that were converted
  // into direct calls.
  auto Bundle = Call.getOperandBundle(LLVMContext::OB_kcfi);
  if (Bundle && !Call.isIndirectCall()) {
    DEBUG_WITH_TYPE(DEBUG_TYPE "-kcfi", {
      if (CalleeF) {
        ConstantInt *FunctionType = nullptr;
        ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]);

        if (MDNode *MD = CalleeF->getMetadata(LLVMContext::MD_kcfi_type))
          FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0));

        if (FunctionType &&
            FunctionType->getZExtValue() != ExpectedType->getZExtValue())
          dbgs() << Call.getModule()->getName()
                 << ": warning: kcfi: " << Call.getCaller()->getName()
                 << ": call to " << CalleeF->getName()
                 << " using a mismatching function pointer type\n";
      }
    });

    return CallBase::removeOperandBundle(&Call, LLVMContext::OB_kcfi);
  }

  if (isRemovableAlloc(&Call, &TLI))
    return visitAllocSite(Call);

  // Handle intrinsics which can be used in both call and invoke context.
  switch (Call.getIntrinsicID()) {
  case Intrinsic::experimental_gc_statepoint: {
    GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call);
    SmallPtrSet<Value *, 32> LiveGcValues;
    for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
      GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);

      // Remove the relocation if unused.
      if (GCR.use_empty()) {
        eraseInstFromFunction(GCR);
        continue;
      }

      Value *DerivedPtr = GCR.getDerivedPtr();
      Value *BasePtr = GCR.getBasePtr();

      // Undef is undef, even after relocation.
      if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
        replaceInstUsesWith(GCR, UndefValue::get(GCR.getType()));
        eraseInstFromFunction(GCR);
        continue;
      }

      if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
        // The relocation of null will be null for almost any collector.
        // TODO: provide a hook for this in GCStrategy. There might be some
        // weird collector this property does not hold for.
        if (isa<ConstantPointerNull>(DerivedPtr)) {
          // Use the null pointer of gc_relocate's type to replace it.
          replaceInstUsesWith(GCR, ConstantPointerNull::get(PT));
          eraseInstFromFunction(GCR);
          continue;
        }

        // isKnownNonNull -> nonnull attribute
        if (!GCR.hasRetAttr(Attribute::NonNull) &&
            isKnownNonZero(DerivedPtr, DL, 0, &AC, &Call, &DT)) {
          GCR.addRetAttr(Attribute::NonNull);
          // We discovered a new fact; re-check users.
          Worklist.pushUsersToWorkList(GCR);
        }
      }

      // If we have two copies of the same pointer in the statepoint argument
      // list, canonicalize to one. This may let us common gc.relocates.
      if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
          GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
        auto *OpIntTy = GCR.getOperand(2)->getType();
        GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
      }

      // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
      // Canonicalize on the type from the uses to the defs

      // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
      LiveGcValues.insert(BasePtr);
      LiveGcValues.insert(DerivedPtr);
    }
    std::optional<OperandBundleUse> Bundle =
        GCSP.getOperandBundle(LLVMContext::OB_gc_live);
    unsigned NumOfGCLives = LiveGcValues.size();
    if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
      break;
    // We can reduce the size of the gc-live bundle.
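    // e.g. (illustrative): if only %a and %c are still used by gc.relocates,
    //   "gc-live"(%a, %b, %c)  -->  "gc-live"(%a, %c)
    // with each relocate's base/derived indices remapped below.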
    DenseMap<Value *, unsigned> Val2Idx;
    std::vector<Value *> NewLiveGc;
    for (Value *V : Bundle->Inputs) {
      if (Val2Idx.count(V))
        continue;
      if (LiveGcValues.count(V)) {
        Val2Idx[V] = NewLiveGc.size();
        NewLiveGc.push_back(V);
      } else
        Val2Idx[V] = NumOfGCLives;
    }
    // Update all gc.relocates
    for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
      GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
      Value *BasePtr = GCR.getBasePtr();
      assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
             "Missed live gc for base pointer");
      auto *OpIntTy1 = GCR.getOperand(1)->getType();
      GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
      Value *DerivedPtr = GCR.getDerivedPtr();
      assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
             "Missed live gc for derived pointer");
      auto *OpIntTy2 = GCR.getOperand(2)->getType();
      GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
    }
    // Create new statepoint instruction.
    OperandBundleDef NewBundle("gc-live", NewLiveGc);
    return CallBase::Create(&Call, NewBundle);
  }
  default: { break; }
  }

  return Changed ? &Call : nullptr;
}

/// If the callee is a constexpr cast of a function, attempt to move the cast to
/// the arguments of the call/callbr/invoke.
bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
  auto *Callee =
      dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
  if (!Callee)
    return false;

  // If this is a call to a thunk function, don't remove the cast. Thunks are
  // used to transparently forward all incoming parameters and outgoing return
  // values, so it's important to leave the cast in place.
  if (Callee->hasFnAttribute("thunk"))
    return false;

  // If this is a musttail call, the callee's prototype must match the caller's
  // prototype with the exception of pointee types. The code below doesn't
  // implement that, so we can't do this transform.
  // TODO: Do the transform if it only requires adding pointer casts.
  if (Call.isMustTailCall())
    return false;

  Instruction *Caller = &Call;
  const AttributeList &CallerPAL = Call.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {

    if (NewRetTy->isStructTy())
      return false; // TODO: Handle multiple return values.

    if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
      if (Callee->isDeclaration())
        return false; // Cannot transform this return value.

      if (!Caller->use_empty() &&
          // void -> non-void is handled specially
          !NewRetTy->isVoidTy())
        return false; // Cannot transform this return value.
    }

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
      if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
        return false; // Attribute not compatible with transformed value.
    }

    // If the callbase is an invoke/callbr instruction, and the return value is
    // used by a PHI node in a successor, we cannot change the return type of
    // the call because there is no place to put the cast instruction (without
    // breaking the critical edge). Bail out in this case.
    if (!Caller->use_empty()) {
      BasicBlock *PhisNotSupportedBlock = nullptr;
      if (auto *II = dyn_cast<InvokeInst>(Caller))
        PhisNotSupportedBlock = II->getNormalDest();
      if (auto *CB = dyn_cast<CallBrInst>(Caller))
        PhisNotSupportedBlock = CB->getDefaultDest();
      if (PhisNotSupportedBlock)
        for (User *U : Caller->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == PhisNotSupportedBlock)
              return false;
    }
  }

  unsigned NumActualArgs = Call.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  // Prevent us turning:
  //  declare void @takes_i32_inalloca(i32* inalloca)
  //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
  //
  // into:
  //  call void @takes_i32_inalloca(i32* null)
  //
  // Similarly, avoid folding away bitcasts of byval calls.
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
    return false;

  auto AI = Call.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
      return false; // Cannot transform this parameter value.

    // Check if there are any incompatible attributes we cannot drop safely.
    if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i))
            .overlaps(AttributeFuncs::typeIncompatible(
                ParamTy, AttributeFuncs::ASK_UNSAFE_TO_DROP)))
      return false; // Attribute not compatible with transformed value.

    if (Call.isInAllocaArgument(i) ||
        CallerPAL.hasParamAttr(i, Attribute::Preallocated))
      return false; // Cannot transform to and from inalloca/preallocated.

    if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))
      return false;

    if (CallerPAL.hasParamAttr(i, Attribute::ByVal) !=
        Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
      return false; // Cannot transform to or from byval.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && CallerPAL.hasParamAttr(i, Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (!ParamPTy)
        return false;

      if (!ParamPTy->isOpaque()) {
        Type *ParamElTy = ParamPTy->getNonOpaquePointerElementType();
        if (!ParamElTy->isSized())
          return false;

        Type *CurElTy = Call.getParamByValType(i);
        if (DL.getTypeAllocSize(CurElTy) != DL.getTypeAllocSize(ParamElTy))
          return false;
      }
    }
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call. We don't want to introduce a varargs call where one doesn't
    // already exist.
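    // e.g. (illustrative): do not rewrite
    //   call void bitcast (void (...)* @f to void (i32)*)(i32 0)
    // into "call void (...) @f(i32 0)"; the fixed and variadic calling
    // conventions may differ for a function we only see a declaration of.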
    if (FT->isVarArg() != Call.getFunctionType()->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters is the same or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() && Call.getFunctionType()->isVarArg() &&
        FT->getNumParams() != Call.getFunctionType()->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty()) {
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    unsigned SRetIdx;
    if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
        SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams())
      return false;
  }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  SmallVector<Value *, 8> Args;
  SmallVector<AttributeSet, 8> ArgAttrs;
  Args.reserve(NumActualArgs);
  ArgAttrs.reserve(NumActualArgs);

  // Get any return attributes.
  AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));

  LLVMContext &Ctx = Call.getContext();
  AI = Call.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    Value *NewArg = *AI;
    if ((*AI)->getType() != ParamTy)
      NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
    Args.push_back(NewArg);

    // Add any parameter attributes except the ones incompatible with the new
    // type. Note that we made sure all incompatible ones are safe to drop.
    AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
        ParamTy, AttributeFuncs::ASK_SAFE_TO_DROP);
    if (CallerPAL.hasParamAttr(i, Attribute::ByVal) &&
        !ParamTy->isOpaquePointerTy()) {
      AttrBuilder AB(Ctx, CallerPAL.getParamAttrs(i).removeAttributes(
                              Ctx, IncompatibleAttrs));
      AB.addByValAttr(ParamTy->getNonOpaquePointerElementType());
      ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
    } else {
      ArgAttrs.push_back(
          CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs));
    }
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
    ArgAttrs.push_back(AttributeSet());
  }

  // If the call supplies more arguments than the function type has parameters,
  // the extras can only be passed through the varargs area.
  if (FT->getNumParams() < NumActualArgs) {
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        Value *NewArg = *AI;
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
              CastInst::getCastOpcode(*AI, false, PTy, false);
          NewArg = Builder.CreateCast(opcode, *AI, PTy);
        }
        Args.push_back(NewArg);

        // Add any parameter attributes.
        ArgAttrs.push_back(CallerPAL.getParamAttrs(i));
      }
    }
  }

  AttributeSet FnAttrs = CallerPAL.getFnAttrs();

  if (NewRetTy->isVoidTy())
    Caller->setName(""); // Void type should not have a name.

  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
         "missing argument attributes");
  AttributeList NewCallerPAL = AttributeList::get(
      Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);

  SmallVector<OperandBundleDef, 1> OpBundles;
  Call.getOperandBundlesAsDefs(OpBundles);

  CallBase *NewCall;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpBundles);
  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
    NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
                                   CBI->getIndirectDests(), Args, OpBundles);
  } else {
    NewCall = Builder.CreateCall(Callee, Args, OpBundles);
    cast<CallInst>(NewCall)->setTailCallKind(
        cast<CallInst>(Caller)->getTailCallKind());
  }
  NewCall->takeName(Caller);
  NewCall->setCallingConv(Call.getCallingConv());
  NewCall->setAttributes(NewCallerPAL);

  // Preserve prof metadata if any.
  NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});

  // Insert a cast of the return type as necessary.
  Instruction *NC = NewCall;
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      Instruction *InsertPt = NewCall->getInsertionPointAfterDef();
      assert(InsertPt && "No place to insert cast");
      InsertNewInstBefore(NC, *InsertPt);
      Worklist.pushUsersToWorkList(*Caller);
    } else {
      NV = PoisonValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    replaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  eraseInstFromFunction(*Caller);
  return true;
}

/// Turn a call to a function created by the init_trampoline / adjust_trampoline
/// intrinsic pair into a direct call to the underlying function.
Instruction *
InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
                                                 IntrinsicInst &Tramp) {
  Value *Callee = Call.getCalledOperand();
  Type *CalleeTy = Callee->getType();
  FunctionType *FTy = Call.getFunctionType();
  AttributeList Attrs = Call.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
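  // e.g. (illustrative): if @f is declared as @f(ptr nest %chain, i32 %x),
  // a call through the adjusted trampoline
  //   call void %adjusted(i32 7)
  // becomes
  //   call void @f(ptr nest %nval, i32 7)
  // where %nval is the chain value that was passed to llvm.init.trampoline.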
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
  FunctionType *NestFTy = NestF->getFunctionType();

  AttributeList NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestArgNo = 0;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
                                      E = NestFTy->param_end();
         I != E; ++NestArgNo, ++I) {
      AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo);
      if (AS.hasAttribute(Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = AS;
        break;
      }
    }

    if (NestTy) {
      std::vector<Value *> NewArgs;
      std::vector<AttributeSet> NewArgAttrs;
      NewArgs.reserve(Call.arg_size() + 1);
      NewArgAttrs.reserve(Call.arg_size());

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      {
        unsigned ArgNo = 0;
        auto I = Call.arg_begin(), E = Call.arg_end();
        do {
          if (ArgNo == NestArgNo) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp.getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewArgAttrs.push_back(NestAttr);
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));

          ++ArgNo;
          ++I;
        } while (true);
      }

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type *> NewTypes;
      NewTypes.reserve(FTy->getNumParams() + 1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned ArgNo = 0;
        FunctionType::param_iterator I = FTy->param_begin(),
                                     E = FTy->param_end();

        do {
          if (ArgNo == NestArgNo)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++ArgNo;
          ++I;
        } while (true);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
          NestF->getType() == PointerType::getUnqual(NewFTy) ?
          NestF : ConstantExpr::getBitCast(NestF,
                                           PointerType::getUnqual(NewFTy));
      AttributeList NewPAL =
          AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(),
                             Attrs.getRetAttrs(), NewArgAttrs);

      SmallVector<OperandBundleDef, 1> OpBundles;
      Call.getOperandBundlesAsDefs(OpBundles);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
        NewCaller = InvokeInst::Create(NewFTy, NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs, OpBundles);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
        NewCaller =
            CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
                               CBI->getIndirectDests(), NewArgs, OpBundles);
        cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
        cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
        cast<CallInst>(NewCaller)->setTailCallKind(
            cast<CallInst>(Call).getTailCallKind());
        cast<CallInst>(NewCaller)->setCallingConv(
            cast<CallInst>(Call).getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      NewCaller->setDebugLoc(Call.getDebugLoc());

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
  Call.setCalledFunction(FTy, NewCallee);
  return &Call;
}