1 //===- InstCombineCalls.cpp -----------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the visitCall, visitInvoke, and visitCallBr functions. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "InstCombineInternal.h" 14 #include "llvm/ADT/APFloat.h" 15 #include "llvm/ADT/APInt.h" 16 #include "llvm/ADT/APSInt.h" 17 #include "llvm/ADT/ArrayRef.h" 18 #include "llvm/ADT/STLFunctionalExtras.h" 19 #include "llvm/ADT/SmallBitVector.h" 20 #include "llvm/ADT/SmallVector.h" 21 #include "llvm/ADT/Statistic.h" 22 #include "llvm/Analysis/AliasAnalysis.h" 23 #include "llvm/Analysis/AssumeBundleQueries.h" 24 #include "llvm/Analysis/AssumptionCache.h" 25 #include "llvm/Analysis/InstructionSimplify.h" 26 #include "llvm/Analysis/Loads.h" 27 #include "llvm/Analysis/MemoryBuiltins.h" 28 #include "llvm/Analysis/ValueTracking.h" 29 #include "llvm/Analysis/VectorUtils.h" 30 #include "llvm/IR/AttributeMask.h" 31 #include "llvm/IR/Attributes.h" 32 #include "llvm/IR/BasicBlock.h" 33 #include "llvm/IR/Constant.h" 34 #include "llvm/IR/Constants.h" 35 #include "llvm/IR/DataLayout.h" 36 #include "llvm/IR/DebugInfo.h" 37 #include "llvm/IR/DerivedTypes.h" 38 #include "llvm/IR/Function.h" 39 #include "llvm/IR/GlobalVariable.h" 40 #include "llvm/IR/InlineAsm.h" 41 #include "llvm/IR/InstrTypes.h" 42 #include "llvm/IR/Instruction.h" 43 #include "llvm/IR/Instructions.h" 44 #include "llvm/IR/IntrinsicInst.h" 45 #include "llvm/IR/Intrinsics.h" 46 #include "llvm/IR/IntrinsicsAArch64.h" 47 #include "llvm/IR/IntrinsicsAMDGPU.h" 48 #include "llvm/IR/IntrinsicsARM.h" 49 #include "llvm/IR/IntrinsicsHexagon.h" 50 #include "llvm/IR/LLVMContext.h" 51 #include "llvm/IR/Metadata.h" 52 #include "llvm/IR/PatternMatch.h" 53 #include "llvm/IR/Statepoint.h" 54 #include "llvm/IR/Type.h" 55 #include "llvm/IR/User.h" 56 #include "llvm/IR/Value.h" 57 #include "llvm/IR/ValueHandle.h" 58 #include "llvm/Support/AtomicOrdering.h" 59 #include "llvm/Support/Casting.h" 60 #include "llvm/Support/CommandLine.h" 61 #include "llvm/Support/Compiler.h" 62 #include "llvm/Support/Debug.h" 63 #include "llvm/Support/ErrorHandling.h" 64 #include "llvm/Support/KnownBits.h" 65 #include "llvm/Support/MathExtras.h" 66 #include "llvm/Support/raw_ostream.h" 67 #include "llvm/Transforms/InstCombine/InstCombiner.h" 68 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" 69 #include "llvm/Transforms/Utils/Local.h" 70 #include "llvm/Transforms/Utils/SimplifyLibCalls.h" 71 #include <algorithm> 72 #include <cassert> 73 #include <cstdint> 74 #include <optional> 75 #include <utility> 76 #include <vector> 77 78 #define DEBUG_TYPE "instcombine" 79 #include "llvm/Transforms/Utils/InstructionWorklist.h" 80 81 using namespace llvm; 82 using namespace PatternMatch; 83 84 STATISTIC(NumSimplified, "Number of library calls simplified"); 85 86 static cl::opt<unsigned> GuardWideningWindow( 87 "instcombine-guard-widening-window", 88 cl::init(3), 89 cl::desc("How wide an instruction window to bypass looking for " 90 "another guard")); 91 92 /// Return the specified type promoted as it would be to pass though a va_arg 93 /// area. 94 static Type *getPromotedType(Type *Ty) { 95 if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) { 96 if (ITy->getBitWidth() < 32) 97 return Type::getInt32Ty(Ty->getContext()); 98 } 99 return Ty; 100 } 101 102 /// Recognize a memcpy/memmove from a trivially otherwise unused alloca. 103 /// TODO: This should probably be integrated with visitAllocSites, but that 104 /// requires a deeper change to allow either unread or unwritten objects. 105 static bool hasUndefSource(AnyMemTransferInst *MI) { 106 auto *Src = MI->getRawSource(); 107 while (isa<GetElementPtrInst>(Src) || isa<BitCastInst>(Src)) { 108 if (!Src->hasOneUse()) 109 return false; 110 Src = cast<Instruction>(Src)->getOperand(0); 111 } 112 return isa<AllocaInst>(Src) && Src->hasOneUse(); 113 } 114 115 Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) { 116 Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT); 117 MaybeAlign CopyDstAlign = MI->getDestAlign(); 118 if (!CopyDstAlign || *CopyDstAlign < DstAlign) { 119 MI->setDestAlignment(DstAlign); 120 return MI; 121 } 122 123 Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT); 124 MaybeAlign CopySrcAlign = MI->getSourceAlign(); 125 if (!CopySrcAlign || *CopySrcAlign < SrcAlign) { 126 MI->setSourceAlignment(SrcAlign); 127 return MI; 128 } 129 130 // If we have a store to a location which is known constant, we can conclude 131 // that the store must be storing the constant value (else the memory 132 // wouldn't be constant), and this must be a noop. 133 if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) { 134 // Set the size of the copy to 0, it will be deleted on the next iteration. 135 MI->setLength(Constant::getNullValue(MI->getLength()->getType())); 136 return MI; 137 } 138 139 // If the source is provably undef, the memcpy/memmove doesn't do anything 140 // (unless the transfer is volatile). 141 if (hasUndefSource(MI) && !MI->isVolatile()) { 142 // Set the size of the copy to 0, it will be deleted on the next iteration. 143 MI->setLength(Constant::getNullValue(MI->getLength()->getType())); 144 return MI; 145 } 146 147 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with 148 // load/store. 149 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength()); 150 if (!MemOpLength) return nullptr; 151 152 // Source and destination pointer types are always "i8*" for intrinsic. See 153 // if the size is something we can handle with a single primitive load/store. 154 // A single load+store correctly handles overlapping memory in the memmove 155 // case. 156 uint64_t Size = MemOpLength->getLimitedValue(); 157 assert(Size && "0-sized memory transferring should be removed already."); 158 159 if (Size > 8 || (Size&(Size-1))) 160 return nullptr; // If not 1/2/4/8 bytes, exit. 161 162 // If it is an atomic and alignment is less than the size then we will 163 // introduce the unaligned memory access which will be later transformed 164 // into libcall in CodeGen. This is not evident performance gain so disable 165 // it now. 166 if (isa<AtomicMemTransferInst>(MI)) 167 if (*CopyDstAlign < Size || *CopySrcAlign < Size) 168 return nullptr; 169 170 // Use an integer load+store unless we can find something better. 171 IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3); 172 173 // If the memcpy has metadata describing the members, see if we can get the 174 // TBAA tag describing our copy. 175 AAMDNodes AACopyMD = MI->getAAMetadata(); 176 177 if (MDNode *M = AACopyMD.TBAAStruct) { 178 AACopyMD.TBAAStruct = nullptr; 179 if (M->getNumOperands() == 3 && M->getOperand(0) && 180 mdconst::hasa<ConstantInt>(M->getOperand(0)) && 181 mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() && 182 M->getOperand(1) && 183 mdconst::hasa<ConstantInt>(M->getOperand(1)) && 184 mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() == 185 Size && 186 M->getOperand(2) && isa<MDNode>(M->getOperand(2))) 187 AACopyMD.TBAA = cast<MDNode>(M->getOperand(2)); 188 } 189 190 Value *Src = MI->getArgOperand(1); 191 Value *Dest = MI->getArgOperand(0); 192 LoadInst *L = Builder.CreateLoad(IntType, Src); 193 // Alignment from the mem intrinsic will be better, so use it. 194 L->setAlignment(*CopySrcAlign); 195 L->setAAMetadata(AACopyMD); 196 MDNode *LoopMemParallelMD = 197 MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access); 198 if (LoopMemParallelMD) 199 L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD); 200 MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group); 201 if (AccessGroupMD) 202 L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD); 203 204 StoreInst *S = Builder.CreateStore(L, Dest); 205 // Alignment from the mem intrinsic will be better, so use it. 206 S->setAlignment(*CopyDstAlign); 207 S->setAAMetadata(AACopyMD); 208 if (LoopMemParallelMD) 209 S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD); 210 if (AccessGroupMD) 211 S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD); 212 S->copyMetadata(*MI, LLVMContext::MD_DIAssignID); 213 214 if (auto *MT = dyn_cast<MemTransferInst>(MI)) { 215 // non-atomics can be volatile 216 L->setVolatile(MT->isVolatile()); 217 S->setVolatile(MT->isVolatile()); 218 } 219 if (isa<AtomicMemTransferInst>(MI)) { 220 // atomics have to be unordered 221 L->setOrdering(AtomicOrdering::Unordered); 222 S->setOrdering(AtomicOrdering::Unordered); 223 } 224 225 // Set the size of the copy to 0, it will be deleted on the next iteration. 226 MI->setLength(Constant::getNullValue(MemOpLength->getType())); 227 return MI; 228 } 229 230 Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) { 231 const Align KnownAlignment = 232 getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT); 233 MaybeAlign MemSetAlign = MI->getDestAlign(); 234 if (!MemSetAlign || *MemSetAlign < KnownAlignment) { 235 MI->setDestAlignment(KnownAlignment); 236 return MI; 237 } 238 239 // If we have a store to a location which is known constant, we can conclude 240 // that the store must be storing the constant value (else the memory 241 // wouldn't be constant), and this must be a noop. 242 if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) { 243 // Set the size of the copy to 0, it will be deleted on the next iteration. 244 MI->setLength(Constant::getNullValue(MI->getLength()->getType())); 245 return MI; 246 } 247 248 // Remove memset with an undef value. 249 // FIXME: This is technically incorrect because it might overwrite a poison 250 // value. Change to PoisonValue once #52930 is resolved. 251 if (isa<UndefValue>(MI->getValue())) { 252 // Set the size of the copy to 0, it will be deleted on the next iteration. 253 MI->setLength(Constant::getNullValue(MI->getLength()->getType())); 254 return MI; 255 } 256 257 // Extract the length and alignment and fill if they are constant. 258 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength()); 259 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue()); 260 if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8)) 261 return nullptr; 262 const uint64_t Len = LenC->getLimitedValue(); 263 assert(Len && "0-sized memory setting should be removed already."); 264 const Align Alignment = MI->getDestAlign().valueOrOne(); 265 266 // If it is an atomic and alignment is less than the size then we will 267 // introduce the unaligned memory access which will be later transformed 268 // into libcall in CodeGen. This is not evident performance gain so disable 269 // it now. 270 if (isa<AtomicMemSetInst>(MI)) 271 if (Alignment < Len) 272 return nullptr; 273 274 // memset(s,c,n) -> store s, c (for n=1,2,4,8) 275 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) { 276 Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8. 277 278 Value *Dest = MI->getDest(); 279 280 // Extract the fill value and store. 281 const uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL; 282 Constant *FillVal = ConstantInt::get(ITy, Fill); 283 StoreInst *S = Builder.CreateStore(FillVal, Dest, MI->isVolatile()); 284 S->copyMetadata(*MI, LLVMContext::MD_DIAssignID); 285 for (auto *DAI : at::getAssignmentMarkers(S)) { 286 if (llvm::is_contained(DAI->location_ops(), FillC)) 287 DAI->replaceVariableLocationOp(FillC, FillVal); 288 } 289 290 S->setAlignment(Alignment); 291 if (isa<AtomicMemSetInst>(MI)) 292 S->setOrdering(AtomicOrdering::Unordered); 293 294 // Set the size of the copy to 0, it will be deleted on the next iteration. 295 MI->setLength(Constant::getNullValue(LenC->getType())); 296 return MI; 297 } 298 299 return nullptr; 300 } 301 302 // TODO, Obvious Missing Transforms: 303 // * Narrow width by halfs excluding zero/undef lanes 304 Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) { 305 Value *LoadPtr = II.getArgOperand(0); 306 const Align Alignment = 307 cast<ConstantInt>(II.getArgOperand(1))->getAlignValue(); 308 309 // If the mask is all ones or undefs, this is a plain vector load of the 1st 310 // argument. 311 if (maskIsAllOneOrUndef(II.getArgOperand(2))) { 312 LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment, 313 "unmaskedload"); 314 L->copyMetadata(II); 315 return L; 316 } 317 318 // If we can unconditionally load from this address, replace with a 319 // load/select idiom. TODO: use DT for context sensitive query 320 if (isDereferenceablePointer(LoadPtr, II.getType(), 321 II.getModule()->getDataLayout(), &II, &AC)) { 322 LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment, 323 "unmaskedload"); 324 LI->copyMetadata(II); 325 return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3)); 326 } 327 328 return nullptr; 329 } 330 331 // TODO, Obvious Missing Transforms: 332 // * Single constant active lane -> store 333 // * Narrow width by halfs excluding zero/undef lanes 334 Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) { 335 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3)); 336 if (!ConstMask) 337 return nullptr; 338 339 // If the mask is all zeros, this instruction does nothing. 340 if (ConstMask->isNullValue()) 341 return eraseInstFromFunction(II); 342 343 // If the mask is all ones, this is a plain vector store of the 1st argument. 344 if (ConstMask->isAllOnesValue()) { 345 Value *StorePtr = II.getArgOperand(1); 346 Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue(); 347 StoreInst *S = 348 new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment); 349 S->copyMetadata(II); 350 return S; 351 } 352 353 if (isa<ScalableVectorType>(ConstMask->getType())) 354 return nullptr; 355 356 // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts 357 APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask); 358 APInt PoisonElts(DemandedElts.getBitWidth(), 0); 359 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, 360 PoisonElts)) 361 return replaceOperand(II, 0, V); 362 363 return nullptr; 364 } 365 366 // TODO, Obvious Missing Transforms: 367 // * Single constant active lane load -> load 368 // * Dereferenceable address & few lanes -> scalarize speculative load/selects 369 // * Adjacent vector addresses -> masked.load 370 // * Narrow width by halfs excluding zero/undef lanes 371 // * Vector incrementing address -> vector masked load 372 Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) { 373 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2)); 374 if (!ConstMask) 375 return nullptr; 376 377 // Vector splat address w/known mask -> scalar load 378 // Fold the gather to load the source vector first lane 379 // because it is reloading the same value each time 380 if (ConstMask->isAllOnesValue()) 381 if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) { 382 auto *VecTy = cast<VectorType>(II.getType()); 383 const Align Alignment = 384 cast<ConstantInt>(II.getArgOperand(1))->getAlignValue(); 385 LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr, 386 Alignment, "load.scalar"); 387 Value *Shuf = 388 Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast"); 389 return replaceInstUsesWith(II, cast<Instruction>(Shuf)); 390 } 391 392 return nullptr; 393 } 394 395 // TODO, Obvious Missing Transforms: 396 // * Single constant active lane -> store 397 // * Adjacent vector addresses -> masked.store 398 // * Narrow store width by halfs excluding zero/undef lanes 399 // * Vector incrementing address -> vector masked store 400 Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) { 401 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3)); 402 if (!ConstMask) 403 return nullptr; 404 405 // If the mask is all zeros, a scatter does nothing. 406 if (ConstMask->isNullValue()) 407 return eraseInstFromFunction(II); 408 409 // Vector splat address -> scalar store 410 if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) { 411 // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr 412 if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) { 413 Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue(); 414 StoreInst *S = 415 new StoreInst(SplatValue, SplatPtr, /*IsVolatile=*/false, Alignment); 416 S->copyMetadata(II); 417 return S; 418 } 419 // scatter(vector, splat(ptr), splat(true)) -> store extract(vector, 420 // lastlane), ptr 421 if (ConstMask->isAllOnesValue()) { 422 Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue(); 423 VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType()); 424 ElementCount VF = WideLoadTy->getElementCount(); 425 Value *RunTimeVF = Builder.CreateElementCount(Builder.getInt32Ty(), VF); 426 Value *LastLane = Builder.CreateSub(RunTimeVF, Builder.getInt32(1)); 427 Value *Extract = 428 Builder.CreateExtractElement(II.getArgOperand(0), LastLane); 429 StoreInst *S = 430 new StoreInst(Extract, SplatPtr, /*IsVolatile=*/false, Alignment); 431 S->copyMetadata(II); 432 return S; 433 } 434 } 435 if (isa<ScalableVectorType>(ConstMask->getType())) 436 return nullptr; 437 438 // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts 439 APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask); 440 APInt PoisonElts(DemandedElts.getBitWidth(), 0); 441 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, 442 PoisonElts)) 443 return replaceOperand(II, 0, V); 444 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts, 445 PoisonElts)) 446 return replaceOperand(II, 1, V); 447 448 return nullptr; 449 } 450 451 /// This function transforms launder.invariant.group and strip.invariant.group 452 /// like: 453 /// launder(launder(%x)) -> launder(%x) (the result is not the argument) 454 /// launder(strip(%x)) -> launder(%x) 455 /// strip(strip(%x)) -> strip(%x) (the result is not the argument) 456 /// strip(launder(%x)) -> strip(%x) 457 /// This is legal because it preserves the most recent information about 458 /// the presence or absence of invariant.group. 459 static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II, 460 InstCombinerImpl &IC) { 461 auto *Arg = II.getArgOperand(0); 462 auto *StrippedArg = Arg->stripPointerCasts(); 463 auto *StrippedInvariantGroupsArg = StrippedArg; 464 while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) { 465 if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group && 466 Intr->getIntrinsicID() != Intrinsic::strip_invariant_group) 467 break; 468 StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts(); 469 } 470 if (StrippedArg == StrippedInvariantGroupsArg) 471 return nullptr; // No launders/strips to remove. 472 473 Value *Result = nullptr; 474 475 if (II.getIntrinsicID() == Intrinsic::launder_invariant_group) 476 Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg); 477 else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group) 478 Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg); 479 else 480 llvm_unreachable( 481 "simplifyInvariantGroupIntrinsic only handles launder and strip"); 482 if (Result->getType()->getPointerAddressSpace() != 483 II.getType()->getPointerAddressSpace()) 484 Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType()); 485 486 return cast<Instruction>(Result); 487 } 488 489 static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) { 490 assert((II.getIntrinsicID() == Intrinsic::cttz || 491 II.getIntrinsicID() == Intrinsic::ctlz) && 492 "Expected cttz or ctlz intrinsic"); 493 bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz; 494 Value *Op0 = II.getArgOperand(0); 495 Value *Op1 = II.getArgOperand(1); 496 Value *X; 497 // ctlz(bitreverse(x)) -> cttz(x) 498 // cttz(bitreverse(x)) -> ctlz(x) 499 if (match(Op0, m_BitReverse(m_Value(X)))) { 500 Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz; 501 Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType()); 502 return CallInst::Create(F, {X, II.getArgOperand(1)}); 503 } 504 505 if (II.getType()->isIntOrIntVectorTy(1)) { 506 // ctlz/cttz i1 Op0 --> not Op0 507 if (match(Op1, m_Zero())) 508 return BinaryOperator::CreateNot(Op0); 509 // If zero is poison, then the input can be assumed to be "true", so the 510 // instruction simplifies to "false". 511 assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1"); 512 return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(II.getType())); 513 } 514 515 Constant *C; 516 517 if (IsTZ) { 518 // cttz(-x) -> cttz(x) 519 if (match(Op0, m_Neg(m_Value(X)))) 520 return IC.replaceOperand(II, 0, X); 521 522 // cttz(-x & x) -> cttz(x) 523 if (match(Op0, m_c_And(m_Neg(m_Value(X)), m_Deferred(X)))) 524 return IC.replaceOperand(II, 0, X); 525 526 // cttz(sext(x)) -> cttz(zext(x)) 527 if (match(Op0, m_OneUse(m_SExt(m_Value(X))))) { 528 auto *Zext = IC.Builder.CreateZExt(X, II.getType()); 529 auto *CttzZext = 530 IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Zext, Op1); 531 return IC.replaceInstUsesWith(II, CttzZext); 532 } 533 534 // Zext doesn't change the number of trailing zeros, so narrow: 535 // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsPoison' parameter is 'true'. 536 if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) { 537 auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X, 538 IC.Builder.getTrue()); 539 auto *ZextCttz = IC.Builder.CreateZExt(Cttz, II.getType()); 540 return IC.replaceInstUsesWith(II, ZextCttz); 541 } 542 543 // cttz(abs(x)) -> cttz(x) 544 // cttz(nabs(x)) -> cttz(x) 545 Value *Y; 546 SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor; 547 if (SPF == SPF_ABS || SPF == SPF_NABS) 548 return IC.replaceOperand(II, 0, X); 549 550 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X)))) 551 return IC.replaceOperand(II, 0, X); 552 553 // cttz(shl(%const, %val), 1) --> add(cttz(%const, 1), %val) 554 if (match(Op0, m_Shl(m_ImmConstant(C), m_Value(X))) && 555 match(Op1, m_One())) { 556 Value *ConstCttz = 557 IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, C, Op1); 558 return BinaryOperator::CreateAdd(ConstCttz, X); 559 } 560 561 // cttz(lshr exact (%const, %val), 1) --> sub(cttz(%const, 1), %val) 562 if (match(Op0, m_Exact(m_LShr(m_ImmConstant(C), m_Value(X)))) && 563 match(Op1, m_One())) { 564 Value *ConstCttz = 565 IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, C, Op1); 566 return BinaryOperator::CreateSub(ConstCttz, X); 567 } 568 } else { 569 // ctlz(lshr(%const, %val), 1) --> add(ctlz(%const, 1), %val) 570 if (match(Op0, m_LShr(m_ImmConstant(C), m_Value(X))) && 571 match(Op1, m_One())) { 572 Value *ConstCtlz = 573 IC.Builder.CreateBinaryIntrinsic(Intrinsic::ctlz, C, Op1); 574 return BinaryOperator::CreateAdd(ConstCtlz, X); 575 } 576 577 // ctlz(shl nuw (%const, %val), 1) --> sub(ctlz(%const, 1), %val) 578 if (match(Op0, m_NUWShl(m_ImmConstant(C), m_Value(X))) && 579 match(Op1, m_One())) { 580 Value *ConstCtlz = 581 IC.Builder.CreateBinaryIntrinsic(Intrinsic::ctlz, C, Op1); 582 return BinaryOperator::CreateSub(ConstCtlz, X); 583 } 584 } 585 586 KnownBits Known = IC.computeKnownBits(Op0, 0, &II); 587 588 // Create a mask for bits above (ctlz) or below (cttz) the first known one. 589 unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros() 590 : Known.countMaxLeadingZeros(); 591 unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros() 592 : Known.countMinLeadingZeros(); 593 594 // If all bits above (ctlz) or below (cttz) the first known one are known 595 // zero, this value is constant. 596 // FIXME: This should be in InstSimplify because we're replacing an 597 // instruction with a constant. 598 if (PossibleZeros == DefiniteZeros) { 599 auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros); 600 return IC.replaceInstUsesWith(II, C); 601 } 602 603 // If the input to cttz/ctlz is known to be non-zero, 604 // then change the 'ZeroIsPoison' parameter to 'true' 605 // because we know the zero behavior can't affect the result. 606 if (!Known.One.isZero() || 607 isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II, 608 &IC.getDominatorTree())) { 609 if (!match(II.getArgOperand(1), m_One())) 610 return IC.replaceOperand(II, 1, IC.Builder.getTrue()); 611 } 612 613 // Add range metadata since known bits can't completely reflect what we know. 614 auto *IT = cast<IntegerType>(Op0->getType()->getScalarType()); 615 if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) { 616 Metadata *LowAndHigh[] = { 617 ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)), 618 ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))}; 619 II.setMetadata(LLVMContext::MD_range, 620 MDNode::get(II.getContext(), LowAndHigh)); 621 return &II; 622 } 623 624 return nullptr; 625 } 626 627 static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) { 628 assert(II.getIntrinsicID() == Intrinsic::ctpop && 629 "Expected ctpop intrinsic"); 630 Type *Ty = II.getType(); 631 unsigned BitWidth = Ty->getScalarSizeInBits(); 632 Value *Op0 = II.getArgOperand(0); 633 Value *X, *Y; 634 635 // ctpop(bitreverse(x)) -> ctpop(x) 636 // ctpop(bswap(x)) -> ctpop(x) 637 if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X)))) 638 return IC.replaceOperand(II, 0, X); 639 640 // ctpop(rot(x)) -> ctpop(x) 641 if ((match(Op0, m_FShl(m_Value(X), m_Value(Y), m_Value())) || 642 match(Op0, m_FShr(m_Value(X), m_Value(Y), m_Value()))) && 643 X == Y) 644 return IC.replaceOperand(II, 0, X); 645 646 // ctpop(x | -x) -> bitwidth - cttz(x, false) 647 if (Op0->hasOneUse() && 648 match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) { 649 Function *F = 650 Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty); 651 auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()}); 652 auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth)); 653 return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz)); 654 } 655 656 // ctpop(~x & (x - 1)) -> cttz(x, false) 657 if (match(Op0, 658 m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) { 659 Function *F = 660 Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty); 661 return CallInst::Create(F, {X, IC.Builder.getFalse()}); 662 } 663 664 // Zext doesn't change the number of set bits, so narrow: 665 // ctpop (zext X) --> zext (ctpop X) 666 if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) { 667 Value *NarrowPop = IC.Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, X); 668 return CastInst::Create(Instruction::ZExt, NarrowPop, Ty); 669 } 670 671 KnownBits Known(BitWidth); 672 IC.computeKnownBits(Op0, Known, 0, &II); 673 674 // If all bits are zero except for exactly one fixed bit, then the result 675 // must be 0 or 1, and we can get that answer by shifting to LSB: 676 // ctpop (X & 32) --> (X & 32) >> 5 677 // TODO: Investigate removing this as its likely unnecessary given the below 678 // `isKnownToBeAPowerOfTwo` check. 679 if ((~Known.Zero).isPowerOf2()) 680 return BinaryOperator::CreateLShr( 681 Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2())); 682 683 // More generally we can also handle non-constant power of 2 patterns such as 684 // shl/shr(Pow2, X), (X & -X), etc... by transforming: 685 // ctpop(Pow2OrZero) --> icmp ne X, 0 686 if (IC.isKnownToBeAPowerOfTwo(Op0, /* OrZero */ true)) 687 return CastInst::Create(Instruction::ZExt, 688 IC.Builder.CreateICmp(ICmpInst::ICMP_NE, Op0, 689 Constant::getNullValue(Ty)), 690 Ty); 691 692 // Add range metadata since known bits can't completely reflect what we know. 693 auto *IT = cast<IntegerType>(Ty->getScalarType()); 694 unsigned MinCount = Known.countMinPopulation(); 695 unsigned MaxCount = Known.countMaxPopulation(); 696 if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) { 697 Metadata *LowAndHigh[] = { 698 ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)), 699 ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))}; 700 II.setMetadata(LLVMContext::MD_range, 701 MDNode::get(II.getContext(), LowAndHigh)); 702 return &II; 703 } 704 705 return nullptr; 706 } 707 708 /// Convert a table lookup to shufflevector if the mask is constant. 709 /// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in 710 /// which case we could lower the shufflevector with rev64 instructions 711 /// as it's actually a byte reverse. 712 static Value *simplifyNeonTbl1(const IntrinsicInst &II, 713 InstCombiner::BuilderTy &Builder) { 714 // Bail out if the mask is not a constant. 715 auto *C = dyn_cast<Constant>(II.getArgOperand(1)); 716 if (!C) 717 return nullptr; 718 719 auto *VecTy = cast<FixedVectorType>(II.getType()); 720 unsigned NumElts = VecTy->getNumElements(); 721 722 // Only perform this transformation for <8 x i8> vector types. 723 if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8) 724 return nullptr; 725 726 int Indexes[8]; 727 728 for (unsigned I = 0; I < NumElts; ++I) { 729 Constant *COp = C->getAggregateElement(I); 730 731 if (!COp || !isa<ConstantInt>(COp)) 732 return nullptr; 733 734 Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue(); 735 736 // Make sure the mask indices are in range. 737 if ((unsigned)Indexes[I] >= NumElts) 738 return nullptr; 739 } 740 741 auto *V1 = II.getArgOperand(0); 742 auto *V2 = Constant::getNullValue(V1->getType()); 743 return Builder.CreateShuffleVector(V1, V2, ArrayRef(Indexes)); 744 } 745 746 // Returns true iff the 2 intrinsics have the same operands, limiting the 747 // comparison to the first NumOperands. 748 static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E, 749 unsigned NumOperands) { 750 assert(I.arg_size() >= NumOperands && "Not enough operands"); 751 assert(E.arg_size() >= NumOperands && "Not enough operands"); 752 for (unsigned i = 0; i < NumOperands; i++) 753 if (I.getArgOperand(i) != E.getArgOperand(i)) 754 return false; 755 return true; 756 } 757 758 // Remove trivially empty start/end intrinsic ranges, i.e. a start 759 // immediately followed by an end (ignoring debuginfo or other 760 // start/end intrinsics in between). As this handles only the most trivial 761 // cases, tracking the nesting level is not needed: 762 // 763 // call @llvm.foo.start(i1 0) 764 // call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed 765 // call @llvm.foo.end(i1 0) 766 // call @llvm.foo.end(i1 0) ; &I 767 static bool 768 removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC, 769 std::function<bool(const IntrinsicInst &)> IsStart) { 770 // We start from the end intrinsic and scan backwards, so that InstCombine 771 // has already processed (and potentially removed) all the instructions 772 // before the end intrinsic. 773 BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend()); 774 for (; BI != BE; ++BI) { 775 if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) { 776 if (I->isDebugOrPseudoInst() || 777 I->getIntrinsicID() == EndI.getIntrinsicID()) 778 continue; 779 if (IsStart(*I)) { 780 if (haveSameOperands(EndI, *I, EndI.arg_size())) { 781 IC.eraseInstFromFunction(*I); 782 IC.eraseInstFromFunction(EndI); 783 return true; 784 } 785 // Skip start intrinsics that don't pair with this end intrinsic. 786 continue; 787 } 788 } 789 break; 790 } 791 792 return false; 793 } 794 795 Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) { 796 removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) { 797 return I.getIntrinsicID() == Intrinsic::vastart || 798 I.getIntrinsicID() == Intrinsic::vacopy; 799 }); 800 return nullptr; 801 } 802 803 static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) { 804 assert(Call.arg_size() > 1 && "Need at least 2 args to swap"); 805 Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1); 806 if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) { 807 Call.setArgOperand(0, Arg1); 808 Call.setArgOperand(1, Arg0); 809 return &Call; 810 } 811 return nullptr; 812 } 813 814 /// Creates a result tuple for an overflow intrinsic \p II with a given 815 /// \p Result and a constant \p Overflow value. 816 static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result, 817 Constant *Overflow) { 818 Constant *V[] = {PoisonValue::get(Result->getType()), Overflow}; 819 StructType *ST = cast<StructType>(II->getType()); 820 Constant *Struct = ConstantStruct::get(ST, V); 821 return InsertValueInst::Create(Struct, Result, 0); 822 } 823 824 Instruction * 825 InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) { 826 WithOverflowInst *WO = cast<WithOverflowInst>(II); 827 Value *OperationResult = nullptr; 828 Constant *OverflowResult = nullptr; 829 if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(), 830 WO->getRHS(), *WO, OperationResult, OverflowResult)) 831 return createOverflowTuple(WO, OperationResult, OverflowResult); 832 return nullptr; 833 } 834 835 static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) { 836 Ty = Ty->getScalarType(); 837 return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE; 838 } 839 840 static bool inputDenormalIsDAZ(const Function &F, const Type *Ty) { 841 Ty = Ty->getScalarType(); 842 return F.getDenormalMode(Ty->getFltSemantics()).inputsAreZero(); 843 } 844 845 /// \returns the compare predicate type if the test performed by 846 /// llvm.is.fpclass(x, \p Mask) is equivalent to fcmp o__ x, 0.0 with the 847 /// floating-point environment assumed for \p F for type \p Ty 848 static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask, 849 const Function &F, Type *Ty) { 850 switch (static_cast<unsigned>(Mask)) { 851 case fcZero: 852 if (inputDenormalIsIEEE(F, Ty)) 853 return FCmpInst::FCMP_OEQ; 854 break; 855 case fcZero | fcSubnormal: 856 if (inputDenormalIsDAZ(F, Ty)) 857 return FCmpInst::FCMP_OEQ; 858 break; 859 case fcPositive | fcNegZero: 860 if (inputDenormalIsIEEE(F, Ty)) 861 return FCmpInst::FCMP_OGE; 862 break; 863 case fcPositive | fcNegZero | fcNegSubnormal: 864 if (inputDenormalIsDAZ(F, Ty)) 865 return FCmpInst::FCMP_OGE; 866 break; 867 case fcPosSubnormal | fcPosNormal | fcPosInf: 868 if (inputDenormalIsIEEE(F, Ty)) 869 return FCmpInst::FCMP_OGT; 870 break; 871 case fcNegative | fcPosZero: 872 if (inputDenormalIsIEEE(F, Ty)) 873 return FCmpInst::FCMP_OLE; 874 break; 875 case fcNegative | fcPosZero | fcPosSubnormal: 876 if (inputDenormalIsDAZ(F, Ty)) 877 return FCmpInst::FCMP_OLE; 878 break; 879 case fcNegSubnormal | fcNegNormal | fcNegInf: 880 if (inputDenormalIsIEEE(F, Ty)) 881 return FCmpInst::FCMP_OLT; 882 break; 883 case fcPosNormal | fcPosInf: 884 if (inputDenormalIsDAZ(F, Ty)) 885 return FCmpInst::FCMP_OGT; 886 break; 887 case fcNegNormal | fcNegInf: 888 if (inputDenormalIsDAZ(F, Ty)) 889 return FCmpInst::FCMP_OLT; 890 break; 891 case ~fcZero & ~fcNan: 892 if (inputDenormalIsIEEE(F, Ty)) 893 return FCmpInst::FCMP_ONE; 894 break; 895 case ~(fcZero | fcSubnormal) & ~fcNan: 896 if (inputDenormalIsDAZ(F, Ty)) 897 return FCmpInst::FCMP_ONE; 898 break; 899 default: 900 break; 901 } 902 903 return FCmpInst::BAD_FCMP_PREDICATE; 904 } 905 906 Instruction *InstCombinerImpl::foldIntrinsicIsFPClass(IntrinsicInst &II) { 907 Value *Src0 = II.getArgOperand(0); 908 Value *Src1 = II.getArgOperand(1); 909 const ConstantInt *CMask = cast<ConstantInt>(Src1); 910 FPClassTest Mask = static_cast<FPClassTest>(CMask->getZExtValue()); 911 const bool IsUnordered = (Mask & fcNan) == fcNan; 912 const bool IsOrdered = (Mask & fcNan) == fcNone; 913 const FPClassTest OrderedMask = Mask & ~fcNan; 914 const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan; 915 916 const bool IsStrict = II.isStrictFP(); 917 918 Value *FNegSrc; 919 if (match(Src0, m_FNeg(m_Value(FNegSrc)))) { 920 // is.fpclass (fneg x), mask -> is.fpclass x, (fneg mask) 921 922 II.setArgOperand(1, ConstantInt::get(Src1->getType(), fneg(Mask))); 923 return replaceOperand(II, 0, FNegSrc); 924 } 925 926 Value *FAbsSrc; 927 if (match(Src0, m_FAbs(m_Value(FAbsSrc)))) { 928 II.setArgOperand(1, ConstantInt::get(Src1->getType(), inverse_fabs(Mask))); 929 return replaceOperand(II, 0, FAbsSrc); 930 } 931 932 if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) && 933 (IsOrdered || IsUnordered) && !IsStrict) { 934 // is.fpclass(x, fcInf) -> fcmp oeq fabs(x), +inf 935 // is.fpclass(x, ~fcInf) -> fcmp one fabs(x), +inf 936 // is.fpclass(x, fcInf|fcNan) -> fcmp ueq fabs(x), +inf 937 // is.fpclass(x, ~(fcInf|fcNan)) -> fcmp une fabs(x), +inf 938 Constant *Inf = ConstantFP::getInfinity(Src0->getType()); 939 FCmpInst::Predicate Pred = 940 IsUnordered ? FCmpInst::FCMP_UEQ : FCmpInst::FCMP_OEQ; 941 if (OrderedInvertedMask == fcInf) 942 Pred = IsUnordered ? FCmpInst::FCMP_UNE : FCmpInst::FCMP_ONE; 943 944 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Src0); 945 Value *CmpInf = Builder.CreateFCmp(Pred, Fabs, Inf); 946 CmpInf->takeName(&II); 947 return replaceInstUsesWith(II, CmpInf); 948 } 949 950 if ((OrderedMask == fcPosInf || OrderedMask == fcNegInf) && 951 (IsOrdered || IsUnordered) && !IsStrict) { 952 // is.fpclass(x, fcPosInf) -> fcmp oeq x, +inf 953 // is.fpclass(x, fcNegInf) -> fcmp oeq x, -inf 954 // is.fpclass(x, fcPosInf|fcNan) -> fcmp ueq x, +inf 955 // is.fpclass(x, fcNegInf|fcNan) -> fcmp ueq x, -inf 956 Constant *Inf = 957 ConstantFP::getInfinity(Src0->getType(), OrderedMask == fcNegInf); 958 Value *EqInf = IsUnordered ? Builder.CreateFCmpUEQ(Src0, Inf) 959 : Builder.CreateFCmpOEQ(Src0, Inf); 960 961 EqInf->takeName(&II); 962 return replaceInstUsesWith(II, EqInf); 963 } 964 965 if ((OrderedInvertedMask == fcPosInf || OrderedInvertedMask == fcNegInf) && 966 (IsOrdered || IsUnordered) && !IsStrict) { 967 // is.fpclass(x, ~fcPosInf) -> fcmp one x, +inf 968 // is.fpclass(x, ~fcNegInf) -> fcmp one x, -inf 969 // is.fpclass(x, ~fcPosInf|fcNan) -> fcmp une x, +inf 970 // is.fpclass(x, ~fcNegInf|fcNan) -> fcmp une x, -inf 971 Constant *Inf = ConstantFP::getInfinity(Src0->getType(), 972 OrderedInvertedMask == fcNegInf); 973 Value *NeInf = IsUnordered ? Builder.CreateFCmpUNE(Src0, Inf) 974 : Builder.CreateFCmpONE(Src0, Inf); 975 NeInf->takeName(&II); 976 return replaceInstUsesWith(II, NeInf); 977 } 978 979 if (Mask == fcNan && !IsStrict) { 980 // Equivalent of isnan. Replace with standard fcmp if we don't care about FP 981 // exceptions. 982 Value *IsNan = 983 Builder.CreateFCmpUNO(Src0, ConstantFP::getZero(Src0->getType())); 984 IsNan->takeName(&II); 985 return replaceInstUsesWith(II, IsNan); 986 } 987 988 if (Mask == (~fcNan & fcAllFlags) && !IsStrict) { 989 // Equivalent of !isnan. Replace with standard fcmp. 990 Value *FCmp = 991 Builder.CreateFCmpORD(Src0, ConstantFP::getZero(Src0->getType())); 992 FCmp->takeName(&II); 993 return replaceInstUsesWith(II, FCmp); 994 } 995 996 FCmpInst::Predicate PredType = FCmpInst::BAD_FCMP_PREDICATE; 997 998 // Try to replace with an fcmp with 0 999 // 1000 // is.fpclass(x, fcZero) -> fcmp oeq x, 0.0 1001 // is.fpclass(x, fcZero | fcNan) -> fcmp ueq x, 0.0 1002 // is.fpclass(x, ~fcZero & ~fcNan) -> fcmp one x, 0.0 1003 // is.fpclass(x, ~fcZero) -> fcmp une x, 0.0 1004 // 1005 // is.fpclass(x, fcPosSubnormal | fcPosNormal | fcPosInf) -> fcmp ogt x, 0.0 1006 // is.fpclass(x, fcPositive | fcNegZero) -> fcmp oge x, 0.0 1007 // 1008 // is.fpclass(x, fcNegSubnormal | fcNegNormal | fcNegInf) -> fcmp olt x, 0.0 1009 // is.fpclass(x, fcNegative | fcPosZero) -> fcmp ole x, 0.0 1010 // 1011 if (!IsStrict && (IsOrdered || IsUnordered) && 1012 (PredType = fpclassTestIsFCmp0(OrderedMask, *II.getFunction(), 1013 Src0->getType())) != 1014 FCmpInst::BAD_FCMP_PREDICATE) { 1015 Constant *Zero = ConstantFP::getZero(Src0->getType()); 1016 // Equivalent of == 0. 1017 Value *FCmp = Builder.CreateFCmp( 1018 IsUnordered ? FCmpInst::getUnorderedPredicate(PredType) : PredType, 1019 Src0, Zero); 1020 1021 FCmp->takeName(&II); 1022 return replaceInstUsesWith(II, FCmp); 1023 } 1024 1025 KnownFPClass Known = computeKnownFPClass(Src0, Mask, &II); 1026 1027 // Clear test bits we know must be false from the source value. 1028 // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other 1029 // fp_class (ninf x), ninf|pinf|other -> fp_class (ninf x), other 1030 if ((Mask & Known.KnownFPClasses) != Mask) { 1031 II.setArgOperand( 1032 1, ConstantInt::get(Src1->getType(), Mask & Known.KnownFPClasses)); 1033 return &II; 1034 } 1035 1036 // If none of the tests which can return false are possible, fold to true. 1037 // fp_class (nnan x), ~(qnan|snan) -> true 1038 // fp_class (ninf x), ~(ninf|pinf) -> true 1039 if (Mask == Known.KnownFPClasses) 1040 return replaceInstUsesWith(II, ConstantInt::get(II.getType(), true)); 1041 1042 return nullptr; 1043 } 1044 1045 static std::optional<bool> getKnownSign(Value *Op, Instruction *CxtI, 1046 const DataLayout &DL, AssumptionCache *AC, 1047 DominatorTree *DT) { 1048 KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT); 1049 if (Known.isNonNegative()) 1050 return false; 1051 if (Known.isNegative()) 1052 return true; 1053 1054 Value *X, *Y; 1055 if (match(Op, m_NSWSub(m_Value(X), m_Value(Y)))) 1056 return isImpliedByDomCondition(ICmpInst::ICMP_SLT, X, Y, CxtI, DL); 1057 1058 return isImpliedByDomCondition( 1059 ICmpInst::ICMP_SLT, Op, Constant::getNullValue(Op->getType()), CxtI, DL); 1060 } 1061 1062 static std::optional<bool> getKnownSignOrZero(Value *Op, Instruction *CxtI, 1063 const DataLayout &DL, 1064 AssumptionCache *AC, 1065 DominatorTree *DT) { 1066 if (std::optional<bool> Sign = getKnownSign(Op, CxtI, DL, AC, DT)) 1067 return Sign; 1068 1069 Value *X, *Y; 1070 if (match(Op, m_NSWSub(m_Value(X), m_Value(Y)))) 1071 return isImpliedByDomCondition(ICmpInst::ICMP_SLE, X, Y, CxtI, DL); 1072 1073 return std::nullopt; 1074 } 1075 1076 /// Return true if two values \p Op0 and \p Op1 are known to have the same sign. 1077 static bool signBitMustBeTheSame(Value *Op0, Value *Op1, Instruction *CxtI, 1078 const DataLayout &DL, AssumptionCache *AC, 1079 DominatorTree *DT) { 1080 std::optional<bool> Known1 = getKnownSign(Op1, CxtI, DL, AC, DT); 1081 if (!Known1) 1082 return false; 1083 std::optional<bool> Known0 = getKnownSign(Op0, CxtI, DL, AC, DT); 1084 if (!Known0) 1085 return false; 1086 return *Known0 == *Known1; 1087 } 1088 1089 /// Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0. This 1090 /// can trigger other combines. 1091 static Instruction *moveAddAfterMinMax(IntrinsicInst *II, 1092 InstCombiner::BuilderTy &Builder) { 1093 Intrinsic::ID MinMaxID = II->getIntrinsicID(); 1094 assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin || 1095 MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) && 1096 "Expected a min or max intrinsic"); 1097 1098 // TODO: Match vectors with undef elements, but undef may not propagate. 1099 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1); 1100 Value *X; 1101 const APInt *C0, *C1; 1102 if (!match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C0)))) || 1103 !match(Op1, m_APInt(C1))) 1104 return nullptr; 1105 1106 // Check for necessary no-wrap and overflow constraints. 1107 bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin; 1108 auto *Add = cast<BinaryOperator>(Op0); 1109 if ((IsSigned && !Add->hasNoSignedWrap()) || 1110 (!IsSigned && !Add->hasNoUnsignedWrap())) 1111 return nullptr; 1112 1113 // If the constant difference overflows, then instsimplify should reduce the 1114 // min/max to the add or C1. 1115 bool Overflow; 1116 APInt CDiff = 1117 IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow); 1118 assert(!Overflow && "Expected simplify of min/max"); 1119 1120 // min/max (add X, C0), C1 --> add (min/max X, C1 - C0), C0 1121 // Note: the "mismatched" no-overflow setting does not propagate. 1122 Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff); 1123 Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC); 1124 return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1)) 1125 : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1)); 1126 } 1127 /// Match a sadd_sat or ssub_sat which is using min/max to clamp the value. 1128 Instruction *InstCombinerImpl::matchSAddSubSat(IntrinsicInst &MinMax1) { 1129 Type *Ty = MinMax1.getType(); 1130 1131 // We are looking for a tree of: 1132 // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B)))) 1133 // Where the min and max could be reversed 1134 Instruction *MinMax2; 1135 BinaryOperator *AddSub; 1136 const APInt *MinValue, *MaxValue; 1137 if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) { 1138 if (!match(MinMax2, m_SMax(m_BinOp(AddSub), m_APInt(MinValue)))) 1139 return nullptr; 1140 } else if (match(&MinMax1, 1141 m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) { 1142 if (!match(MinMax2, m_SMin(m_BinOp(AddSub), m_APInt(MaxValue)))) 1143 return nullptr; 1144 } else 1145 return nullptr; 1146 1147 // Check that the constants clamp a saturate, and that the new type would be 1148 // sensible to convert to. 1149 if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1) 1150 return nullptr; 1151 // In what bitwidth can this be treated as saturating arithmetics? 1152 unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1; 1153 // FIXME: This isn't quite right for vectors, but using the scalar type is a 1154 // good first approximation for what should be done there. 1155 if (!shouldChangeType(Ty->getScalarType()->getIntegerBitWidth(), NewBitWidth)) 1156 return nullptr; 1157 1158 // Also make sure that the inner min/max and the add/sub have one use. 1159 if (!MinMax2->hasOneUse() || !AddSub->hasOneUse()) 1160 return nullptr; 1161 1162 // Create the new type (which can be a vector type) 1163 Type *NewTy = Ty->getWithNewBitWidth(NewBitWidth); 1164 1165 Intrinsic::ID IntrinsicID; 1166 if (AddSub->getOpcode() == Instruction::Add) 1167 IntrinsicID = Intrinsic::sadd_sat; 1168 else if (AddSub->getOpcode() == Instruction::Sub) 1169 IntrinsicID = Intrinsic::ssub_sat; 1170 else 1171 return nullptr; 1172 1173 // The two operands of the add/sub must be nsw-truncatable to the NewTy. This 1174 // is usually achieved via a sext from a smaller type. 1175 if (ComputeMaxSignificantBits(AddSub->getOperand(0), 0, AddSub) > 1176 NewBitWidth || 1177 ComputeMaxSignificantBits(AddSub->getOperand(1), 0, AddSub) > NewBitWidth) 1178 return nullptr; 1179 1180 // Finally create and return the sat intrinsic, truncated to the new type 1181 Function *F = Intrinsic::getDeclaration(MinMax1.getModule(), IntrinsicID, NewTy); 1182 Value *AT = Builder.CreateTrunc(AddSub->getOperand(0), NewTy); 1183 Value *BT = Builder.CreateTrunc(AddSub->getOperand(1), NewTy); 1184 Value *Sat = Builder.CreateCall(F, {AT, BT}); 1185 return CastInst::Create(Instruction::SExt, Sat, Ty); 1186 } 1187 1188 1189 /// If we have a clamp pattern like max (min X, 42), 41 -- where the output 1190 /// can only be one of two possible constant values -- turn that into a select 1191 /// of constants. 1192 static Instruction *foldClampRangeOfTwo(IntrinsicInst *II, 1193 InstCombiner::BuilderTy &Builder) { 1194 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1); 1195 Value *X; 1196 const APInt *C0, *C1; 1197 if (!match(I1, m_APInt(C1)) || !I0->hasOneUse()) 1198 return nullptr; 1199 1200 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 1201 switch (II->getIntrinsicID()) { 1202 case Intrinsic::smax: 1203 if (match(I0, m_SMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1) 1204 Pred = ICmpInst::ICMP_SGT; 1205 break; 1206 case Intrinsic::smin: 1207 if (match(I0, m_SMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1) 1208 Pred = ICmpInst::ICMP_SLT; 1209 break; 1210 case Intrinsic::umax: 1211 if (match(I0, m_UMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1) 1212 Pred = ICmpInst::ICMP_UGT; 1213 break; 1214 case Intrinsic::umin: 1215 if (match(I0, m_UMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1) 1216 Pred = ICmpInst::ICMP_ULT; 1217 break; 1218 default: 1219 llvm_unreachable("Expected min/max intrinsic"); 1220 } 1221 if (Pred == CmpInst::BAD_ICMP_PREDICATE) 1222 return nullptr; 1223 1224 // max (min X, 42), 41 --> X > 41 ? 42 : 41 1225 // min (max X, 42), 43 --> X < 43 ? 42 : 43 1226 Value *Cmp = Builder.CreateICmp(Pred, X, I1); 1227 return SelectInst::Create(Cmp, ConstantInt::get(II->getType(), *C0), I1); 1228 } 1229 1230 /// If this min/max has a constant operand and an operand that is a matching 1231 /// min/max with a constant operand, constant-fold the 2 constant operands. 1232 static Value *reassociateMinMaxWithConstants(IntrinsicInst *II, 1233 IRBuilderBase &Builder) { 1234 Intrinsic::ID MinMaxID = II->getIntrinsicID(); 1235 auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0)); 1236 if (!LHS || LHS->getIntrinsicID() != MinMaxID) 1237 return nullptr; 1238 1239 Constant *C0, *C1; 1240 if (!match(LHS->getArgOperand(1), m_ImmConstant(C0)) || 1241 !match(II->getArgOperand(1), m_ImmConstant(C1))) 1242 return nullptr; 1243 1244 // max (max X, C0), C1 --> max X, (max C0, C1) --> max X, NewC 1245 ICmpInst::Predicate Pred = MinMaxIntrinsic::getPredicate(MinMaxID); 1246 Value *CondC = Builder.CreateICmp(Pred, C0, C1); 1247 Value *NewC = Builder.CreateSelect(CondC, C0, C1); 1248 return Builder.CreateIntrinsic(MinMaxID, II->getType(), 1249 {LHS->getArgOperand(0), NewC}); 1250 } 1251 1252 /// If this min/max has a matching min/max operand with a constant, try to push 1253 /// the constant operand into this instruction. This can enable more folds. 1254 static Instruction * 1255 reassociateMinMaxWithConstantInOperand(IntrinsicInst *II, 1256 InstCombiner::BuilderTy &Builder) { 1257 // Match and capture a min/max operand candidate. 1258 Value *X, *Y; 1259 Constant *C; 1260 Instruction *Inner; 1261 if (!match(II, m_c_MaxOrMin(m_OneUse(m_CombineAnd( 1262 m_Instruction(Inner), 1263 m_MaxOrMin(m_Value(X), m_ImmConstant(C)))), 1264 m_Value(Y)))) 1265 return nullptr; 1266 1267 // The inner op must match. Check for constants to avoid infinite loops. 1268 Intrinsic::ID MinMaxID = II->getIntrinsicID(); 1269 auto *InnerMM = dyn_cast<IntrinsicInst>(Inner); 1270 if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID || 1271 match(X, m_ImmConstant()) || match(Y, m_ImmConstant())) 1272 return nullptr; 1273 1274 // max (max X, C), Y --> max (max X, Y), C 1275 Function *MinMax = 1276 Intrinsic::getDeclaration(II->getModule(), MinMaxID, II->getType()); 1277 Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y); 1278 NewInner->takeName(Inner); 1279 return CallInst::Create(MinMax, {NewInner, C}); 1280 } 1281 1282 /// Reduce a sequence of min/max intrinsics with a common operand. 1283 static Instruction *factorizeMinMaxTree(IntrinsicInst *II) { 1284 // Match 3 of the same min/max ops. Example: umin(umin(), umin()). 1285 auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0)); 1286 auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1)); 1287 Intrinsic::ID MinMaxID = II->getIntrinsicID(); 1288 if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID || 1289 RHS->getIntrinsicID() != MinMaxID || 1290 (!LHS->hasOneUse() && !RHS->hasOneUse())) 1291 return nullptr; 1292 1293 Value *A = LHS->getArgOperand(0); 1294 Value *B = LHS->getArgOperand(1); 1295 Value *C = RHS->getArgOperand(0); 1296 Value *D = RHS->getArgOperand(1); 1297 1298 // Look for a common operand. 1299 Value *MinMaxOp = nullptr; 1300 Value *ThirdOp = nullptr; 1301 if (LHS->hasOneUse()) { 1302 // If the LHS is only used in this chain and the RHS is used outside of it, 1303 // reuse the RHS min/max because that will eliminate the LHS. 1304 if (D == A || C == A) { 1305 // min(min(a, b), min(c, a)) --> min(min(c, a), b) 1306 // min(min(a, b), min(a, d)) --> min(min(a, d), b) 1307 MinMaxOp = RHS; 1308 ThirdOp = B; 1309 } else if (D == B || C == B) { 1310 // min(min(a, b), min(c, b)) --> min(min(c, b), a) 1311 // min(min(a, b), min(b, d)) --> min(min(b, d), a) 1312 MinMaxOp = RHS; 1313 ThirdOp = A; 1314 } 1315 } else { 1316 assert(RHS->hasOneUse() && "Expected one-use operand"); 1317 // Reuse the LHS. This will eliminate the RHS. 1318 if (D == A || D == B) { 1319 // min(min(a, b), min(c, a)) --> min(min(a, b), c) 1320 // min(min(a, b), min(c, b)) --> min(min(a, b), c) 1321 MinMaxOp = LHS; 1322 ThirdOp = C; 1323 } else if (C == A || C == B) { 1324 // min(min(a, b), min(b, d)) --> min(min(a, b), d) 1325 // min(min(a, b), min(c, b)) --> min(min(a, b), d) 1326 MinMaxOp = LHS; 1327 ThirdOp = D; 1328 } 1329 } 1330 1331 if (!MinMaxOp || !ThirdOp) 1332 return nullptr; 1333 1334 Module *Mod = II->getModule(); 1335 Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType()); 1336 return CallInst::Create(MinMax, { MinMaxOp, ThirdOp }); 1337 } 1338 1339 /// If all arguments of the intrinsic are unary shuffles with the same mask, 1340 /// try to shuffle after the intrinsic. 1341 static Instruction * 1342 foldShuffledIntrinsicOperands(IntrinsicInst *II, 1343 InstCombiner::BuilderTy &Builder) { 1344 // TODO: This should be extended to handle other intrinsics like fshl, ctpop, 1345 // etc. Use llvm::isTriviallyVectorizable() and related to determine 1346 // which intrinsics are safe to shuffle? 1347 switch (II->getIntrinsicID()) { 1348 case Intrinsic::smax: 1349 case Intrinsic::smin: 1350 case Intrinsic::umax: 1351 case Intrinsic::umin: 1352 case Intrinsic::fma: 1353 case Intrinsic::fshl: 1354 case Intrinsic::fshr: 1355 break; 1356 default: 1357 return nullptr; 1358 } 1359 1360 Value *X; 1361 ArrayRef<int> Mask; 1362 if (!match(II->getArgOperand(0), 1363 m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask)))) 1364 return nullptr; 1365 1366 // At least 1 operand must have 1 use because we are creating 2 instructions. 1367 if (none_of(II->args(), [](Value *V) { return V->hasOneUse(); })) 1368 return nullptr; 1369 1370 // See if all arguments are shuffled with the same mask. 1371 SmallVector<Value *, 4> NewArgs(II->arg_size()); 1372 NewArgs[0] = X; 1373 Type *SrcTy = X->getType(); 1374 for (unsigned i = 1, e = II->arg_size(); i != e; ++i) { 1375 if (!match(II->getArgOperand(i), 1376 m_Shuffle(m_Value(X), m_Undef(), m_SpecificMask(Mask))) || 1377 X->getType() != SrcTy) 1378 return nullptr; 1379 NewArgs[i] = X; 1380 } 1381 1382 // intrinsic (shuf X, M), (shuf Y, M), ... --> shuf (intrinsic X, Y, ...), M 1383 Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr; 1384 Value *NewIntrinsic = 1385 Builder.CreateIntrinsic(II->getIntrinsicID(), SrcTy, NewArgs, FPI); 1386 return new ShuffleVectorInst(NewIntrinsic, Mask); 1387 } 1388 1389 /// Fold the following cases and accepts bswap and bitreverse intrinsics: 1390 /// bswap(logic_op(bswap(x), y)) --> logic_op(x, bswap(y)) 1391 /// bswap(logic_op(bswap(x), bswap(y))) --> logic_op(x, y) (ignores multiuse) 1392 template <Intrinsic::ID IntrID> 1393 static Instruction *foldBitOrderCrossLogicOp(Value *V, 1394 InstCombiner::BuilderTy &Builder) { 1395 static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse, 1396 "This helper only supports BSWAP and BITREVERSE intrinsics"); 1397 1398 Value *X, *Y; 1399 // Find bitwise logic op. Check that it is a BinaryOperator explicitly so we 1400 // don't match ConstantExpr that aren't meaningful for this transform. 1401 if (match(V, m_OneUse(m_BitwiseLogic(m_Value(X), m_Value(Y)))) && 1402 isa<BinaryOperator>(V)) { 1403 Value *OldReorderX, *OldReorderY; 1404 BinaryOperator::BinaryOps Op = cast<BinaryOperator>(V)->getOpcode(); 1405 1406 // If both X and Y are bswap/bitreverse, the transform reduces the number 1407 // of instructions even if there's multiuse. 1408 // If only one operand is bswap/bitreverse, we need to ensure the operand 1409 // have only one use. 1410 if (match(X, m_Intrinsic<IntrID>(m_Value(OldReorderX))) && 1411 match(Y, m_Intrinsic<IntrID>(m_Value(OldReorderY)))) { 1412 return BinaryOperator::Create(Op, OldReorderX, OldReorderY); 1413 } 1414 1415 if (match(X, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderX))))) { 1416 Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y); 1417 return BinaryOperator::Create(Op, OldReorderX, NewReorder); 1418 } 1419 1420 if (match(Y, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderY))))) { 1421 Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X); 1422 return BinaryOperator::Create(Op, NewReorder, OldReorderY); 1423 } 1424 } 1425 return nullptr; 1426 } 1427 1428 /// CallInst simplification. This mostly only handles folding of intrinsic 1429 /// instructions. For normal calls, it allows visitCallBase to do the heavy 1430 /// lifting. 1431 Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { 1432 // Don't try to simplify calls without uses. It will not do anything useful, 1433 // but will result in the following folds being skipped. 1434 if (!CI.use_empty()) { 1435 SmallVector<Value *, 4> Args; 1436 Args.reserve(CI.arg_size()); 1437 for (Value *Op : CI.args()) 1438 Args.push_back(Op); 1439 if (Value *V = simplifyCall(&CI, CI.getCalledOperand(), Args, 1440 SQ.getWithInstruction(&CI))) 1441 return replaceInstUsesWith(CI, V); 1442 } 1443 1444 if (Value *FreedOp = getFreedOperand(&CI, &TLI)) 1445 return visitFree(CI, FreedOp); 1446 1447 // If the caller function (i.e. us, the function that contains this CallInst) 1448 // is nounwind, mark the call as nounwind, even if the callee isn't. 1449 if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) { 1450 CI.setDoesNotThrow(); 1451 return &CI; 1452 } 1453 1454 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI); 1455 if (!II) return visitCallBase(CI); 1456 1457 // For atomic unordered mem intrinsics if len is not a positive or 1458 // not a multiple of element size then behavior is undefined. 1459 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II)) 1460 if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength())) 1461 if (NumBytes->isNegative() || 1462 (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) { 1463 CreateNonTerminatorUnreachable(AMI); 1464 assert(AMI->getType()->isVoidTy() && 1465 "non void atomic unordered mem intrinsic"); 1466 return eraseInstFromFunction(*AMI); 1467 } 1468 1469 // Intrinsics cannot occur in an invoke or a callbr, so handle them here 1470 // instead of in visitCallBase. 1471 if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) { 1472 bool Changed = false; 1473 1474 // memmove/cpy/set of zero bytes is a noop. 1475 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) { 1476 if (NumBytes->isNullValue()) 1477 return eraseInstFromFunction(CI); 1478 } 1479 1480 // No other transformations apply to volatile transfers. 1481 if (auto *M = dyn_cast<MemIntrinsic>(MI)) 1482 if (M->isVolatile()) 1483 return nullptr; 1484 1485 // If we have a memmove and the source operation is a constant global, 1486 // then the source and dest pointers can't alias, so we can change this 1487 // into a call to memcpy. 1488 if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) { 1489 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource())) 1490 if (GVSrc->isConstant()) { 1491 Module *M = CI.getModule(); 1492 Intrinsic::ID MemCpyID = 1493 isa<AtomicMemMoveInst>(MMI) 1494 ? Intrinsic::memcpy_element_unordered_atomic 1495 : Intrinsic::memcpy; 1496 Type *Tys[3] = { CI.getArgOperand(0)->getType(), 1497 CI.getArgOperand(1)->getType(), 1498 CI.getArgOperand(2)->getType() }; 1499 CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys)); 1500 Changed = true; 1501 } 1502 } 1503 1504 if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) { 1505 // memmove(x,x,size) -> noop. 1506 if (MTI->getSource() == MTI->getDest()) 1507 return eraseInstFromFunction(CI); 1508 } 1509 1510 // If we can determine a pointer alignment that is bigger than currently 1511 // set, update the alignment. 1512 if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) { 1513 if (Instruction *I = SimplifyAnyMemTransfer(MTI)) 1514 return I; 1515 } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) { 1516 if (Instruction *I = SimplifyAnyMemSet(MSI)) 1517 return I; 1518 } 1519 1520 if (Changed) return II; 1521 } 1522 1523 // For fixed width vector result intrinsics, use the generic demanded vector 1524 // support. 1525 if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) { 1526 auto VWidth = IIFVTy->getNumElements(); 1527 APInt PoisonElts(VWidth, 0); 1528 APInt AllOnesEltMask(APInt::getAllOnes(VWidth)); 1529 if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, PoisonElts)) { 1530 if (V != II) 1531 return replaceInstUsesWith(*II, V); 1532 return II; 1533 } 1534 } 1535 1536 if (II->isCommutative()) { 1537 if (auto Pair = matchSymmetricPair(II->getOperand(0), II->getOperand(1))) { 1538 replaceOperand(*II, 0, Pair->first); 1539 replaceOperand(*II, 1, Pair->second); 1540 return II; 1541 } 1542 1543 if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI)) 1544 return NewCall; 1545 } 1546 1547 // Unused constrained FP intrinsic calls may have declared side effect, which 1548 // prevents it from being removed. In some cases however the side effect is 1549 // actually absent. To detect this case, call SimplifyConstrainedFPCall. If it 1550 // returns a replacement, the call may be removed. 1551 if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) { 1552 if (simplifyConstrainedFPCall(&CI, SQ.getWithInstruction(&CI))) 1553 return eraseInstFromFunction(CI); 1554 } 1555 1556 Intrinsic::ID IID = II->getIntrinsicID(); 1557 switch (IID) { 1558 case Intrinsic::objectsize: { 1559 SmallVector<Instruction *> InsertedInstructions; 1560 if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false, 1561 &InsertedInstructions)) { 1562 for (Instruction *Inserted : InsertedInstructions) 1563 Worklist.add(Inserted); 1564 return replaceInstUsesWith(CI, V); 1565 } 1566 return nullptr; 1567 } 1568 case Intrinsic::abs: { 1569 Value *IIOperand = II->getArgOperand(0); 1570 bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue(); 1571 1572 // abs(-x) -> abs(x) 1573 // TODO: Copy nsw if it was present on the neg? 1574 Value *X; 1575 if (match(IIOperand, m_Neg(m_Value(X)))) 1576 return replaceOperand(*II, 0, X); 1577 if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X))))) 1578 return replaceOperand(*II, 0, X); 1579 if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X)))) 1580 return replaceOperand(*II, 0, X); 1581 1582 if (std::optional<bool> Known = 1583 getKnownSignOrZero(IIOperand, II, DL, &AC, &DT)) { 1584 // abs(x) -> x if x >= 0 (include abs(x-y) --> x - y where x >= y) 1585 // abs(x) -> x if x > 0 (include abs(x-y) --> x - y where x > y) 1586 if (!*Known) 1587 return replaceInstUsesWith(*II, IIOperand); 1588 1589 // abs(x) -> -x if x < 0 1590 // abs(x) -> -x if x < = 0 (include abs(x-y) --> y - x where x <= y) 1591 if (IntMinIsPoison) 1592 return BinaryOperator::CreateNSWNeg(IIOperand); 1593 return BinaryOperator::CreateNeg(IIOperand); 1594 } 1595 1596 // abs (sext X) --> zext (abs X*) 1597 // Clear the IsIntMin (nsw) bit on the abs to allow narrowing. 1598 if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) { 1599 Value *NarrowAbs = 1600 Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse()); 1601 return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType()); 1602 } 1603 1604 // Match a complicated way to check if a number is odd/even: 1605 // abs (srem X, 2) --> and X, 1 1606 const APInt *C; 1607 if (match(IIOperand, m_SRem(m_Value(X), m_APInt(C))) && *C == 2) 1608 return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1)); 1609 1610 break; 1611 } 1612 case Intrinsic::umin: { 1613 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1); 1614 // umin(x, 1) == zext(x != 0) 1615 if (match(I1, m_One())) { 1616 assert(II->getType()->getScalarSizeInBits() != 1 && 1617 "Expected simplify of umin with max constant"); 1618 Value *Zero = Constant::getNullValue(I0->getType()); 1619 Value *Cmp = Builder.CreateICmpNE(I0, Zero); 1620 return CastInst::Create(Instruction::ZExt, Cmp, II->getType()); 1621 } 1622 [[fallthrough]]; 1623 } 1624 case Intrinsic::umax: { 1625 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1); 1626 Value *X, *Y; 1627 if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) && 1628 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) { 1629 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y); 1630 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType()); 1631 } 1632 Constant *C; 1633 if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_Constant(C)) && 1634 I0->hasOneUse()) { 1635 if (Constant *NarrowC = getLosslessUnsignedTrunc(C, X->getType())) { 1636 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC); 1637 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType()); 1638 } 1639 } 1640 // If both operands of unsigned min/max are sign-extended, it is still ok 1641 // to narrow the operation. 1642 [[fallthrough]]; 1643 } 1644 case Intrinsic::smax: 1645 case Intrinsic::smin: { 1646 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1); 1647 Value *X, *Y; 1648 if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) && 1649 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) { 1650 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y); 1651 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType()); 1652 } 1653 1654 Constant *C; 1655 if (match(I0, m_SExt(m_Value(X))) && match(I1, m_Constant(C)) && 1656 I0->hasOneUse()) { 1657 if (Constant *NarrowC = getLosslessSignedTrunc(C, X->getType())) { 1658 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC); 1659 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType()); 1660 } 1661 } 1662 1663 // umin(i1 X, i1 Y) -> and i1 X, Y 1664 // smax(i1 X, i1 Y) -> and i1 X, Y 1665 if ((IID == Intrinsic::umin || IID == Intrinsic::smax) && 1666 II->getType()->isIntOrIntVectorTy(1)) { 1667 return BinaryOperator::CreateAnd(I0, I1); 1668 } 1669 1670 // umax(i1 X, i1 Y) -> or i1 X, Y 1671 // smin(i1 X, i1 Y) -> or i1 X, Y 1672 if ((IID == Intrinsic::umax || IID == Intrinsic::smin) && 1673 II->getType()->isIntOrIntVectorTy(1)) { 1674 return BinaryOperator::CreateOr(I0, I1); 1675 } 1676 1677 if (IID == Intrinsic::smax || IID == Intrinsic::smin) { 1678 // smax (neg nsw X), (neg nsw Y) --> neg nsw (smin X, Y) 1679 // smin (neg nsw X), (neg nsw Y) --> neg nsw (smax X, Y) 1680 // TODO: Canonicalize neg after min/max if I1 is constant. 1681 if (match(I0, m_NSWNeg(m_Value(X))) && match(I1, m_NSWNeg(m_Value(Y))) && 1682 (I0->hasOneUse() || I1->hasOneUse())) { 1683 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID); 1684 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y); 1685 return BinaryOperator::CreateNSWNeg(InvMaxMin); 1686 } 1687 } 1688 1689 // (umax X, (xor X, Pow2)) 1690 // -> (or X, Pow2) 1691 // (umin X, (xor X, Pow2)) 1692 // -> (and X, ~Pow2) 1693 // (smax X, (xor X, Pos_Pow2)) 1694 // -> (or X, Pos_Pow2) 1695 // (smin X, (xor X, Pos_Pow2)) 1696 // -> (and X, ~Pos_Pow2) 1697 // (smax X, (xor X, Neg_Pow2)) 1698 // -> (and X, ~Neg_Pow2) 1699 // (smin X, (xor X, Neg_Pow2)) 1700 // -> (or X, Neg_Pow2) 1701 if ((match(I0, m_c_Xor(m_Specific(I1), m_Value(X))) || 1702 match(I1, m_c_Xor(m_Specific(I0), m_Value(X)))) && 1703 isKnownToBeAPowerOfTwo(X, /* OrZero */ true)) { 1704 bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax; 1705 bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin; 1706 1707 if (IID == Intrinsic::smax || IID == Intrinsic::smin) { 1708 auto KnownSign = getKnownSign(X, II, DL, &AC, &DT); 1709 if (KnownSign == std::nullopt) { 1710 UseOr = false; 1711 UseAndN = false; 1712 } else if (*KnownSign /* true is Signed. */) { 1713 UseOr ^= true; 1714 UseAndN ^= true; 1715 Type *Ty = I0->getType(); 1716 // Negative power of 2 must be IntMin. It's possible to be able to 1717 // prove negative / power of 2 without actually having known bits, so 1718 // just get the value by hand. 1719 X = Constant::getIntegerValue( 1720 Ty, APInt::getSignedMinValue(Ty->getScalarSizeInBits())); 1721 } 1722 } 1723 if (UseOr) 1724 return BinaryOperator::CreateOr(I0, X); 1725 else if (UseAndN) 1726 return BinaryOperator::CreateAnd(I0, Builder.CreateNot(X)); 1727 } 1728 1729 // If we can eliminate ~A and Y is free to invert: 1730 // max ~A, Y --> ~(min A, ~Y) 1731 // 1732 // Examples: 1733 // max ~A, ~Y --> ~(min A, Y) 1734 // max ~A, C --> ~(min A, ~C) 1735 // max ~A, (max ~Y, ~Z) --> ~min( A, (min Y, Z)) 1736 auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * { 1737 Value *A; 1738 if (match(X, m_OneUse(m_Not(m_Value(A)))) && 1739 !isFreeToInvert(A, A->hasOneUse())) { 1740 if (Value *NotY = getFreelyInverted(Y, Y->hasOneUse(), &Builder)) { 1741 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID); 1742 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY); 1743 return BinaryOperator::CreateNot(InvMaxMin); 1744 } 1745 } 1746 return nullptr; 1747 }; 1748 1749 if (Instruction *I = moveNotAfterMinMax(I0, I1)) 1750 return I; 1751 if (Instruction *I = moveNotAfterMinMax(I1, I0)) 1752 return I; 1753 1754 if (Instruction *I = moveAddAfterMinMax(II, Builder)) 1755 return I; 1756 1757 // smax(X, -X) --> abs(X) 1758 // smin(X, -X) --> -abs(X) 1759 // umax(X, -X) --> -abs(X) 1760 // umin(X, -X) --> abs(X) 1761 if (isKnownNegation(I0, I1)) { 1762 // We can choose either operand as the input to abs(), but if we can 1763 // eliminate the only use of a value, that's better for subsequent 1764 // transforms/analysis. 1765 if (I0->hasOneUse() && !I1->hasOneUse()) 1766 std::swap(I0, I1); 1767 1768 // This is some variant of abs(). See if we can propagate 'nsw' to the abs 1769 // operation and potentially its negation. 1770 bool IntMinIsPoison = isKnownNegation(I0, I1, /* NeedNSW */ true); 1771 Value *Abs = Builder.CreateBinaryIntrinsic( 1772 Intrinsic::abs, I0, 1773 ConstantInt::getBool(II->getContext(), IntMinIsPoison)); 1774 1775 // We don't have a "nabs" intrinsic, so negate if needed based on the 1776 // max/min operation. 1777 if (IID == Intrinsic::smin || IID == Intrinsic::umax) 1778 Abs = Builder.CreateNeg(Abs, "nabs", /* NUW */ false, IntMinIsPoison); 1779 return replaceInstUsesWith(CI, Abs); 1780 } 1781 1782 if (Instruction *Sel = foldClampRangeOfTwo(II, Builder)) 1783 return Sel; 1784 1785 if (Instruction *SAdd = matchSAddSubSat(*II)) 1786 return SAdd; 1787 1788 if (Value *NewMinMax = reassociateMinMaxWithConstants(II, Builder)) 1789 return replaceInstUsesWith(*II, NewMinMax); 1790 1791 if (Instruction *R = reassociateMinMaxWithConstantInOperand(II, Builder)) 1792 return R; 1793 1794 if (Instruction *NewMinMax = factorizeMinMaxTree(II)) 1795 return NewMinMax; 1796 1797 // Try to fold minmax with constant RHS based on range information 1798 const APInt *RHSC; 1799 if (match(I1, m_APIntAllowUndef(RHSC))) { 1800 ICmpInst::Predicate Pred = 1801 ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID)); 1802 bool IsSigned = MinMaxIntrinsic::isSigned(IID); 1803 ConstantRange LHS_CR = computeConstantRangeIncludingKnownBits( 1804 I0, IsSigned, SQ.getWithInstruction(II)); 1805 if (!LHS_CR.isFullSet()) { 1806 if (LHS_CR.icmp(Pred, *RHSC)) 1807 return replaceInstUsesWith(*II, I0); 1808 if (LHS_CR.icmp(ICmpInst::getSwappedPredicate(Pred), *RHSC)) 1809 return replaceInstUsesWith(*II, 1810 ConstantInt::get(II->getType(), *RHSC)); 1811 } 1812 } 1813 1814 break; 1815 } 1816 case Intrinsic::bitreverse: { 1817 Value *IIOperand = II->getArgOperand(0); 1818 // bitrev (zext i1 X to ?) --> X ? SignBitC : 0 1819 Value *X; 1820 if (match(IIOperand, m_ZExt(m_Value(X))) && 1821 X->getType()->isIntOrIntVectorTy(1)) { 1822 Type *Ty = II->getType(); 1823 APInt SignBit = APInt::getSignMask(Ty->getScalarSizeInBits()); 1824 return SelectInst::Create(X, ConstantInt::get(Ty, SignBit), 1825 ConstantInt::getNullValue(Ty)); 1826 } 1827 1828 if (Instruction *crossLogicOpFold = 1829 foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand, Builder)) 1830 return crossLogicOpFold; 1831 1832 break; 1833 } 1834 case Intrinsic::bswap: { 1835 Value *IIOperand = II->getArgOperand(0); 1836 1837 // Try to canonicalize bswap-of-logical-shift-by-8-bit-multiple as 1838 // inverse-shift-of-bswap: 1839 // bswap (shl X, Y) --> lshr (bswap X), Y 1840 // bswap (lshr X, Y) --> shl (bswap X), Y 1841 Value *X, *Y; 1842 if (match(IIOperand, m_OneUse(m_LogicalShift(m_Value(X), m_Value(Y))))) { 1843 // The transform allows undef vector elements, so try a constant match 1844 // first. If knownbits can handle that case, that clause could be removed. 1845 unsigned BitWidth = IIOperand->getType()->getScalarSizeInBits(); 1846 const APInt *C; 1847 if ((match(Y, m_APIntAllowUndef(C)) && (*C & 7) == 0) || 1848 MaskedValueIsZero(Y, APInt::getLowBitsSet(BitWidth, 3))) { 1849 Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X); 1850 BinaryOperator::BinaryOps InverseShift = 1851 cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl 1852 ? Instruction::LShr 1853 : Instruction::Shl; 1854 return BinaryOperator::Create(InverseShift, NewSwap, Y); 1855 } 1856 } 1857 1858 KnownBits Known = computeKnownBits(IIOperand, 0, II); 1859 uint64_t LZ = alignDown(Known.countMinLeadingZeros(), 8); 1860 uint64_t TZ = alignDown(Known.countMinTrailingZeros(), 8); 1861 unsigned BW = Known.getBitWidth(); 1862 1863 // bswap(x) -> shift(x) if x has exactly one "active byte" 1864 if (BW - LZ - TZ == 8) { 1865 assert(LZ != TZ && "active byte cannot be in the middle"); 1866 if (LZ > TZ) // -> shl(x) if the "active byte" is in the low part of x 1867 return BinaryOperator::CreateNUWShl( 1868 IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ)); 1869 // -> lshr(x) if the "active byte" is in the high part of x 1870 return BinaryOperator::CreateExactLShr( 1871 IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ)); 1872 } 1873 1874 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c)) 1875 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) { 1876 unsigned C = X->getType()->getScalarSizeInBits() - BW; 1877 Value *CV = ConstantInt::get(X->getType(), C); 1878 Value *V = Builder.CreateLShr(X, CV); 1879 return new TruncInst(V, IIOperand->getType()); 1880 } 1881 1882 if (Instruction *crossLogicOpFold = 1883 foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand, Builder)) { 1884 return crossLogicOpFold; 1885 } 1886 1887 break; 1888 } 1889 case Intrinsic::masked_load: 1890 if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II)) 1891 return replaceInstUsesWith(CI, SimplifiedMaskedOp); 1892 break; 1893 case Intrinsic::masked_store: 1894 return simplifyMaskedStore(*II); 1895 case Intrinsic::masked_gather: 1896 return simplifyMaskedGather(*II); 1897 case Intrinsic::masked_scatter: 1898 return simplifyMaskedScatter(*II); 1899 case Intrinsic::launder_invariant_group: 1900 case Intrinsic::strip_invariant_group: 1901 if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this)) 1902 return replaceInstUsesWith(*II, SkippedBarrier); 1903 break; 1904 case Intrinsic::powi: 1905 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) { 1906 // 0 and 1 are handled in instsimplify 1907 // powi(x, -1) -> 1/x 1908 if (Power->isMinusOne()) 1909 return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0), 1910 II->getArgOperand(0), II); 1911 // powi(x, 2) -> x*x 1912 if (Power->equalsInt(2)) 1913 return BinaryOperator::CreateFMulFMF(II->getArgOperand(0), 1914 II->getArgOperand(0), II); 1915 1916 if (!Power->getValue()[0]) { 1917 Value *X; 1918 // If power is even: 1919 // powi(-x, p) -> powi(x, p) 1920 // powi(fabs(x), p) -> powi(x, p) 1921 // powi(copysign(x, y), p) -> powi(x, p) 1922 if (match(II->getArgOperand(0), m_FNeg(m_Value(X))) || 1923 match(II->getArgOperand(0), m_FAbs(m_Value(X))) || 1924 match(II->getArgOperand(0), 1925 m_Intrinsic<Intrinsic::copysign>(m_Value(X), m_Value()))) 1926 return replaceOperand(*II, 0, X); 1927 } 1928 } 1929 break; 1930 1931 case Intrinsic::cttz: 1932 case Intrinsic::ctlz: 1933 if (auto *I = foldCttzCtlz(*II, *this)) 1934 return I; 1935 break; 1936 1937 case Intrinsic::ctpop: 1938 if (auto *I = foldCtpop(*II, *this)) 1939 return I; 1940 break; 1941 1942 case Intrinsic::fshl: 1943 case Intrinsic::fshr: { 1944 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1); 1945 Type *Ty = II->getType(); 1946 unsigned BitWidth = Ty->getScalarSizeInBits(); 1947 Constant *ShAmtC; 1948 if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC))) { 1949 // Canonicalize a shift amount constant operand to modulo the bit-width. 1950 Constant *WidthC = ConstantInt::get(Ty, BitWidth); 1951 Constant *ModuloC = 1952 ConstantFoldBinaryOpOperands(Instruction::URem, ShAmtC, WidthC, DL); 1953 if (!ModuloC) 1954 return nullptr; 1955 if (ModuloC != ShAmtC) 1956 return replaceOperand(*II, 2, ModuloC); 1957 1958 assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) == 1959 ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) && 1960 "Shift amount expected to be modulo bitwidth"); 1961 1962 // Canonicalize funnel shift right by constant to funnel shift left. This 1963 // is not entirely arbitrary. For historical reasons, the backend may 1964 // recognize rotate left patterns but miss rotate right patterns. 1965 if (IID == Intrinsic::fshr) { 1966 // fshr X, Y, C --> fshl X, Y, (BitWidth - C) 1967 Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC); 1968 Module *Mod = II->getModule(); 1969 Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty); 1970 return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC }); 1971 } 1972 assert(IID == Intrinsic::fshl && 1973 "All funnel shifts by simple constants should go left"); 1974 1975 // fshl(X, 0, C) --> shl X, C 1976 // fshl(X, undef, C) --> shl X, C 1977 if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef())) 1978 return BinaryOperator::CreateShl(Op0, ShAmtC); 1979 1980 // fshl(0, X, C) --> lshr X, (BW-C) 1981 // fshl(undef, X, C) --> lshr X, (BW-C) 1982 if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef())) 1983 return BinaryOperator::CreateLShr(Op1, 1984 ConstantExpr::getSub(WidthC, ShAmtC)); 1985 1986 // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form) 1987 if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) { 1988 Module *Mod = II->getModule(); 1989 Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty); 1990 return CallInst::Create(Bswap, { Op0 }); 1991 } 1992 if (Instruction *BitOp = 1993 matchBSwapOrBitReverse(*II, /*MatchBSwaps*/ true, 1994 /*MatchBitReversals*/ true)) 1995 return BitOp; 1996 } 1997 1998 // Left or right might be masked. 1999 if (SimplifyDemandedInstructionBits(*II)) 2000 return &CI; 2001 2002 // The shift amount (operand 2) of a funnel shift is modulo the bitwidth, 2003 // so only the low bits of the shift amount are demanded if the bitwidth is 2004 // a power-of-2. 2005 if (!isPowerOf2_32(BitWidth)) 2006 break; 2007 APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth)); 2008 KnownBits Op2Known(BitWidth); 2009 if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known)) 2010 return &CI; 2011 break; 2012 } 2013 case Intrinsic::ptrmask: { 2014 unsigned BitWidth = DL.getPointerTypeSizeInBits(II->getType()); 2015 KnownBits Known(BitWidth); 2016 if (SimplifyDemandedInstructionBits(*II, Known)) 2017 return II; 2018 2019 Value *InnerPtr, *InnerMask; 2020 bool Changed = false; 2021 // Combine: 2022 // (ptrmask (ptrmask p, A), B) 2023 // -> (ptrmask p, (and A, B)) 2024 if (match(II->getArgOperand(0), 2025 m_OneUse(m_Intrinsic<Intrinsic::ptrmask>(m_Value(InnerPtr), 2026 m_Value(InnerMask))))) { 2027 assert(II->getArgOperand(1)->getType() == InnerMask->getType() && 2028 "Mask types must match"); 2029 // TODO: If InnerMask == Op1, we could copy attributes from inner 2030 // callsite -> outer callsite. 2031 Value *NewMask = Builder.CreateAnd(II->getArgOperand(1), InnerMask); 2032 replaceOperand(CI, 0, InnerPtr); 2033 replaceOperand(CI, 1, NewMask); 2034 Changed = true; 2035 } 2036 2037 // See if we can deduce non-null. 2038 if (!CI.hasRetAttr(Attribute::NonNull) && 2039 (Known.isNonZero() || 2040 isKnownNonZero(II, DL, /*Depth*/ 0, &AC, II, &DT))) { 2041 CI.addRetAttr(Attribute::NonNull); 2042 Changed = true; 2043 } 2044 2045 unsigned NewAlignmentLog = 2046 std::min(Value::MaxAlignmentExponent, 2047 std::min(BitWidth - 1, Known.countMinTrailingZeros())); 2048 // Known bits will capture if we had alignment information associated with 2049 // the pointer argument. 2050 if (NewAlignmentLog > Log2(CI.getRetAlign().valueOrOne())) { 2051 CI.addRetAttr(Attribute::getWithAlignment( 2052 CI.getContext(), Align(uint64_t(1) << NewAlignmentLog))); 2053 Changed = true; 2054 } 2055 if (Changed) 2056 return &CI; 2057 break; 2058 } 2059 case Intrinsic::uadd_with_overflow: 2060 case Intrinsic::sadd_with_overflow: { 2061 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 2062 return I; 2063 2064 // Given 2 constant operands whose sum does not overflow: 2065 // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1 2066 // saddo (X +nsw C0), C1 -> saddo X, C0 + C1 2067 Value *X; 2068 const APInt *C0, *C1; 2069 Value *Arg0 = II->getArgOperand(0); 2070 Value *Arg1 = II->getArgOperand(1); 2071 bool IsSigned = IID == Intrinsic::sadd_with_overflow; 2072 bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0))) 2073 : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0))); 2074 if (HasNWAdd && match(Arg1, m_APInt(C1))) { 2075 bool Overflow; 2076 APInt NewC = 2077 IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow); 2078 if (!Overflow) 2079 return replaceInstUsesWith( 2080 *II, Builder.CreateBinaryIntrinsic( 2081 IID, X, ConstantInt::get(Arg1->getType(), NewC))); 2082 } 2083 break; 2084 } 2085 2086 case Intrinsic::umul_with_overflow: 2087 case Intrinsic::smul_with_overflow: 2088 case Intrinsic::usub_with_overflow: 2089 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 2090 return I; 2091 break; 2092 2093 case Intrinsic::ssub_with_overflow: { 2094 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 2095 return I; 2096 2097 Constant *C; 2098 Value *Arg0 = II->getArgOperand(0); 2099 Value *Arg1 = II->getArgOperand(1); 2100 // Given a constant C that is not the minimum signed value 2101 // for an integer of a given bit width: 2102 // 2103 // ssubo X, C -> saddo X, -C 2104 if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) { 2105 Value *NegVal = ConstantExpr::getNeg(C); 2106 // Build a saddo call that is equivalent to the discovered 2107 // ssubo call. 2108 return replaceInstUsesWith( 2109 *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow, 2110 Arg0, NegVal)); 2111 } 2112 2113 break; 2114 } 2115 2116 case Intrinsic::uadd_sat: 2117 case Intrinsic::sadd_sat: 2118 case Intrinsic::usub_sat: 2119 case Intrinsic::ssub_sat: { 2120 SaturatingInst *SI = cast<SaturatingInst>(II); 2121 Type *Ty = SI->getType(); 2122 Value *Arg0 = SI->getLHS(); 2123 Value *Arg1 = SI->getRHS(); 2124 2125 // Make use of known overflow information. 2126 OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(), 2127 Arg0, Arg1, SI); 2128 switch (OR) { 2129 case OverflowResult::MayOverflow: 2130 break; 2131 case OverflowResult::NeverOverflows: 2132 if (SI->isSigned()) 2133 return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1); 2134 else 2135 return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1); 2136 case OverflowResult::AlwaysOverflowsLow: { 2137 unsigned BitWidth = Ty->getScalarSizeInBits(); 2138 APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned()); 2139 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min)); 2140 } 2141 case OverflowResult::AlwaysOverflowsHigh: { 2142 unsigned BitWidth = Ty->getScalarSizeInBits(); 2143 APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned()); 2144 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max)); 2145 } 2146 } 2147 2148 // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN 2149 Constant *C; 2150 if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) && 2151 C->isNotMinSignedValue()) { 2152 Value *NegVal = ConstantExpr::getNeg(C); 2153 return replaceInstUsesWith( 2154 *II, Builder.CreateBinaryIntrinsic( 2155 Intrinsic::sadd_sat, Arg0, NegVal)); 2156 } 2157 2158 // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2)) 2159 // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2)) 2160 // if Val and Val2 have the same sign 2161 if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) { 2162 Value *X; 2163 const APInt *Val, *Val2; 2164 APInt NewVal; 2165 bool IsUnsigned = 2166 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat; 2167 if (Other->getIntrinsicID() == IID && 2168 match(Arg1, m_APInt(Val)) && 2169 match(Other->getArgOperand(0), m_Value(X)) && 2170 match(Other->getArgOperand(1), m_APInt(Val2))) { 2171 if (IsUnsigned) 2172 NewVal = Val->uadd_sat(*Val2); 2173 else if (Val->isNonNegative() == Val2->isNonNegative()) { 2174 bool Overflow; 2175 NewVal = Val->sadd_ov(*Val2, Overflow); 2176 if (Overflow) { 2177 // Both adds together may add more than SignedMaxValue 2178 // without saturating the final result. 2179 break; 2180 } 2181 } else { 2182 // Cannot fold saturated addition with different signs. 2183 break; 2184 } 2185 2186 return replaceInstUsesWith( 2187 *II, Builder.CreateBinaryIntrinsic( 2188 IID, X, ConstantInt::get(II->getType(), NewVal))); 2189 } 2190 } 2191 break; 2192 } 2193 2194 case Intrinsic::minnum: 2195 case Intrinsic::maxnum: 2196 case Intrinsic::minimum: 2197 case Intrinsic::maximum: { 2198 Value *Arg0 = II->getArgOperand(0); 2199 Value *Arg1 = II->getArgOperand(1); 2200 Value *X, *Y; 2201 if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) && 2202 (Arg0->hasOneUse() || Arg1->hasOneUse())) { 2203 // If both operands are negated, invert the call and negate the result: 2204 // min(-X, -Y) --> -(max(X, Y)) 2205 // max(-X, -Y) --> -(min(X, Y)) 2206 Intrinsic::ID NewIID; 2207 switch (IID) { 2208 case Intrinsic::maxnum: 2209 NewIID = Intrinsic::minnum; 2210 break; 2211 case Intrinsic::minnum: 2212 NewIID = Intrinsic::maxnum; 2213 break; 2214 case Intrinsic::maximum: 2215 NewIID = Intrinsic::minimum; 2216 break; 2217 case Intrinsic::minimum: 2218 NewIID = Intrinsic::maximum; 2219 break; 2220 default: 2221 llvm_unreachable("unexpected intrinsic ID"); 2222 } 2223 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II); 2224 Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall); 2225 FNeg->copyIRFlags(II); 2226 return FNeg; 2227 } 2228 2229 // m(m(X, C2), C1) -> m(X, C) 2230 const APFloat *C1, *C2; 2231 if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) { 2232 if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) && 2233 ((match(M->getArgOperand(0), m_Value(X)) && 2234 match(M->getArgOperand(1), m_APFloat(C2))) || 2235 (match(M->getArgOperand(1), m_Value(X)) && 2236 match(M->getArgOperand(0), m_APFloat(C2))))) { 2237 APFloat Res(0.0); 2238 switch (IID) { 2239 case Intrinsic::maxnum: 2240 Res = maxnum(*C1, *C2); 2241 break; 2242 case Intrinsic::minnum: 2243 Res = minnum(*C1, *C2); 2244 break; 2245 case Intrinsic::maximum: 2246 Res = maximum(*C1, *C2); 2247 break; 2248 case Intrinsic::minimum: 2249 Res = minimum(*C1, *C2); 2250 break; 2251 default: 2252 llvm_unreachable("unexpected intrinsic ID"); 2253 } 2254 Instruction *NewCall = Builder.CreateBinaryIntrinsic( 2255 IID, X, ConstantFP::get(Arg0->getType(), Res), II); 2256 // TODO: Conservatively intersecting FMF. If Res == C2, the transform 2257 // was a simplification (so Arg0 and its original flags could 2258 // propagate?) 2259 NewCall->andIRFlags(M); 2260 return replaceInstUsesWith(*II, NewCall); 2261 } 2262 } 2263 2264 // m((fpext X), (fpext Y)) -> fpext (m(X, Y)) 2265 if (match(Arg0, m_OneUse(m_FPExt(m_Value(X)))) && 2266 match(Arg1, m_OneUse(m_FPExt(m_Value(Y)))) && 2267 X->getType() == Y->getType()) { 2268 Value *NewCall = 2269 Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName()); 2270 return new FPExtInst(NewCall, II->getType()); 2271 } 2272 2273 // max X, -X --> fabs X 2274 // min X, -X --> -(fabs X) 2275 // TODO: Remove one-use limitation? That is obviously better for max. 2276 // It would be an extra instruction for min (fnabs), but that is 2277 // still likely better for analysis and codegen. 2278 if ((match(Arg0, m_OneUse(m_FNeg(m_Value(X)))) && Arg1 == X) || 2279 (match(Arg1, m_OneUse(m_FNeg(m_Value(X)))) && Arg0 == X)) { 2280 Value *R = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II); 2281 if (IID == Intrinsic::minimum || IID == Intrinsic::minnum) 2282 R = Builder.CreateFNegFMF(R, II); 2283 return replaceInstUsesWith(*II, R); 2284 } 2285 2286 break; 2287 } 2288 case Intrinsic::matrix_multiply: { 2289 // Optimize negation in matrix multiplication. 2290 2291 // -A * -B -> A * B 2292 Value *A, *B; 2293 if (match(II->getArgOperand(0), m_FNeg(m_Value(A))) && 2294 match(II->getArgOperand(1), m_FNeg(m_Value(B)))) { 2295 replaceOperand(*II, 0, A); 2296 replaceOperand(*II, 1, B); 2297 return II; 2298 } 2299 2300 Value *Op0 = II->getOperand(0); 2301 Value *Op1 = II->getOperand(1); 2302 Value *OpNotNeg, *NegatedOp; 2303 unsigned NegatedOpArg, OtherOpArg; 2304 if (match(Op0, m_FNeg(m_Value(OpNotNeg)))) { 2305 NegatedOp = Op0; 2306 NegatedOpArg = 0; 2307 OtherOpArg = 1; 2308 } else if (match(Op1, m_FNeg(m_Value(OpNotNeg)))) { 2309 NegatedOp = Op1; 2310 NegatedOpArg = 1; 2311 OtherOpArg = 0; 2312 } else 2313 // Multiplication doesn't have a negated operand. 2314 break; 2315 2316 // Only optimize if the negated operand has only one use. 2317 if (!NegatedOp->hasOneUse()) 2318 break; 2319 2320 Value *OtherOp = II->getOperand(OtherOpArg); 2321 VectorType *RetTy = cast<VectorType>(II->getType()); 2322 VectorType *NegatedOpTy = cast<VectorType>(NegatedOp->getType()); 2323 VectorType *OtherOpTy = cast<VectorType>(OtherOp->getType()); 2324 ElementCount NegatedCount = NegatedOpTy->getElementCount(); 2325 ElementCount OtherCount = OtherOpTy->getElementCount(); 2326 ElementCount RetCount = RetTy->getElementCount(); 2327 // (-A) * B -> A * (-B), if it is cheaper to negate B and vice versa. 2328 if (ElementCount::isKnownGT(NegatedCount, OtherCount) && 2329 ElementCount::isKnownLT(OtherCount, RetCount)) { 2330 Value *InverseOtherOp = Builder.CreateFNeg(OtherOp); 2331 replaceOperand(*II, NegatedOpArg, OpNotNeg); 2332 replaceOperand(*II, OtherOpArg, InverseOtherOp); 2333 return II; 2334 } 2335 // (-A) * B -> -(A * B), if it is cheaper to negate the result 2336 if (ElementCount::isKnownGT(NegatedCount, RetCount)) { 2337 SmallVector<Value *, 5> NewArgs(II->args()); 2338 NewArgs[NegatedOpArg] = OpNotNeg; 2339 Instruction *NewMul = 2340 Builder.CreateIntrinsic(II->getType(), IID, NewArgs, II); 2341 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(NewMul, II)); 2342 } 2343 break; 2344 } 2345 case Intrinsic::fmuladd: { 2346 // Canonicalize fast fmuladd to the separate fmul + fadd. 2347 if (II->isFast()) { 2348 BuilderTy::FastMathFlagGuard Guard(Builder); 2349 Builder.setFastMathFlags(II->getFastMathFlags()); 2350 Value *Mul = Builder.CreateFMul(II->getArgOperand(0), 2351 II->getArgOperand(1)); 2352 Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2)); 2353 Add->takeName(II); 2354 return replaceInstUsesWith(*II, Add); 2355 } 2356 2357 // Try to simplify the underlying FMul. 2358 if (Value *V = simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1), 2359 II->getFastMathFlags(), 2360 SQ.getWithInstruction(II))) { 2361 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2)); 2362 FAdd->copyFastMathFlags(II); 2363 return FAdd; 2364 } 2365 2366 [[fallthrough]]; 2367 } 2368 case Intrinsic::fma: { 2369 // fma fneg(x), fneg(y), z -> fma x, y, z 2370 Value *Src0 = II->getArgOperand(0); 2371 Value *Src1 = II->getArgOperand(1); 2372 Value *X, *Y; 2373 if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) { 2374 replaceOperand(*II, 0, X); 2375 replaceOperand(*II, 1, Y); 2376 return II; 2377 } 2378 2379 // fma fabs(x), fabs(x), z -> fma x, x, z 2380 if (match(Src0, m_FAbs(m_Value(X))) && 2381 match(Src1, m_FAbs(m_Specific(X)))) { 2382 replaceOperand(*II, 0, X); 2383 replaceOperand(*II, 1, X); 2384 return II; 2385 } 2386 2387 // Try to simplify the underlying FMul. We can only apply simplifications 2388 // that do not require rounding. 2389 if (Value *V = simplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1), 2390 II->getFastMathFlags(), 2391 SQ.getWithInstruction(II))) { 2392 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2)); 2393 FAdd->copyFastMathFlags(II); 2394 return FAdd; 2395 } 2396 2397 // fma x, y, 0 -> fmul x, y 2398 // This is always valid for -0.0, but requires nsz for +0.0 as 2399 // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own. 2400 if (match(II->getArgOperand(2), m_NegZeroFP()) || 2401 (match(II->getArgOperand(2), m_PosZeroFP()) && 2402 II->getFastMathFlags().noSignedZeros())) 2403 return BinaryOperator::CreateFMulFMF(Src0, Src1, II); 2404 2405 break; 2406 } 2407 case Intrinsic::copysign: { 2408 Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1); 2409 if (SignBitMustBeZero(Sign, DL, &TLI)) { 2410 // If we know that the sign argument is positive, reduce to FABS: 2411 // copysign Mag, +Sign --> fabs Mag 2412 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II); 2413 return replaceInstUsesWith(*II, Fabs); 2414 } 2415 // TODO: There should be a ValueTracking sibling like SignBitMustBeOne. 2416 const APFloat *C; 2417 if (match(Sign, m_APFloat(C)) && C->isNegative()) { 2418 // If we know that the sign argument is negative, reduce to FNABS: 2419 // copysign Mag, -Sign --> fneg (fabs Mag) 2420 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II); 2421 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II)); 2422 } 2423 2424 // Propagate sign argument through nested calls: 2425 // copysign Mag, (copysign ?, X) --> copysign Mag, X 2426 Value *X; 2427 if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X)))) 2428 return replaceOperand(*II, 1, X); 2429 2430 // Peek through changes of magnitude's sign-bit. This call rewrites those: 2431 // copysign (fabs X), Sign --> copysign X, Sign 2432 // copysign (fneg X), Sign --> copysign X, Sign 2433 if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X)))) 2434 return replaceOperand(*II, 0, X); 2435 2436 break; 2437 } 2438 case Intrinsic::fabs: { 2439 Value *Cond, *TVal, *FVal; 2440 if (match(II->getArgOperand(0), 2441 m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) { 2442 // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF 2443 if (isa<Constant>(TVal) && isa<Constant>(FVal)) { 2444 CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal}); 2445 CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal}); 2446 return SelectInst::Create(Cond, AbsT, AbsF); 2447 } 2448 // fabs (select Cond, -FVal, FVal) --> fabs FVal 2449 if (match(TVal, m_FNeg(m_Specific(FVal)))) 2450 return replaceOperand(*II, 0, FVal); 2451 // fabs (select Cond, TVal, -TVal) --> fabs TVal 2452 if (match(FVal, m_FNeg(m_Specific(TVal)))) 2453 return replaceOperand(*II, 0, TVal); 2454 } 2455 2456 Value *Magnitude, *Sign; 2457 if (match(II->getArgOperand(0), 2458 m_CopySign(m_Value(Magnitude), m_Value(Sign)))) { 2459 // fabs (copysign x, y) -> (fabs x) 2460 CallInst *AbsSign = 2461 Builder.CreateCall(II->getCalledFunction(), {Magnitude}); 2462 AbsSign->copyFastMathFlags(II); 2463 return replaceInstUsesWith(*II, AbsSign); 2464 } 2465 2466 [[fallthrough]]; 2467 } 2468 case Intrinsic::ceil: 2469 case Intrinsic::floor: 2470 case Intrinsic::round: 2471 case Intrinsic::roundeven: 2472 case Intrinsic::nearbyint: 2473 case Intrinsic::rint: 2474 case Intrinsic::trunc: { 2475 Value *ExtSrc; 2476 if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) { 2477 // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x) 2478 Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II); 2479 return new FPExtInst(NarrowII, II->getType()); 2480 } 2481 break; 2482 } 2483 case Intrinsic::cos: 2484 case Intrinsic::amdgcn_cos: { 2485 Value *X; 2486 Value *Src = II->getArgOperand(0); 2487 if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) { 2488 // cos(-x) -> cos(x) 2489 // cos(fabs(x)) -> cos(x) 2490 return replaceOperand(*II, 0, X); 2491 } 2492 break; 2493 } 2494 case Intrinsic::sin: { 2495 Value *X; 2496 if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) { 2497 // sin(-x) --> -sin(x) 2498 Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II); 2499 Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin); 2500 FNeg->copyFastMathFlags(II); 2501 return FNeg; 2502 } 2503 break; 2504 } 2505 case Intrinsic::ldexp: { 2506 // ldexp(ldexp(x, a), b) -> ldexp(x, a + b) 2507 // 2508 // The danger is if the first ldexp would overflow to infinity or underflow 2509 // to zero, but the combined exponent avoids it. We ignore this with 2510 // reassoc. 2511 // 2512 // It's also safe to fold if we know both exponents are >= 0 or <= 0 since 2513 // it would just double down on the overflow/underflow which would occur 2514 // anyway. 2515 // 2516 // TODO: Could do better if we had range tracking for the input value 2517 // exponent. Also could broaden sign check to cover == 0 case. 2518 Value *Src = II->getArgOperand(0); 2519 Value *Exp = II->getArgOperand(1); 2520 Value *InnerSrc; 2521 Value *InnerExp; 2522 if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ldexp>( 2523 m_Value(InnerSrc), m_Value(InnerExp)))) && 2524 Exp->getType() == InnerExp->getType()) { 2525 FastMathFlags FMF = II->getFastMathFlags(); 2526 FastMathFlags InnerFlags = cast<FPMathOperator>(Src)->getFastMathFlags(); 2527 2528 if ((FMF.allowReassoc() && InnerFlags.allowReassoc()) || 2529 signBitMustBeTheSame(Exp, InnerExp, II, DL, &AC, &DT)) { 2530 // TODO: Add nsw/nuw probably safe if integer type exceeds exponent 2531 // width. 2532 Value *NewExp = Builder.CreateAdd(InnerExp, Exp); 2533 II->setArgOperand(1, NewExp); 2534 II->setFastMathFlags(InnerFlags); // Or the inner flags. 2535 return replaceOperand(*II, 0, InnerSrc); 2536 } 2537 } 2538 2539 break; 2540 } 2541 case Intrinsic::ptrauth_auth: 2542 case Intrinsic::ptrauth_resign: { 2543 // (sign|resign) + (auth|resign) can be folded by omitting the middle 2544 // sign+auth component if the key and discriminator match. 2545 bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign; 2546 Value *Key = II->getArgOperand(1); 2547 Value *Disc = II->getArgOperand(2); 2548 2549 // AuthKey will be the key we need to end up authenticating against in 2550 // whatever we replace this sequence with. 2551 Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr; 2552 if (auto CI = dyn_cast<CallBase>(II->getArgOperand(0))) { 2553 BasePtr = CI->getArgOperand(0); 2554 if (CI->getIntrinsicID() == Intrinsic::ptrauth_sign) { 2555 if (CI->getArgOperand(1) != Key || CI->getArgOperand(2) != Disc) 2556 break; 2557 } else if (CI->getIntrinsicID() == Intrinsic::ptrauth_resign) { 2558 if (CI->getArgOperand(3) != Key || CI->getArgOperand(4) != Disc) 2559 break; 2560 AuthKey = CI->getArgOperand(1); 2561 AuthDisc = CI->getArgOperand(2); 2562 } else 2563 break; 2564 } else 2565 break; 2566 2567 unsigned NewIntrin; 2568 if (AuthKey && NeedSign) { 2569 // resign(0,1) + resign(1,2) = resign(0, 2) 2570 NewIntrin = Intrinsic::ptrauth_resign; 2571 } else if (AuthKey) { 2572 // resign(0,1) + auth(1) = auth(0) 2573 NewIntrin = Intrinsic::ptrauth_auth; 2574 } else if (NeedSign) { 2575 // sign(0) + resign(0, 1) = sign(1) 2576 NewIntrin = Intrinsic::ptrauth_sign; 2577 } else { 2578 // sign(0) + auth(0) = nop 2579 replaceInstUsesWith(*II, BasePtr); 2580 eraseInstFromFunction(*II); 2581 return nullptr; 2582 } 2583 2584 SmallVector<Value *, 4> CallArgs; 2585 CallArgs.push_back(BasePtr); 2586 if (AuthKey) { 2587 CallArgs.push_back(AuthKey); 2588 CallArgs.push_back(AuthDisc); 2589 } 2590 2591 if (NeedSign) { 2592 CallArgs.push_back(II->getArgOperand(3)); 2593 CallArgs.push_back(II->getArgOperand(4)); 2594 } 2595 2596 Function *NewFn = Intrinsic::getDeclaration(II->getModule(), NewIntrin); 2597 return CallInst::Create(NewFn, CallArgs); 2598 } 2599 case Intrinsic::arm_neon_vtbl1: 2600 case Intrinsic::aarch64_neon_tbl1: 2601 if (Value *V = simplifyNeonTbl1(*II, Builder)) 2602 return replaceInstUsesWith(*II, V); 2603 break; 2604 2605 case Intrinsic::arm_neon_vmulls: 2606 case Intrinsic::arm_neon_vmullu: 2607 case Intrinsic::aarch64_neon_smull: 2608 case Intrinsic::aarch64_neon_umull: { 2609 Value *Arg0 = II->getArgOperand(0); 2610 Value *Arg1 = II->getArgOperand(1); 2611 2612 // Handle mul by zero first: 2613 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) { 2614 return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType())); 2615 } 2616 2617 // Check for constant LHS & RHS - in this case we just simplify. 2618 bool Zext = (IID == Intrinsic::arm_neon_vmullu || 2619 IID == Intrinsic::aarch64_neon_umull); 2620 VectorType *NewVT = cast<VectorType>(II->getType()); 2621 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) { 2622 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) { 2623 Value *V0 = Builder.CreateIntCast(CV0, NewVT, /*isSigned=*/!Zext); 2624 Value *V1 = Builder.CreateIntCast(CV1, NewVT, /*isSigned=*/!Zext); 2625 return replaceInstUsesWith(CI, Builder.CreateMul(V0, V1)); 2626 } 2627 2628 // Couldn't simplify - canonicalize constant to the RHS. 2629 std::swap(Arg0, Arg1); 2630 } 2631 2632 // Handle mul by one: 2633 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) 2634 if (ConstantInt *Splat = 2635 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) 2636 if (Splat->isOne()) 2637 return CastInst::CreateIntegerCast(Arg0, II->getType(), 2638 /*isSigned=*/!Zext); 2639 2640 break; 2641 } 2642 case Intrinsic::arm_neon_aesd: 2643 case Intrinsic::arm_neon_aese: 2644 case Intrinsic::aarch64_crypto_aesd: 2645 case Intrinsic::aarch64_crypto_aese: { 2646 Value *DataArg = II->getArgOperand(0); 2647 Value *KeyArg = II->getArgOperand(1); 2648 2649 // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR 2650 Value *Data, *Key; 2651 if (match(KeyArg, m_ZeroInt()) && 2652 match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) { 2653 replaceOperand(*II, 0, Data); 2654 replaceOperand(*II, 1, Key); 2655 return II; 2656 } 2657 break; 2658 } 2659 case Intrinsic::hexagon_V6_vandvrt: 2660 case Intrinsic::hexagon_V6_vandvrt_128B: { 2661 // Simplify Q -> V -> Q conversion. 2662 if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) { 2663 Intrinsic::ID ID0 = Op0->getIntrinsicID(); 2664 if (ID0 != Intrinsic::hexagon_V6_vandqrt && 2665 ID0 != Intrinsic::hexagon_V6_vandqrt_128B) 2666 break; 2667 Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1); 2668 uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue(); 2669 uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue(); 2670 // Check if every byte has common bits in Bytes and Mask. 2671 uint64_t C = Bytes1 & Mask1; 2672 if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000)) 2673 return replaceInstUsesWith(*II, Op0->getArgOperand(0)); 2674 } 2675 break; 2676 } 2677 case Intrinsic::stackrestore: { 2678 enum class ClassifyResult { 2679 None, 2680 Alloca, 2681 StackRestore, 2682 CallWithSideEffects, 2683 }; 2684 auto Classify = [](const Instruction *I) { 2685 if (isa<AllocaInst>(I)) 2686 return ClassifyResult::Alloca; 2687 2688 if (auto *CI = dyn_cast<CallInst>(I)) { 2689 if (auto *II = dyn_cast<IntrinsicInst>(CI)) { 2690 if (II->getIntrinsicID() == Intrinsic::stackrestore) 2691 return ClassifyResult::StackRestore; 2692 2693 if (II->mayHaveSideEffects()) 2694 return ClassifyResult::CallWithSideEffects; 2695 } else { 2696 // Consider all non-intrinsic calls to be side effects 2697 return ClassifyResult::CallWithSideEffects; 2698 } 2699 } 2700 2701 return ClassifyResult::None; 2702 }; 2703 2704 // If the stacksave and the stackrestore are in the same BB, and there is 2705 // no intervening call, alloca, or stackrestore of a different stacksave, 2706 // remove the restore. This can happen when variable allocas are DCE'd. 2707 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) { 2708 if (SS->getIntrinsicID() == Intrinsic::stacksave && 2709 SS->getParent() == II->getParent()) { 2710 BasicBlock::iterator BI(SS); 2711 bool CannotRemove = false; 2712 for (++BI; &*BI != II; ++BI) { 2713 switch (Classify(&*BI)) { 2714 case ClassifyResult::None: 2715 // So far so good, look at next instructions. 2716 break; 2717 2718 case ClassifyResult::StackRestore: 2719 // If we found an intervening stackrestore for a different 2720 // stacksave, we can't remove the stackrestore. Otherwise, continue. 2721 if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS) 2722 CannotRemove = true; 2723 break; 2724 2725 case ClassifyResult::Alloca: 2726 case ClassifyResult::CallWithSideEffects: 2727 // If we found an alloca, a non-intrinsic call, or an intrinsic 2728 // call with side effects, we can't remove the stackrestore. 2729 CannotRemove = true; 2730 break; 2731 } 2732 if (CannotRemove) 2733 break; 2734 } 2735 2736 if (!CannotRemove) 2737 return eraseInstFromFunction(CI); 2738 } 2739 } 2740 2741 // Scan down this block to see if there is another stack restore in the 2742 // same block without an intervening call/alloca. 2743 BasicBlock::iterator BI(II); 2744 Instruction *TI = II->getParent()->getTerminator(); 2745 bool CannotRemove = false; 2746 for (++BI; &*BI != TI; ++BI) { 2747 switch (Classify(&*BI)) { 2748 case ClassifyResult::None: 2749 // So far so good, look at next instructions. 2750 break; 2751 2752 case ClassifyResult::StackRestore: 2753 // If there is a stackrestore below this one, remove this one. 2754 return eraseInstFromFunction(CI); 2755 2756 case ClassifyResult::Alloca: 2757 case ClassifyResult::CallWithSideEffects: 2758 // If we found an alloca, a non-intrinsic call, or an intrinsic call 2759 // with side effects (such as llvm.stacksave and llvm.read_register), 2760 // we can't remove the stack restore. 2761 CannotRemove = true; 2762 break; 2763 } 2764 if (CannotRemove) 2765 break; 2766 } 2767 2768 // If the stack restore is in a return, resume, or unwind block and if there 2769 // are no allocas or calls between the restore and the return, nuke the 2770 // restore. 2771 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI))) 2772 return eraseInstFromFunction(CI); 2773 break; 2774 } 2775 case Intrinsic::lifetime_end: 2776 // Asan needs to poison memory to detect invalid access which is possible 2777 // even for empty lifetime range. 2778 if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) || 2779 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) || 2780 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress)) 2781 break; 2782 2783 if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) { 2784 return I.getIntrinsicID() == Intrinsic::lifetime_start; 2785 })) 2786 return nullptr; 2787 break; 2788 case Intrinsic::assume: { 2789 Value *IIOperand = II->getArgOperand(0); 2790 SmallVector<OperandBundleDef, 4> OpBundles; 2791 II->getOperandBundlesAsDefs(OpBundles); 2792 2793 /// This will remove the boolean Condition from the assume given as 2794 /// argument and remove the assume if it becomes useless. 2795 /// always returns nullptr for use as a return values. 2796 auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * { 2797 assert(isa<AssumeInst>(Assume)); 2798 if (isAssumeWithEmptyBundle(*cast<AssumeInst>(II))) 2799 return eraseInstFromFunction(CI); 2800 replaceUse(II->getOperandUse(0), ConstantInt::getTrue(II->getContext())); 2801 return nullptr; 2802 }; 2803 // Remove an assume if it is followed by an identical assume. 2804 // TODO: Do we need this? Unless there are conflicting assumptions, the 2805 // computeKnownBits(IIOperand) below here eliminates redundant assumes. 2806 Instruction *Next = II->getNextNonDebugInstruction(); 2807 if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand)))) 2808 return RemoveConditionFromAssume(Next); 2809 2810 // Canonicalize assume(a && b) -> assume(a); assume(b); 2811 // Note: New assumption intrinsics created here are registered by 2812 // the InstCombineIRInserter object. 2813 FunctionType *AssumeIntrinsicTy = II->getFunctionType(); 2814 Value *AssumeIntrinsic = II->getCalledOperand(); 2815 Value *A, *B; 2816 if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) { 2817 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles, 2818 II->getName()); 2819 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName()); 2820 return eraseInstFromFunction(*II); 2821 } 2822 // assume(!(a || b)) -> assume(!a); assume(!b); 2823 if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) { 2824 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, 2825 Builder.CreateNot(A), OpBundles, II->getName()); 2826 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, 2827 Builder.CreateNot(B), II->getName()); 2828 return eraseInstFromFunction(*II); 2829 } 2830 2831 // assume( (load addr) != null ) -> add 'nonnull' metadata to load 2832 // (if assume is valid at the load) 2833 CmpInst::Predicate Pred; 2834 Instruction *LHS; 2835 if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) && 2836 Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load && 2837 LHS->getType()->isPointerTy() && 2838 isValidAssumeForContext(II, LHS, &DT)) { 2839 MDNode *MD = MDNode::get(II->getContext(), std::nullopt); 2840 LHS->setMetadata(LLVMContext::MD_nonnull, MD); 2841 LHS->setMetadata(LLVMContext::MD_noundef, MD); 2842 return RemoveConditionFromAssume(II); 2843 2844 // TODO: apply nonnull return attributes to calls and invokes 2845 // TODO: apply range metadata for range check patterns? 2846 } 2847 2848 // Separate storage assumptions apply to the underlying allocations, not any 2849 // particular pointer within them. When evaluating the hints for AA purposes 2850 // we getUnderlyingObject them; by precomputing the answers here we can 2851 // avoid having to do so repeatedly there. 2852 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) { 2853 OperandBundleUse OBU = II->getOperandBundleAt(Idx); 2854 if (OBU.getTagName() == "separate_storage") { 2855 assert(OBU.Inputs.size() == 2); 2856 auto MaybeSimplifyHint = [&](const Use &U) { 2857 Value *Hint = U.get(); 2858 // Not having a limit is safe because InstCombine removes unreachable 2859 // code. 2860 Value *UnderlyingObject = getUnderlyingObject(Hint, /*MaxLookup*/ 0); 2861 if (Hint != UnderlyingObject) 2862 replaceUse(const_cast<Use &>(U), UnderlyingObject); 2863 }; 2864 MaybeSimplifyHint(OBU.Inputs[0]); 2865 MaybeSimplifyHint(OBU.Inputs[1]); 2866 } 2867 } 2868 2869 // Convert nonnull assume like: 2870 // %A = icmp ne i32* %PTR, null 2871 // call void @llvm.assume(i1 %A) 2872 // into 2873 // call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ] 2874 if (EnableKnowledgeRetention && 2875 match(IIOperand, m_Cmp(Pred, m_Value(A), m_Zero())) && 2876 Pred == CmpInst::ICMP_NE && A->getType()->isPointerTy()) { 2877 if (auto *Replacement = buildAssumeFromKnowledge( 2878 {RetainedKnowledge{Attribute::NonNull, 0, A}}, Next, &AC, &DT)) { 2879 2880 Replacement->insertBefore(Next); 2881 AC.registerAssumption(Replacement); 2882 return RemoveConditionFromAssume(II); 2883 } 2884 } 2885 2886 // Convert alignment assume like: 2887 // %B = ptrtoint i32* %A to i64 2888 // %C = and i64 %B, Constant 2889 // %D = icmp eq i64 %C, 0 2890 // call void @llvm.assume(i1 %D) 2891 // into 2892 // call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 Constant + 1)] 2893 uint64_t AlignMask; 2894 if (EnableKnowledgeRetention && 2895 match(IIOperand, 2896 m_Cmp(Pred, m_And(m_Value(A), m_ConstantInt(AlignMask)), 2897 m_Zero())) && 2898 Pred == CmpInst::ICMP_EQ) { 2899 if (isPowerOf2_64(AlignMask + 1)) { 2900 uint64_t Offset = 0; 2901 match(A, m_Add(m_Value(A), m_ConstantInt(Offset))); 2902 if (match(A, m_PtrToInt(m_Value(A)))) { 2903 /// Note: this doesn't preserve the offset information but merges 2904 /// offset and alignment. 2905 /// TODO: we can generate a GEP instead of merging the alignment with 2906 /// the offset. 2907 RetainedKnowledge RK{Attribute::Alignment, 2908 (unsigned)MinAlign(Offset, AlignMask + 1), A}; 2909 if (auto *Replacement = 2910 buildAssumeFromKnowledge(RK, Next, &AC, &DT)) { 2911 2912 Replacement->insertAfter(II); 2913 AC.registerAssumption(Replacement); 2914 } 2915 return RemoveConditionFromAssume(II); 2916 } 2917 } 2918 } 2919 2920 /// Canonicalize Knowledge in operand bundles. 2921 if (EnableKnowledgeRetention && II->hasOperandBundles()) { 2922 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) { 2923 auto &BOI = II->bundle_op_info_begin()[Idx]; 2924 RetainedKnowledge RK = 2925 llvm::getKnowledgeFromBundle(cast<AssumeInst>(*II), BOI); 2926 if (BOI.End - BOI.Begin > 2) 2927 continue; // Prevent reducing knowledge in an align with offset since 2928 // extracting a RetainedKnowledge from them looses offset 2929 // information 2930 RetainedKnowledge CanonRK = 2931 llvm::simplifyRetainedKnowledge(cast<AssumeInst>(II), RK, 2932 &getAssumptionCache(), 2933 &getDominatorTree()); 2934 if (CanonRK == RK) 2935 continue; 2936 if (!CanonRK) { 2937 if (BOI.End - BOI.Begin > 0) { 2938 Worklist.pushValue(II->op_begin()[BOI.Begin]); 2939 Value::dropDroppableUse(II->op_begin()[BOI.Begin]); 2940 } 2941 continue; 2942 } 2943 assert(RK.AttrKind == CanonRK.AttrKind); 2944 if (BOI.End - BOI.Begin > 0) 2945 II->op_begin()[BOI.Begin].set(CanonRK.WasOn); 2946 if (BOI.End - BOI.Begin > 1) 2947 II->op_begin()[BOI.Begin + 1].set(ConstantInt::get( 2948 Type::getInt64Ty(II->getContext()), CanonRK.ArgValue)); 2949 if (RK.WasOn) 2950 Worklist.pushValue(RK.WasOn); 2951 return II; 2952 } 2953 } 2954 2955 // If there is a dominating assume with the same condition as this one, 2956 // then this one is redundant, and should be removed. 2957 KnownBits Known(1); 2958 computeKnownBits(IIOperand, Known, 0, II); 2959 if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) 2960 return eraseInstFromFunction(*II); 2961 2962 // assume(false) is unreachable. 2963 if (match(IIOperand, m_CombineOr(m_Zero(), m_Undef()))) { 2964 CreateNonTerminatorUnreachable(II); 2965 return eraseInstFromFunction(*II); 2966 } 2967 2968 // Update the cache of affected values for this assumption (we might be 2969 // here because we just simplified the condition). 2970 AC.updateAffectedValues(cast<AssumeInst>(II)); 2971 break; 2972 } 2973 case Intrinsic::experimental_guard: { 2974 // Is this guard followed by another guard? We scan forward over a small 2975 // fixed window of instructions to handle common cases with conditions 2976 // computed between guards. 2977 Instruction *NextInst = II->getNextNonDebugInstruction(); 2978 for (unsigned i = 0; i < GuardWideningWindow; i++) { 2979 // Note: Using context-free form to avoid compile time blow up 2980 if (!isSafeToSpeculativelyExecute(NextInst)) 2981 break; 2982 NextInst = NextInst->getNextNonDebugInstruction(); 2983 } 2984 Value *NextCond = nullptr; 2985 if (match(NextInst, 2986 m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) { 2987 Value *CurrCond = II->getArgOperand(0); 2988 2989 // Remove a guard that it is immediately preceded by an identical guard. 2990 // Otherwise canonicalize guard(a); guard(b) -> guard(a & b). 2991 if (CurrCond != NextCond) { 2992 Instruction *MoveI = II->getNextNonDebugInstruction(); 2993 while (MoveI != NextInst) { 2994 auto *Temp = MoveI; 2995 MoveI = MoveI->getNextNonDebugInstruction(); 2996 Temp->moveBefore(II); 2997 } 2998 replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond)); 2999 } 3000 eraseInstFromFunction(*NextInst); 3001 return II; 3002 } 3003 break; 3004 } 3005 case Intrinsic::vector_insert: { 3006 Value *Vec = II->getArgOperand(0); 3007 Value *SubVec = II->getArgOperand(1); 3008 Value *Idx = II->getArgOperand(2); 3009 auto *DstTy = dyn_cast<FixedVectorType>(II->getType()); 3010 auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType()); 3011 auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType()); 3012 3013 // Only canonicalize if the destination vector, Vec, and SubVec are all 3014 // fixed vectors. 3015 if (DstTy && VecTy && SubVecTy) { 3016 unsigned DstNumElts = DstTy->getNumElements(); 3017 unsigned VecNumElts = VecTy->getNumElements(); 3018 unsigned SubVecNumElts = SubVecTy->getNumElements(); 3019 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue(); 3020 3021 // An insert that entirely overwrites Vec with SubVec is a nop. 3022 if (VecNumElts == SubVecNumElts) 3023 return replaceInstUsesWith(CI, SubVec); 3024 3025 // Widen SubVec into a vector of the same width as Vec, since 3026 // shufflevector requires the two input vectors to be the same width. 3027 // Elements beyond the bounds of SubVec within the widened vector are 3028 // undefined. 3029 SmallVector<int, 8> WidenMask; 3030 unsigned i; 3031 for (i = 0; i != SubVecNumElts; ++i) 3032 WidenMask.push_back(i); 3033 for (; i != VecNumElts; ++i) 3034 WidenMask.push_back(PoisonMaskElem); 3035 3036 Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask); 3037 3038 SmallVector<int, 8> Mask; 3039 for (unsigned i = 0; i != IdxN; ++i) 3040 Mask.push_back(i); 3041 for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i) 3042 Mask.push_back(i); 3043 for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i) 3044 Mask.push_back(i); 3045 3046 Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask); 3047 return replaceInstUsesWith(CI, Shuffle); 3048 } 3049 break; 3050 } 3051 case Intrinsic::vector_extract: { 3052 Value *Vec = II->getArgOperand(0); 3053 Value *Idx = II->getArgOperand(1); 3054 3055 Type *ReturnType = II->getType(); 3056 // (extract_vector (insert_vector InsertTuple, InsertValue, InsertIdx), 3057 // ExtractIdx) 3058 unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue(); 3059 Value *InsertTuple, *InsertIdx, *InsertValue; 3060 if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(m_Value(InsertTuple), 3061 m_Value(InsertValue), 3062 m_Value(InsertIdx))) && 3063 InsertValue->getType() == ReturnType) { 3064 unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue(); 3065 // Case where we get the same index right after setting it. 3066 // extract.vector(insert.vector(InsertTuple, InsertValue, Idx), Idx) --> 3067 // InsertValue 3068 if (ExtractIdx == Index) 3069 return replaceInstUsesWith(CI, InsertValue); 3070 // If we are getting a different index than what was set in the 3071 // insert.vector intrinsic. We can just set the input tuple to the one up 3072 // in the chain. extract.vector(insert.vector(InsertTuple, InsertValue, 3073 // InsertIndex), ExtractIndex) 3074 // --> extract.vector(InsertTuple, ExtractIndex) 3075 else 3076 return replaceOperand(CI, 0, InsertTuple); 3077 } 3078 3079 auto *DstTy = dyn_cast<VectorType>(ReturnType); 3080 auto *VecTy = dyn_cast<VectorType>(Vec->getType()); 3081 3082 if (DstTy && VecTy) { 3083 auto DstEltCnt = DstTy->getElementCount(); 3084 auto VecEltCnt = VecTy->getElementCount(); 3085 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue(); 3086 3087 // Extracting the entirety of Vec is a nop. 3088 if (DstEltCnt == VecTy->getElementCount()) { 3089 replaceInstUsesWith(CI, Vec); 3090 return eraseInstFromFunction(CI); 3091 } 3092 3093 // Only canonicalize to shufflevector if the destination vector and 3094 // Vec are fixed vectors. 3095 if (VecEltCnt.isScalable() || DstEltCnt.isScalable()) 3096 break; 3097 3098 SmallVector<int, 8> Mask; 3099 for (unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i) 3100 Mask.push_back(IdxN + i); 3101 3102 Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask); 3103 return replaceInstUsesWith(CI, Shuffle); 3104 } 3105 break; 3106 } 3107 case Intrinsic::experimental_vector_reverse: { 3108 Value *BO0, *BO1, *X, *Y; 3109 Value *Vec = II->getArgOperand(0); 3110 if (match(Vec, m_OneUse(m_BinOp(m_Value(BO0), m_Value(BO1))))) { 3111 auto *OldBinOp = cast<BinaryOperator>(Vec); 3112 if (match(BO0, m_VecReverse(m_Value(X)))) { 3113 // rev(binop rev(X), rev(Y)) --> binop X, Y 3114 if (match(BO1, m_VecReverse(m_Value(Y)))) 3115 return replaceInstUsesWith(CI, 3116 BinaryOperator::CreateWithCopiedFlags( 3117 OldBinOp->getOpcode(), X, Y, OldBinOp, 3118 OldBinOp->getName(), II)); 3119 // rev(binop rev(X), BO1Splat) --> binop X, BO1Splat 3120 if (isSplatValue(BO1)) 3121 return replaceInstUsesWith(CI, 3122 BinaryOperator::CreateWithCopiedFlags( 3123 OldBinOp->getOpcode(), X, BO1, 3124 OldBinOp, OldBinOp->getName(), II)); 3125 } 3126 // rev(binop BO0Splat, rev(Y)) --> binop BO0Splat, Y 3127 if (match(BO1, m_VecReverse(m_Value(Y))) && isSplatValue(BO0)) 3128 return replaceInstUsesWith(CI, BinaryOperator::CreateWithCopiedFlags( 3129 OldBinOp->getOpcode(), BO0, Y, 3130 OldBinOp, OldBinOp->getName(), II)); 3131 } 3132 // rev(unop rev(X)) --> unop X 3133 if (match(Vec, m_OneUse(m_UnOp(m_VecReverse(m_Value(X)))))) { 3134 auto *OldUnOp = cast<UnaryOperator>(Vec); 3135 auto *NewUnOp = UnaryOperator::CreateWithCopiedFlags( 3136 OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(), II); 3137 return replaceInstUsesWith(CI, NewUnOp); 3138 } 3139 break; 3140 } 3141 case Intrinsic::vector_reduce_or: 3142 case Intrinsic::vector_reduce_and: { 3143 // Canonicalize logical or/and reductions: 3144 // Or reduction for i1 is represented as: 3145 // %val = bitcast <ReduxWidth x i1> to iReduxWidth 3146 // %res = cmp ne iReduxWidth %val, 0 3147 // And reduction for i1 is represented as: 3148 // %val = bitcast <ReduxWidth x i1> to iReduxWidth 3149 // %res = cmp eq iReduxWidth %val, 11111 3150 Value *Arg = II->getArgOperand(0); 3151 Value *Vect; 3152 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3153 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3154 if (FTy->getElementType() == Builder.getInt1Ty()) { 3155 Value *Res = Builder.CreateBitCast( 3156 Vect, Builder.getIntNTy(FTy->getNumElements())); 3157 if (IID == Intrinsic::vector_reduce_and) { 3158 Res = Builder.CreateICmpEQ( 3159 Res, ConstantInt::getAllOnesValue(Res->getType())); 3160 } else { 3161 assert(IID == Intrinsic::vector_reduce_or && 3162 "Expected or reduction."); 3163 Res = Builder.CreateIsNotNull(Res); 3164 } 3165 if (Arg != Vect) 3166 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 3167 II->getType()); 3168 return replaceInstUsesWith(CI, Res); 3169 } 3170 } 3171 [[fallthrough]]; 3172 } 3173 case Intrinsic::vector_reduce_add: { 3174 if (IID == Intrinsic::vector_reduce_add) { 3175 // Convert vector_reduce_add(ZExt(<n x i1>)) to 3176 // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)). 3177 // Convert vector_reduce_add(SExt(<n x i1>)) to 3178 // -ZExtOrTrunc(ctpop(bitcast <n x i1> to in)). 3179 // Convert vector_reduce_add(<n x i1>) to 3180 // Trunc(ctpop(bitcast <n x i1> to in)). 3181 Value *Arg = II->getArgOperand(0); 3182 Value *Vect; 3183 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3184 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3185 if (FTy->getElementType() == Builder.getInt1Ty()) { 3186 Value *V = Builder.CreateBitCast( 3187 Vect, Builder.getIntNTy(FTy->getNumElements())); 3188 Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V); 3189 if (Res->getType() != II->getType()) 3190 Res = Builder.CreateZExtOrTrunc(Res, II->getType()); 3191 if (Arg != Vect && 3192 cast<Instruction>(Arg)->getOpcode() == Instruction::SExt) 3193 Res = Builder.CreateNeg(Res); 3194 return replaceInstUsesWith(CI, Res); 3195 } 3196 } 3197 } 3198 [[fallthrough]]; 3199 } 3200 case Intrinsic::vector_reduce_xor: { 3201 if (IID == Intrinsic::vector_reduce_xor) { 3202 // Exclusive disjunction reduction over the vector with 3203 // (potentially-extended) i1 element type is actually a 3204 // (potentially-extended) arithmetic `add` reduction over the original 3205 // non-extended value: 3206 // vector_reduce_xor(?ext(<n x i1>)) 3207 // --> 3208 // ?ext(vector_reduce_add(<n x i1>)) 3209 Value *Arg = II->getArgOperand(0); 3210 Value *Vect; 3211 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3212 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3213 if (FTy->getElementType() == Builder.getInt1Ty()) { 3214 Value *Res = Builder.CreateAddReduce(Vect); 3215 if (Arg != Vect) 3216 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 3217 II->getType()); 3218 return replaceInstUsesWith(CI, Res); 3219 } 3220 } 3221 } 3222 [[fallthrough]]; 3223 } 3224 case Intrinsic::vector_reduce_mul: { 3225 if (IID == Intrinsic::vector_reduce_mul) { 3226 // Multiplicative reduction over the vector with (potentially-extended) 3227 // i1 element type is actually a (potentially zero-extended) 3228 // logical `and` reduction over the original non-extended value: 3229 // vector_reduce_mul(?ext(<n x i1>)) 3230 // --> 3231 // zext(vector_reduce_and(<n x i1>)) 3232 Value *Arg = II->getArgOperand(0); 3233 Value *Vect; 3234 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3235 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3236 if (FTy->getElementType() == Builder.getInt1Ty()) { 3237 Value *Res = Builder.CreateAndReduce(Vect); 3238 if (Res->getType() != II->getType()) 3239 Res = Builder.CreateZExt(Res, II->getType()); 3240 return replaceInstUsesWith(CI, Res); 3241 } 3242 } 3243 } 3244 [[fallthrough]]; 3245 } 3246 case Intrinsic::vector_reduce_umin: 3247 case Intrinsic::vector_reduce_umax: { 3248 if (IID == Intrinsic::vector_reduce_umin || 3249 IID == Intrinsic::vector_reduce_umax) { 3250 // UMin/UMax reduction over the vector with (potentially-extended) 3251 // i1 element type is actually a (potentially-extended) 3252 // logical `and`/`or` reduction over the original non-extended value: 3253 // vector_reduce_u{min,max}(?ext(<n x i1>)) 3254 // --> 3255 // ?ext(vector_reduce_{and,or}(<n x i1>)) 3256 Value *Arg = II->getArgOperand(0); 3257 Value *Vect; 3258 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3259 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3260 if (FTy->getElementType() == Builder.getInt1Ty()) { 3261 Value *Res = IID == Intrinsic::vector_reduce_umin 3262 ? Builder.CreateAndReduce(Vect) 3263 : Builder.CreateOrReduce(Vect); 3264 if (Arg != Vect) 3265 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 3266 II->getType()); 3267 return replaceInstUsesWith(CI, Res); 3268 } 3269 } 3270 } 3271 [[fallthrough]]; 3272 } 3273 case Intrinsic::vector_reduce_smin: 3274 case Intrinsic::vector_reduce_smax: { 3275 if (IID == Intrinsic::vector_reduce_smin || 3276 IID == Intrinsic::vector_reduce_smax) { 3277 // SMin/SMax reduction over the vector with (potentially-extended) 3278 // i1 element type is actually a (potentially-extended) 3279 // logical `and`/`or` reduction over the original non-extended value: 3280 // vector_reduce_s{min,max}(<n x i1>) 3281 // --> 3282 // vector_reduce_{or,and}(<n x i1>) 3283 // and 3284 // vector_reduce_s{min,max}(sext(<n x i1>)) 3285 // --> 3286 // sext(vector_reduce_{or,and}(<n x i1>)) 3287 // and 3288 // vector_reduce_s{min,max}(zext(<n x i1>)) 3289 // --> 3290 // zext(vector_reduce_{and,or}(<n x i1>)) 3291 Value *Arg = II->getArgOperand(0); 3292 Value *Vect; 3293 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3294 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3295 if (FTy->getElementType() == Builder.getInt1Ty()) { 3296 Instruction::CastOps ExtOpc = Instruction::CastOps::CastOpsEnd; 3297 if (Arg != Vect) 3298 ExtOpc = cast<CastInst>(Arg)->getOpcode(); 3299 Value *Res = ((IID == Intrinsic::vector_reduce_smin) == 3300 (ExtOpc == Instruction::CastOps::ZExt)) 3301 ? Builder.CreateAndReduce(Vect) 3302 : Builder.CreateOrReduce(Vect); 3303 if (Arg != Vect) 3304 Res = Builder.CreateCast(ExtOpc, Res, II->getType()); 3305 return replaceInstUsesWith(CI, Res); 3306 } 3307 } 3308 } 3309 [[fallthrough]]; 3310 } 3311 case Intrinsic::vector_reduce_fmax: 3312 case Intrinsic::vector_reduce_fmin: 3313 case Intrinsic::vector_reduce_fadd: 3314 case Intrinsic::vector_reduce_fmul: { 3315 bool CanBeReassociated = (IID != Intrinsic::vector_reduce_fadd && 3316 IID != Intrinsic::vector_reduce_fmul) || 3317 II->hasAllowReassoc(); 3318 const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd || 3319 IID == Intrinsic::vector_reduce_fmul) 3320 ? 1 3321 : 0; 3322 Value *Arg = II->getArgOperand(ArgIdx); 3323 Value *V; 3324 ArrayRef<int> Mask; 3325 if (!isa<FixedVectorType>(Arg->getType()) || !CanBeReassociated || 3326 !match(Arg, m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))) || 3327 !cast<ShuffleVectorInst>(Arg)->isSingleSource()) 3328 break; 3329 int Sz = Mask.size(); 3330 SmallBitVector UsedIndices(Sz); 3331 for (int Idx : Mask) { 3332 if (Idx == PoisonMaskElem || UsedIndices.test(Idx)) 3333 break; 3334 UsedIndices.set(Idx); 3335 } 3336 // Can remove shuffle iff just shuffled elements, no repeats, undefs, or 3337 // other changes. 3338 if (UsedIndices.all()) { 3339 replaceUse(II->getOperandUse(ArgIdx), V); 3340 return nullptr; 3341 } 3342 break; 3343 } 3344 case Intrinsic::is_fpclass: { 3345 if (Instruction *I = foldIntrinsicIsFPClass(*II)) 3346 return I; 3347 break; 3348 } 3349 default: { 3350 // Handle target specific intrinsics 3351 std::optional<Instruction *> V = targetInstCombineIntrinsic(*II); 3352 if (V) 3353 return *V; 3354 break; 3355 } 3356 } 3357 3358 // Try to fold intrinsic into select operands. This is legal if: 3359 // * The intrinsic is speculatable. 3360 // * The select condition is not a vector, or the intrinsic does not 3361 // perform cross-lane operations. 3362 switch (IID) { 3363 case Intrinsic::ctlz: 3364 case Intrinsic::cttz: 3365 case Intrinsic::ctpop: 3366 case Intrinsic::umin: 3367 case Intrinsic::umax: 3368 case Intrinsic::smin: 3369 case Intrinsic::smax: 3370 case Intrinsic::usub_sat: 3371 case Intrinsic::uadd_sat: 3372 case Intrinsic::ssub_sat: 3373 case Intrinsic::sadd_sat: 3374 for (Value *Op : II->args()) 3375 if (auto *Sel = dyn_cast<SelectInst>(Op)) 3376 if (Instruction *R = FoldOpIntoSelect(*II, Sel)) 3377 return R; 3378 [[fallthrough]]; 3379 default: 3380 break; 3381 } 3382 3383 if (Instruction *Shuf = foldShuffledIntrinsicOperands(II, Builder)) 3384 return Shuf; 3385 3386 // Some intrinsics (like experimental_gc_statepoint) can be used in invoke 3387 // context, so it is handled in visitCallBase and we should trigger it. 3388 return visitCallBase(*II); 3389 } 3390 3391 // Fence instruction simplification 3392 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) { 3393 auto *NFI = dyn_cast<FenceInst>(FI.getNextNonDebugInstruction()); 3394 // This check is solely here to handle arbitrary target-dependent syncscopes. 3395 // TODO: Can remove if does not matter in practice. 3396 if (NFI && FI.isIdenticalTo(NFI)) 3397 return eraseInstFromFunction(FI); 3398 3399 // Returns true if FI1 is identical or stronger fence than FI2. 3400 auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) { 3401 auto FI1SyncScope = FI1->getSyncScopeID(); 3402 // Consider same scope, where scope is global or single-thread. 3403 if (FI1SyncScope != FI2->getSyncScopeID() || 3404 (FI1SyncScope != SyncScope::System && 3405 FI1SyncScope != SyncScope::SingleThread)) 3406 return false; 3407 3408 return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering()); 3409 }; 3410 if (NFI && isIdenticalOrStrongerFence(NFI, &FI)) 3411 return eraseInstFromFunction(FI); 3412 3413 if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNonDebugInstruction())) 3414 if (isIdenticalOrStrongerFence(PFI, &FI)) 3415 return eraseInstFromFunction(FI); 3416 return nullptr; 3417 } 3418 3419 // InvokeInst simplification 3420 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) { 3421 return visitCallBase(II); 3422 } 3423 3424 // CallBrInst simplification 3425 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) { 3426 return visitCallBase(CBI); 3427 } 3428 3429 Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) { 3430 if (!CI->getCalledFunction()) return nullptr; 3431 3432 // Skip optimizing notail and musttail calls so 3433 // LibCallSimplifier::optimizeCall doesn't have to preserve those invariants. 3434 // LibCallSimplifier::optimizeCall should try to preseve tail calls though. 3435 if (CI->isMustTailCall() || CI->isNoTailCall()) 3436 return nullptr; 3437 3438 auto InstCombineRAUW = [this](Instruction *From, Value *With) { 3439 replaceInstUsesWith(*From, With); 3440 }; 3441 auto InstCombineErase = [this](Instruction *I) { 3442 eraseInstFromFunction(*I); 3443 }; 3444 LibCallSimplifier Simplifier(DL, &TLI, &AC, ORE, BFI, PSI, InstCombineRAUW, 3445 InstCombineErase); 3446 if (Value *With = Simplifier.optimizeCall(CI, Builder)) { 3447 ++NumSimplified; 3448 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With); 3449 } 3450 3451 return nullptr; 3452 } 3453 3454 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) { 3455 // Strip off at most one level of pointer casts, looking for an alloca. This 3456 // is good enough in practice and simpler than handling any number of casts. 3457 Value *Underlying = TrampMem->stripPointerCasts(); 3458 if (Underlying != TrampMem && 3459 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem)) 3460 return nullptr; 3461 if (!isa<AllocaInst>(Underlying)) 3462 return nullptr; 3463 3464 IntrinsicInst *InitTrampoline = nullptr; 3465 for (User *U : TrampMem->users()) { 3466 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 3467 if (!II) 3468 return nullptr; 3469 if (II->getIntrinsicID() == Intrinsic::init_trampoline) { 3470 if (InitTrampoline) 3471 // More than one init_trampoline writes to this value. Give up. 3472 return nullptr; 3473 InitTrampoline = II; 3474 continue; 3475 } 3476 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline) 3477 // Allow any number of calls to adjust.trampoline. 3478 continue; 3479 return nullptr; 3480 } 3481 3482 // No call to init.trampoline found. 3483 if (!InitTrampoline) 3484 return nullptr; 3485 3486 // Check that the alloca is being used in the expected way. 3487 if (InitTrampoline->getOperand(0) != TrampMem) 3488 return nullptr; 3489 3490 return InitTrampoline; 3491 } 3492 3493 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp, 3494 Value *TrampMem) { 3495 // Visit all the previous instructions in the basic block, and try to find a 3496 // init.trampoline which has a direct path to the adjust.trampoline. 3497 for (BasicBlock::iterator I = AdjustTramp->getIterator(), 3498 E = AdjustTramp->getParent()->begin(); 3499 I != E;) { 3500 Instruction *Inst = &*--I; 3501 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 3502 if (II->getIntrinsicID() == Intrinsic::init_trampoline && 3503 II->getOperand(0) == TrampMem) 3504 return II; 3505 if (Inst->mayWriteToMemory()) 3506 return nullptr; 3507 } 3508 return nullptr; 3509 } 3510 3511 // Given a call to llvm.adjust.trampoline, find and return the corresponding 3512 // call to llvm.init.trampoline if the call to the trampoline can be optimized 3513 // to a direct call to a function. Otherwise return NULL. 3514 static IntrinsicInst *findInitTrampoline(Value *Callee) { 3515 Callee = Callee->stripPointerCasts(); 3516 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee); 3517 if (!AdjustTramp || 3518 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline) 3519 return nullptr; 3520 3521 Value *TrampMem = AdjustTramp->getOperand(0); 3522 3523 if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem)) 3524 return IT; 3525 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem)) 3526 return IT; 3527 return nullptr; 3528 } 3529 3530 bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call, 3531 const TargetLibraryInfo *TLI) { 3532 // Note: We only handle cases which can't be driven from generic attributes 3533 // here. So, for example, nonnull and noalias (which are common properties 3534 // of some allocation functions) are expected to be handled via annotation 3535 // of the respective allocator declaration with generic attributes. 3536 bool Changed = false; 3537 3538 if (!Call.getType()->isPointerTy()) 3539 return Changed; 3540 3541 std::optional<APInt> Size = getAllocSize(&Call, TLI); 3542 if (Size && *Size != 0) { 3543 // TODO: We really should just emit deref_or_null here and then 3544 // let the generic inference code combine that with nonnull. 3545 if (Call.hasRetAttr(Attribute::NonNull)) { 3546 Changed = !Call.hasRetAttr(Attribute::Dereferenceable); 3547 Call.addRetAttr(Attribute::getWithDereferenceableBytes( 3548 Call.getContext(), Size->getLimitedValue())); 3549 } else { 3550 Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull); 3551 Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes( 3552 Call.getContext(), Size->getLimitedValue())); 3553 } 3554 } 3555 3556 // Add alignment attribute if alignment is a power of two constant. 3557 Value *Alignment = getAllocAlignment(&Call, TLI); 3558 if (!Alignment) 3559 return Changed; 3560 3561 ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment); 3562 if (AlignOpC && AlignOpC->getValue().ult(llvm::Value::MaximumAlignment)) { 3563 uint64_t AlignmentVal = AlignOpC->getZExtValue(); 3564 if (llvm::isPowerOf2_64(AlignmentVal)) { 3565 Align ExistingAlign = Call.getRetAlign().valueOrOne(); 3566 Align NewAlign = Align(AlignmentVal); 3567 if (NewAlign > ExistingAlign) { 3568 Call.addRetAttr( 3569 Attribute::getWithAlignment(Call.getContext(), NewAlign)); 3570 Changed = true; 3571 } 3572 } 3573 } 3574 return Changed; 3575 } 3576 3577 /// Improvements for call, callbr and invoke instructions. 3578 Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) { 3579 bool Changed = annotateAnyAllocSite(Call, &TLI); 3580 3581 // Mark any parameters that are known to be non-null with the nonnull 3582 // attribute. This is helpful for inlining calls to functions with null 3583 // checks on their arguments. 3584 SmallVector<unsigned, 4> ArgNos; 3585 unsigned ArgNo = 0; 3586 3587 for (Value *V : Call.args()) { 3588 if (V->getType()->isPointerTy() && 3589 !Call.paramHasAttr(ArgNo, Attribute::NonNull) && 3590 isKnownNonZero(V, DL, 0, &AC, &Call, &DT)) 3591 ArgNos.push_back(ArgNo); 3592 ArgNo++; 3593 } 3594 3595 assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly."); 3596 3597 if (!ArgNos.empty()) { 3598 AttributeList AS = Call.getAttributes(); 3599 LLVMContext &Ctx = Call.getContext(); 3600 AS = AS.addParamAttribute(Ctx, ArgNos, 3601 Attribute::get(Ctx, Attribute::NonNull)); 3602 Call.setAttributes(AS); 3603 Changed = true; 3604 } 3605 3606 // If the callee is a pointer to a function, attempt to move any casts to the 3607 // arguments of the call/callbr/invoke. 3608 Value *Callee = Call.getCalledOperand(); 3609 Function *CalleeF = dyn_cast<Function>(Callee); 3610 if ((!CalleeF || CalleeF->getFunctionType() != Call.getFunctionType()) && 3611 transformConstExprCastCall(Call)) 3612 return nullptr; 3613 3614 if (CalleeF) { 3615 // Remove the convergent attr on calls when the callee is not convergent. 3616 if (Call.isConvergent() && !CalleeF->isConvergent() && 3617 !CalleeF->isIntrinsic()) { 3618 LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call 3619 << "\n"); 3620 Call.setNotConvergent(); 3621 return &Call; 3622 } 3623 3624 // If the call and callee calling conventions don't match, and neither one 3625 // of the calling conventions is compatible with C calling convention 3626 // this call must be unreachable, as the call is undefined. 3627 if ((CalleeF->getCallingConv() != Call.getCallingConv() && 3628 !(CalleeF->getCallingConv() == llvm::CallingConv::C && 3629 TargetLibraryInfoImpl::isCallingConvCCompatible(&Call)) && 3630 !(Call.getCallingConv() == llvm::CallingConv::C && 3631 TargetLibraryInfoImpl::isCallingConvCCompatible(CalleeF))) && 3632 // Only do this for calls to a function with a body. A prototype may 3633 // not actually end up matching the implementation's calling conv for a 3634 // variety of reasons (e.g. it may be written in assembly). 3635 !CalleeF->isDeclaration()) { 3636 Instruction *OldCall = &Call; 3637 CreateNonTerminatorUnreachable(OldCall); 3638 // If OldCall does not return void then replaceInstUsesWith poison. 3639 // This allows ValueHandlers and custom metadata to adjust itself. 3640 if (!OldCall->getType()->isVoidTy()) 3641 replaceInstUsesWith(*OldCall, PoisonValue::get(OldCall->getType())); 3642 if (isa<CallInst>(OldCall)) 3643 return eraseInstFromFunction(*OldCall); 3644 3645 // We cannot remove an invoke or a callbr, because it would change thexi 3646 // CFG, just change the callee to a null pointer. 3647 cast<CallBase>(OldCall)->setCalledFunction( 3648 CalleeF->getFunctionType(), 3649 Constant::getNullValue(CalleeF->getType())); 3650 return nullptr; 3651 } 3652 } 3653 3654 // Calling a null function pointer is undefined if a null address isn't 3655 // dereferenceable. 3656 if ((isa<ConstantPointerNull>(Callee) && 3657 !NullPointerIsDefined(Call.getFunction())) || 3658 isa<UndefValue>(Callee)) { 3659 // If Call does not return void then replaceInstUsesWith poison. 3660 // This allows ValueHandlers and custom metadata to adjust itself. 3661 if (!Call.getType()->isVoidTy()) 3662 replaceInstUsesWith(Call, PoisonValue::get(Call.getType())); 3663 3664 if (Call.isTerminator()) { 3665 // Can't remove an invoke or callbr because we cannot change the CFG. 3666 return nullptr; 3667 } 3668 3669 // This instruction is not reachable, just remove it. 3670 CreateNonTerminatorUnreachable(&Call); 3671 return eraseInstFromFunction(Call); 3672 } 3673 3674 if (IntrinsicInst *II = findInitTrampoline(Callee)) 3675 return transformCallThroughTrampoline(Call, *II); 3676 3677 if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) { 3678 InlineAsm *IA = cast<InlineAsm>(Callee); 3679 if (!IA->canThrow()) { 3680 // Normal inline asm calls cannot throw - mark them 3681 // 'nounwind'. 3682 Call.setDoesNotThrow(); 3683 Changed = true; 3684 } 3685 } 3686 3687 // Try to optimize the call if possible, we require DataLayout for most of 3688 // this. None of these calls are seen as possibly dead so go ahead and 3689 // delete the instruction now. 3690 if (CallInst *CI = dyn_cast<CallInst>(&Call)) { 3691 Instruction *I = tryOptimizeCall(CI); 3692 // If we changed something return the result, etc. Otherwise let 3693 // the fallthrough check. 3694 if (I) return eraseInstFromFunction(*I); 3695 } 3696 3697 if (!Call.use_empty() && !Call.isMustTailCall()) 3698 if (Value *ReturnedArg = Call.getReturnedArgOperand()) { 3699 Type *CallTy = Call.getType(); 3700 Type *RetArgTy = ReturnedArg->getType(); 3701 if (RetArgTy->canLosslesslyBitCastTo(CallTy)) 3702 return replaceInstUsesWith( 3703 Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy)); 3704 } 3705 3706 // Drop unnecessary kcfi operand bundles from calls that were converted 3707 // into direct calls. 3708 auto Bundle = Call.getOperandBundle(LLVMContext::OB_kcfi); 3709 if (Bundle && !Call.isIndirectCall()) { 3710 DEBUG_WITH_TYPE(DEBUG_TYPE "-kcfi", { 3711 if (CalleeF) { 3712 ConstantInt *FunctionType = nullptr; 3713 ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]); 3714 3715 if (MDNode *MD = CalleeF->getMetadata(LLVMContext::MD_kcfi_type)) 3716 FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0)); 3717 3718 if (FunctionType && 3719 FunctionType->getZExtValue() != ExpectedType->getZExtValue()) 3720 dbgs() << Call.getModule()->getName() 3721 << ": warning: kcfi: " << Call.getCaller()->getName() 3722 << ": call to " << CalleeF->getName() 3723 << " using a mismatching function pointer type\n"; 3724 } 3725 }); 3726 3727 return CallBase::removeOperandBundle(&Call, LLVMContext::OB_kcfi); 3728 } 3729 3730 if (isRemovableAlloc(&Call, &TLI)) 3731 return visitAllocSite(Call); 3732 3733 // Handle intrinsics which can be used in both call and invoke context. 3734 switch (Call.getIntrinsicID()) { 3735 case Intrinsic::experimental_gc_statepoint: { 3736 GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call); 3737 SmallPtrSet<Value *, 32> LiveGcValues; 3738 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) { 3739 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc); 3740 3741 // Remove the relocation if unused. 3742 if (GCR.use_empty()) { 3743 eraseInstFromFunction(GCR); 3744 continue; 3745 } 3746 3747 Value *DerivedPtr = GCR.getDerivedPtr(); 3748 Value *BasePtr = GCR.getBasePtr(); 3749 3750 // Undef is undef, even after relocation. 3751 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) { 3752 replaceInstUsesWith(GCR, UndefValue::get(GCR.getType())); 3753 eraseInstFromFunction(GCR); 3754 continue; 3755 } 3756 3757 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) { 3758 // The relocation of null will be null for most any collector. 3759 // TODO: provide a hook for this in GCStrategy. There might be some 3760 // weird collector this property does not hold for. 3761 if (isa<ConstantPointerNull>(DerivedPtr)) { 3762 // Use null-pointer of gc_relocate's type to replace it. 3763 replaceInstUsesWith(GCR, ConstantPointerNull::get(PT)); 3764 eraseInstFromFunction(GCR); 3765 continue; 3766 } 3767 3768 // isKnownNonNull -> nonnull attribute 3769 if (!GCR.hasRetAttr(Attribute::NonNull) && 3770 isKnownNonZero(DerivedPtr, DL, 0, &AC, &Call, &DT)) { 3771 GCR.addRetAttr(Attribute::NonNull); 3772 // We discovered new fact, re-check users. 3773 Worklist.pushUsersToWorkList(GCR); 3774 } 3775 } 3776 3777 // If we have two copies of the same pointer in the statepoint argument 3778 // list, canonicalize to one. This may let us common gc.relocates. 3779 if (GCR.getBasePtr() == GCR.getDerivedPtr() && 3780 GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) { 3781 auto *OpIntTy = GCR.getOperand(2)->getType(); 3782 GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex())); 3783 } 3784 3785 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p)) 3786 // Canonicalize on the type from the uses to the defs 3787 3788 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...) 3789 LiveGcValues.insert(BasePtr); 3790 LiveGcValues.insert(DerivedPtr); 3791 } 3792 std::optional<OperandBundleUse> Bundle = 3793 GCSP.getOperandBundle(LLVMContext::OB_gc_live); 3794 unsigned NumOfGCLives = LiveGcValues.size(); 3795 if (!Bundle || NumOfGCLives == Bundle->Inputs.size()) 3796 break; 3797 // We can reduce the size of gc live bundle. 3798 DenseMap<Value *, unsigned> Val2Idx; 3799 std::vector<Value *> NewLiveGc; 3800 for (Value *V : Bundle->Inputs) { 3801 if (Val2Idx.count(V)) 3802 continue; 3803 if (LiveGcValues.count(V)) { 3804 Val2Idx[V] = NewLiveGc.size(); 3805 NewLiveGc.push_back(V); 3806 } else 3807 Val2Idx[V] = NumOfGCLives; 3808 } 3809 // Update all gc.relocates 3810 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) { 3811 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc); 3812 Value *BasePtr = GCR.getBasePtr(); 3813 assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives && 3814 "Missed live gc for base pointer"); 3815 auto *OpIntTy1 = GCR.getOperand(1)->getType(); 3816 GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr])); 3817 Value *DerivedPtr = GCR.getDerivedPtr(); 3818 assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives && 3819 "Missed live gc for derived pointer"); 3820 auto *OpIntTy2 = GCR.getOperand(2)->getType(); 3821 GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr])); 3822 } 3823 // Create new statepoint instruction. 3824 OperandBundleDef NewBundle("gc-live", NewLiveGc); 3825 return CallBase::Create(&Call, NewBundle); 3826 } 3827 default: { break; } 3828 } 3829 3830 return Changed ? &Call : nullptr; 3831 } 3832 3833 /// If the callee is a constexpr cast of a function, attempt to move the cast to 3834 /// the arguments of the call/invoke. 3835 /// CallBrInst is not supported. 3836 bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) { 3837 auto *Callee = 3838 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts()); 3839 if (!Callee) 3840 return false; 3841 3842 assert(!isa<CallBrInst>(Call) && 3843 "CallBr's don't have a single point after a def to insert at"); 3844 3845 // If this is a call to a thunk function, don't remove the cast. Thunks are 3846 // used to transparently forward all incoming parameters and outgoing return 3847 // values, so it's important to leave the cast in place. 3848 if (Callee->hasFnAttribute("thunk")) 3849 return false; 3850 3851 // If this is a call to a naked function, the assembly might be 3852 // using an argument, or otherwise rely on the frame layout, 3853 // the function prototype will mismatch. 3854 if (Callee->hasFnAttribute(Attribute::Naked)) 3855 return false; 3856 3857 // If this is a musttail call, the callee's prototype must match the caller's 3858 // prototype with the exception of pointee types. The code below doesn't 3859 // implement that, so we can't do this transform. 3860 // TODO: Do the transform if it only requires adding pointer casts. 3861 if (Call.isMustTailCall()) 3862 return false; 3863 3864 Instruction *Caller = &Call; 3865 const AttributeList &CallerPAL = Call.getAttributes(); 3866 3867 // Okay, this is a cast from a function to a different type. Unless doing so 3868 // would cause a type conversion of one of our arguments, change this call to 3869 // be a direct call with arguments casted to the appropriate types. 3870 FunctionType *FT = Callee->getFunctionType(); 3871 Type *OldRetTy = Caller->getType(); 3872 Type *NewRetTy = FT->getReturnType(); 3873 3874 // Check to see if we are changing the return type... 3875 if (OldRetTy != NewRetTy) { 3876 3877 if (NewRetTy->isStructTy()) 3878 return false; // TODO: Handle multiple return values. 3879 3880 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) { 3881 if (Callee->isDeclaration()) 3882 return false; // Cannot transform this return value. 3883 3884 if (!Caller->use_empty() && 3885 // void -> non-void is handled specially 3886 !NewRetTy->isVoidTy()) 3887 return false; // Cannot transform this return value. 3888 } 3889 3890 if (!CallerPAL.isEmpty() && !Caller->use_empty()) { 3891 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs()); 3892 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy))) 3893 return false; // Attribute not compatible with transformed value. 3894 } 3895 3896 // If the callbase is an invoke instruction, and the return value is 3897 // used by a PHI node in a successor, we cannot change the return type of 3898 // the call because there is no place to put the cast instruction (without 3899 // breaking the critical edge). Bail out in this case. 3900 if (!Caller->use_empty()) { 3901 BasicBlock *PhisNotSupportedBlock = nullptr; 3902 if (auto *II = dyn_cast<InvokeInst>(Caller)) 3903 PhisNotSupportedBlock = II->getNormalDest(); 3904 if (PhisNotSupportedBlock) 3905 for (User *U : Caller->users()) 3906 if (PHINode *PN = dyn_cast<PHINode>(U)) 3907 if (PN->getParent() == PhisNotSupportedBlock) 3908 return false; 3909 } 3910 } 3911 3912 unsigned NumActualArgs = Call.arg_size(); 3913 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs); 3914 3915 // Prevent us turning: 3916 // declare void @takes_i32_inalloca(i32* inalloca) 3917 // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0) 3918 // 3919 // into: 3920 // call void @takes_i32_inalloca(i32* null) 3921 // 3922 // Similarly, avoid folding away bitcasts of byval calls. 3923 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) || 3924 Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated)) 3925 return false; 3926 3927 auto AI = Call.arg_begin(); 3928 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) { 3929 Type *ParamTy = FT->getParamType(i); 3930 Type *ActTy = (*AI)->getType(); 3931 3932 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL)) 3933 return false; // Cannot transform this parameter value. 3934 3935 // Check if there are any incompatible attributes we cannot drop safely. 3936 if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i)) 3937 .overlaps(AttributeFuncs::typeIncompatible( 3938 ParamTy, AttributeFuncs::ASK_UNSAFE_TO_DROP))) 3939 return false; // Attribute not compatible with transformed value. 3940 3941 if (Call.isInAllocaArgument(i) || 3942 CallerPAL.hasParamAttr(i, Attribute::Preallocated)) 3943 return false; // Cannot transform to and from inalloca/preallocated. 3944 3945 if (CallerPAL.hasParamAttr(i, Attribute::SwiftError)) 3946 return false; 3947 3948 if (CallerPAL.hasParamAttr(i, Attribute::ByVal) != 3949 Callee->getAttributes().hasParamAttr(i, Attribute::ByVal)) 3950 return false; // Cannot transform to or from byval. 3951 } 3952 3953 if (Callee->isDeclaration()) { 3954 // Do not delete arguments unless we have a function body. 3955 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg()) 3956 return false; 3957 3958 // If the callee is just a declaration, don't change the varargsness of the 3959 // call. We don't want to introduce a varargs call where one doesn't 3960 // already exist. 3961 if (FT->isVarArg() != Call.getFunctionType()->isVarArg()) 3962 return false; 3963 3964 // If both the callee and the cast type are varargs, we still have to make 3965 // sure the number of fixed parameters are the same or we have the same 3966 // ABI issues as if we introduce a varargs call. 3967 if (FT->isVarArg() && Call.getFunctionType()->isVarArg() && 3968 FT->getNumParams() != Call.getFunctionType()->getNumParams()) 3969 return false; 3970 } 3971 3972 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() && 3973 !CallerPAL.isEmpty()) { 3974 // In this case we have more arguments than the new function type, but we 3975 // won't be dropping them. Check that these extra arguments have attributes 3976 // that are compatible with being a vararg call argument. 3977 unsigned SRetIdx; 3978 if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) && 3979 SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams()) 3980 return false; 3981 } 3982 3983 // Okay, we decided that this is a safe thing to do: go ahead and start 3984 // inserting cast instructions as necessary. 3985 SmallVector<Value *, 8> Args; 3986 SmallVector<AttributeSet, 8> ArgAttrs; 3987 Args.reserve(NumActualArgs); 3988 ArgAttrs.reserve(NumActualArgs); 3989 3990 // Get any return attributes. 3991 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs()); 3992 3993 // If the return value is not being used, the type may not be compatible 3994 // with the existing attributes. Wipe out any problematic attributes. 3995 RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy)); 3996 3997 LLVMContext &Ctx = Call.getContext(); 3998 AI = Call.arg_begin(); 3999 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) { 4000 Type *ParamTy = FT->getParamType(i); 4001 4002 Value *NewArg = *AI; 4003 if ((*AI)->getType() != ParamTy) 4004 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy); 4005 Args.push_back(NewArg); 4006 4007 // Add any parameter attributes except the ones incompatible with the new 4008 // type. Note that we made sure all incompatible ones are safe to drop. 4009 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible( 4010 ParamTy, AttributeFuncs::ASK_SAFE_TO_DROP); 4011 ArgAttrs.push_back( 4012 CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs)); 4013 } 4014 4015 // If the function takes more arguments than the call was taking, add them 4016 // now. 4017 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) { 4018 Args.push_back(Constant::getNullValue(FT->getParamType(i))); 4019 ArgAttrs.push_back(AttributeSet()); 4020 } 4021 4022 // If we are removing arguments to the function, emit an obnoxious warning. 4023 if (FT->getNumParams() < NumActualArgs) { 4024 // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722 4025 if (FT->isVarArg()) { 4026 // Add all of the arguments in their promoted form to the arg list. 4027 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) { 4028 Type *PTy = getPromotedType((*AI)->getType()); 4029 Value *NewArg = *AI; 4030 if (PTy != (*AI)->getType()) { 4031 // Must promote to pass through va_arg area! 4032 Instruction::CastOps opcode = 4033 CastInst::getCastOpcode(*AI, false, PTy, false); 4034 NewArg = Builder.CreateCast(opcode, *AI, PTy); 4035 } 4036 Args.push_back(NewArg); 4037 4038 // Add any parameter attributes. 4039 ArgAttrs.push_back(CallerPAL.getParamAttrs(i)); 4040 } 4041 } 4042 } 4043 4044 AttributeSet FnAttrs = CallerPAL.getFnAttrs(); 4045 4046 if (NewRetTy->isVoidTy()) 4047 Caller->setName(""); // Void type should not have a name. 4048 4049 assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) && 4050 "missing argument attributes"); 4051 AttributeList NewCallerPAL = AttributeList::get( 4052 Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs); 4053 4054 SmallVector<OperandBundleDef, 1> OpBundles; 4055 Call.getOperandBundlesAsDefs(OpBundles); 4056 4057 CallBase *NewCall; 4058 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) { 4059 NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(), 4060 II->getUnwindDest(), Args, OpBundles); 4061 } else { 4062 NewCall = Builder.CreateCall(Callee, Args, OpBundles); 4063 cast<CallInst>(NewCall)->setTailCallKind( 4064 cast<CallInst>(Caller)->getTailCallKind()); 4065 } 4066 NewCall->takeName(Caller); 4067 NewCall->setCallingConv(Call.getCallingConv()); 4068 NewCall->setAttributes(NewCallerPAL); 4069 4070 // Preserve prof metadata if any. 4071 NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof}); 4072 4073 // Insert a cast of the return type as necessary. 4074 Instruction *NC = NewCall; 4075 Value *NV = NC; 4076 if (OldRetTy != NV->getType() && !Caller->use_empty()) { 4077 if (!NV->getType()->isVoidTy()) { 4078 NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy); 4079 NC->setDebugLoc(Caller->getDebugLoc()); 4080 4081 auto OptInsertPt = NewCall->getInsertionPointAfterDef(); 4082 assert(OptInsertPt && "No place to insert cast"); 4083 InsertNewInstBefore(NC, *OptInsertPt); 4084 Worklist.pushUsersToWorkList(*Caller); 4085 } else { 4086 NV = PoisonValue::get(Caller->getType()); 4087 } 4088 } 4089 4090 if (!Caller->use_empty()) 4091 replaceInstUsesWith(*Caller, NV); 4092 else if (Caller->hasValueHandle()) { 4093 if (OldRetTy == NV->getType()) 4094 ValueHandleBase::ValueIsRAUWd(Caller, NV); 4095 else 4096 // We cannot call ValueIsRAUWd with a different type, and the 4097 // actual tracked value will disappear. 4098 ValueHandleBase::ValueIsDeleted(Caller); 4099 } 4100 4101 eraseInstFromFunction(*Caller); 4102 return true; 4103 } 4104 4105 /// Turn a call to a function created by init_trampoline / adjust_trampoline 4106 /// intrinsic pair into a direct call to the underlying function. 4107 Instruction * 4108 InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call, 4109 IntrinsicInst &Tramp) { 4110 FunctionType *FTy = Call.getFunctionType(); 4111 AttributeList Attrs = Call.getAttributes(); 4112 4113 // If the call already has the 'nest' attribute somewhere then give up - 4114 // otherwise 'nest' would occur twice after splicing in the chain. 4115 if (Attrs.hasAttrSomewhere(Attribute::Nest)) 4116 return nullptr; 4117 4118 Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts()); 4119 FunctionType *NestFTy = NestF->getFunctionType(); 4120 4121 AttributeList NestAttrs = NestF->getAttributes(); 4122 if (!NestAttrs.isEmpty()) { 4123 unsigned NestArgNo = 0; 4124 Type *NestTy = nullptr; 4125 AttributeSet NestAttr; 4126 4127 // Look for a parameter marked with the 'nest' attribute. 4128 for (FunctionType::param_iterator I = NestFTy->param_begin(), 4129 E = NestFTy->param_end(); 4130 I != E; ++NestArgNo, ++I) { 4131 AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo); 4132 if (AS.hasAttribute(Attribute::Nest)) { 4133 // Record the parameter type and any other attributes. 4134 NestTy = *I; 4135 NestAttr = AS; 4136 break; 4137 } 4138 } 4139 4140 if (NestTy) { 4141 std::vector<Value*> NewArgs; 4142 std::vector<AttributeSet> NewArgAttrs; 4143 NewArgs.reserve(Call.arg_size() + 1); 4144 NewArgAttrs.reserve(Call.arg_size()); 4145 4146 // Insert the nest argument into the call argument list, which may 4147 // mean appending it. Likewise for attributes. 4148 4149 { 4150 unsigned ArgNo = 0; 4151 auto I = Call.arg_begin(), E = Call.arg_end(); 4152 do { 4153 if (ArgNo == NestArgNo) { 4154 // Add the chain argument and attributes. 4155 Value *NestVal = Tramp.getArgOperand(2); 4156 if (NestVal->getType() != NestTy) 4157 NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest"); 4158 NewArgs.push_back(NestVal); 4159 NewArgAttrs.push_back(NestAttr); 4160 } 4161 4162 if (I == E) 4163 break; 4164 4165 // Add the original argument and attributes. 4166 NewArgs.push_back(*I); 4167 NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo)); 4168 4169 ++ArgNo; 4170 ++I; 4171 } while (true); 4172 } 4173 4174 // The trampoline may have been bitcast to a bogus type (FTy). 4175 // Handle this by synthesizing a new function type, equal to FTy 4176 // with the chain parameter inserted. 4177 4178 std::vector<Type*> NewTypes; 4179 NewTypes.reserve(FTy->getNumParams()+1); 4180 4181 // Insert the chain's type into the list of parameter types, which may 4182 // mean appending it. 4183 { 4184 unsigned ArgNo = 0; 4185 FunctionType::param_iterator I = FTy->param_begin(), 4186 E = FTy->param_end(); 4187 4188 do { 4189 if (ArgNo == NestArgNo) 4190 // Add the chain's type. 4191 NewTypes.push_back(NestTy); 4192 4193 if (I == E) 4194 break; 4195 4196 // Add the original type. 4197 NewTypes.push_back(*I); 4198 4199 ++ArgNo; 4200 ++I; 4201 } while (true); 4202 } 4203 4204 // Replace the trampoline call with a direct call. Let the generic 4205 // code sort out any function type mismatches. 4206 FunctionType *NewFTy = 4207 FunctionType::get(FTy->getReturnType(), NewTypes, FTy->isVarArg()); 4208 AttributeList NewPAL = 4209 AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(), 4210 Attrs.getRetAttrs(), NewArgAttrs); 4211 4212 SmallVector<OperandBundleDef, 1> OpBundles; 4213 Call.getOperandBundlesAsDefs(OpBundles); 4214 4215 Instruction *NewCaller; 4216 if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) { 4217 NewCaller = InvokeInst::Create(NewFTy, NestF, II->getNormalDest(), 4218 II->getUnwindDest(), NewArgs, OpBundles); 4219 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv()); 4220 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL); 4221 } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) { 4222 NewCaller = 4223 CallBrInst::Create(NewFTy, NestF, CBI->getDefaultDest(), 4224 CBI->getIndirectDests(), NewArgs, OpBundles); 4225 cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv()); 4226 cast<CallBrInst>(NewCaller)->setAttributes(NewPAL); 4227 } else { 4228 NewCaller = CallInst::Create(NewFTy, NestF, NewArgs, OpBundles); 4229 cast<CallInst>(NewCaller)->setTailCallKind( 4230 cast<CallInst>(Call).getTailCallKind()); 4231 cast<CallInst>(NewCaller)->setCallingConv( 4232 cast<CallInst>(Call).getCallingConv()); 4233 cast<CallInst>(NewCaller)->setAttributes(NewPAL); 4234 } 4235 NewCaller->setDebugLoc(Call.getDebugLoc()); 4236 4237 return NewCaller; 4238 } 4239 } 4240 4241 // Replace the trampoline call with a direct call. Since there is no 'nest' 4242 // parameter, there is no need to adjust the argument list. Let the generic 4243 // code sort out any function type mismatches. 4244 Call.setCalledFunction(FTy, NestF); 4245 return &Call; 4246 } 4247