//===- InstCombineCalls.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall, visitInvoke, and visitCallBr functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace PatternMatch;

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window",
    cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// Recognize a memcpy/memmove from a trivially otherwise unused alloca.
/// TODO: This should probably be integrated with visitAllocSites, but that
/// requires a deeper change to allow either unread or unwritten objects.
static bool hasUndefSource(AnyMemTransferInst *MI) {
  auto *Src = MI->getRawSource();
  while (isa<GetElementPtrInst>(Src) || isa<BitCastInst>(Src)) {
    if (!Src->hasOneUse())
      return false;
    Src = cast<Instruction>(Src)->getOperand(0);
  }
  return isa<AllocaInst>(Src) && Src->hasOneUse();
}

Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  MaybeAlign CopyDstAlign = MI->getDestAlign();
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  MaybeAlign CopySrcAlign = MI->getSourceAlign();
  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If the source is provably undef, the memcpy/memmove doesn't do anything
  // (unless the transfer is volatile).
  if (hasUndefSource(MI) && !MI->isVolatile()) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If the MemCpyInst length is 1/2/4/8 bytes, then replace the memcpy with a
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for intrinsic. See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size & (Size - 1)))
    return nullptr; // If not 1/2/4/8 bytes, exit.

  // If it is an atomic and the alignment is less than the size, then we will
  // introduce an unaligned memory access which will be later transformed
  // into a libcall in CodeGen. There is no evident performance gain, so
  // disable it for now.
  if (isa<AtomicMemTransferInst>(MI))
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)
      return nullptr;

  // Use an integer load+store unless we can find something better.
  IntegerType *IntType = IntegerType::get(MI->getContext(), Size << 3);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA tag describing our copy.
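  // A !tbaa.struct node describing a single field that covers the whole copy
  // (offset 0 and a size equal to the copy length) can be collapsed into a
  // plain !tbaa access tag for the scalar load/store created below.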
  AAMDNodes AACopyMD = MI->getAAMetadata();

  if (MDNode *M = AACopyMD.TBAAStruct) {
    AACopyMD.TBAAStruct = nullptr;
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        M->getOperand(1) &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
            Size &&
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      AACopyMD.TBAA = cast<MDNode>(M->getOperand(2));
  }

  Value *Src = MI->getArgOperand(1);
  Value *Dest = MI->getArgOperand(0);
  LoadInst *L = Builder.CreateLoad(IntType, Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(*CopySrcAlign);
  L->setAAMetadata(AACopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(*CopyDstAlign);
  S->setAAMetadata(AACopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
  S->copyMetadata(*MI, LLVMContext::MD_DIAssignID);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // non-atomics can be volatile
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
    // atomics have to be unordered
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  MaybeAlign MemSetAlign = MI->getDestAlign();
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Remove memset with an undef value.
  // FIXME: This is technically incorrect because it might overwrite a poison
  // value. Change to PoisonValue once #52930 is resolved.
  if (isa<UndefValue>(MI->getValue())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
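  // The fill value must be a constant i8; the byte is splatted across the
  // store width below by multiplying with 0x0101010101010101.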
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  const uint64_t Len = LenC->getLimitedValue();
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = MI->getDestAlign().valueOrOne();

  // If it is an atomic and the alignment is less than the size, then we will
  // introduce an unaligned memory access which will be later transformed
  // into a libcall in CodeGen. There is no evident performance gain, so
  // disable it for now.
  if (isa<AtomicMemSetInst>(MI))
    if (Alignment < Len)
      return nullptr;

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len * 8); // n=1 -> i8.

    Value *Dest = MI->getDest();

    // Extract the fill value and store.
    const uint64_t Fill = FillC->getZExtValue() * 0x0101010101010101ULL;
    Constant *FillVal = ConstantInt::get(ITy, Fill);
    StoreInst *S = Builder.CreateStore(FillVal, Dest, MI->isVolatile());
    S->copyMetadata(*MI, LLVMContext::MD_DIAssignID);
    auto replaceOpForAssignmentMarkers = [FillC, FillVal](auto *DbgAssign) {
      if (llvm::is_contained(DbgAssign->location_ops(), FillC))
        DbgAssign->replaceVariableLocationOp(FillC, FillVal);
    };
    for_each(at::getAssignmentMarkers(S), replaceOpForAssignmentMarkers);
    for_each(at::getDPVAssignmentMarkers(S), replaceOpForAssignmentMarkers);

    S->setAlignment(Alignment);
    if (isa<AtomicMemSetInst>(MI))
      S->setOrdering(AtomicOrdering::Unordered);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Narrow width by halves excluding zero/undef lanes
Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  const Align Alignment =
      cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();

  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
    LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                            "unmaskedload");
    L->copyMetadata(II);
    return L;
  }

  // If we can unconditionally load from this address, replace with a
  // load/select idiom. TODO: use DT for context sensitive query
  if (isDereferenceablePointer(LoadPtr, II.getType(),
                               II.getModule()->getDataLayout(), &II, &AC)) {
    LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                             "unmaskedload");
    LI->copyMetadata(II);
    return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Narrow width by halves excluding zero/undef lanes
Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
    StoreInst *S =
        new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
    S->copyMetadata(II);
    return S;
  }

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt PoisonElts(DemandedElts.getBitWidth(), 0);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts,
                                            PoisonElts))
    return replaceOperand(II, 0, V);

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane load -> load
// * Dereferenceable address & few lanes -> scalarize speculative load/selects
// * Adjacent vector addresses -> masked.load
// * Narrow width by halves excluding zero/undef lanes
// * Vector incrementing address -> vector masked load
Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
  if (!ConstMask)
    return nullptr;

  // Vector splat address w/known mask -> scalar load
  // Fold the gather to a scalar load of the first lane, because every lane
  // reloads the same value.
  if (ConstMask->isAllOnesValue())
    if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
      auto *VecTy = cast<VectorType>(II.getType());
      const Align Alignment =
          cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
      LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
                                              Alignment, "load.scalar");
      Value *Shuf =
          Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");
      return replaceInstUsesWith(II, cast<Instruction>(Shuf));
    }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Adjacent vector addresses -> masked.store
// * Narrow store width by halves excluding zero/undef lanes
// * Vector incrementing address -> vector masked store
Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, a scatter does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // Vector splat address -> scalar store
  if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) {
    // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr
    if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) {
      Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
      StoreInst *S =
          new StoreInst(SplatValue, SplatPtr, /*IsVolatile=*/false, Alignment);
      S->copyMetadata(II);
      return S;
    }
    // scatter(vector, splat(ptr), splat(true)) -> store extract(vector,
    // lastlane), ptr
    if (ConstMask->isAllOnesValue()) {
      Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
      VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType());
      ElementCount VF = WideLoadTy->getElementCount();
      Value *RunTimeVF = Builder.CreateElementCount(Builder.getInt32Ty(), VF);
      Value *LastLane = Builder.CreateSub(RunTimeVF, Builder.getInt32(1));
      Value *Extract =
          Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
      StoreInst *S =
          new StoreInst(Extract, SplatPtr, /*IsVolatile=*/false, Alignment);
      S->copyMetadata(II);
      return S;
    }
  }
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt PoisonElts(DemandedElts.getBitWidth(), 0);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts,
                                            PoisonElts))
    return replaceOperand(II, 0, V);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts,
                                            PoisonElts))
    return replaceOperand(II, 1, V);

  return nullptr;
}

/// This function transforms launder.invariant.group and strip.invariant.group
/// like:
/// launder(launder(%x)) -> launder(%x) (the result is not the argument)
/// launder(strip(%x)) -> launder(%x)
/// strip(strip(%x)) -> strip(%x) (the result is not the argument)
/// strip(launder(%x)) -> strip(%x)
/// This is legal because it preserves the most recent information about
/// the presence or absence of invariant.group.
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombinerImpl &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = StrippedArg;
  while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
    if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
        Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
      break;
    StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
  }
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.
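
  // Rebuild a single launder/strip of the fully stripped argument, preserving
  // the kind (launder vs. strip) of the outermost intrinsic.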
  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());

  return cast<Instruction>(Result);
}

static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  Value *Op0 = II.getArgOperand(0);
  Value *Op1 = II.getArgOperand(1);
  Value *X;
  // ctlz(bitreverse(x)) -> cttz(x)
  // cttz(bitreverse(x)) -> ctlz(x)
  if (match(Op0, m_BitReverse(m_Value(X)))) {
    Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
    Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
    return CallInst::Create(F, {X, II.getArgOperand(1)});
  }

  if (II.getType()->isIntOrIntVectorTy(1)) {
    // ctlz/cttz i1 Op0 --> not Op0
    if (match(Op1, m_Zero()))
      return BinaryOperator::CreateNot(Op0);
    // If zero is poison, then the input can be assumed to be "true", so the
    // instruction simplifies to "false".
    assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");
    return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(II.getType()));
  }

  Constant *C;

  if (IsTZ) {
    // cttz(-x) -> cttz(x)
    if (match(Op0, m_Neg(m_Value(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(-x & x) -> cttz(x)
    if (match(Op0, m_c_And(m_Neg(m_Value(X)), m_Deferred(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(sext(x)) -> cttz(zext(x))
    if (match(Op0, m_OneUse(m_SExt(m_Value(X))))) {
      auto *Zext = IC.Builder.CreateZExt(X, II.getType());
      auto *CttzZext =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Zext, Op1);
      return IC.replaceInstUsesWith(II, CttzZext);
    }

    // Zext doesn't change the number of trailing zeros, so narrow:
    // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsPoison' parameter is 'true'.
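    // 'ZeroIsPoison' is required: for a zero input the wide cttz would return
    // the wide bit width, a value that zext of the narrow cttz can never
    // produce.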
    if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) {
      auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X,
                                                    IC.Builder.getTrue());
      auto *ZextCttz = IC.Builder.CreateZExt(Cttz, II.getType());
      return IC.replaceInstUsesWith(II, ZextCttz);
    }

    // cttz(abs(x)) -> cttz(x)
    // cttz(nabs(x)) -> cttz(x)
    Value *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return IC.replaceOperand(II, 0, X);

    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(shl(%const, %val), 1) --> add(cttz(%const, 1), %val)
    if (match(Op0, m_Shl(m_ImmConstant(C), m_Value(X))) &&
        match(Op1, m_One())) {
      Value *ConstCttz =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, C, Op1);
      return BinaryOperator::CreateAdd(ConstCttz, X);
    }

    // cttz(lshr exact (%const, %val), 1) --> sub(cttz(%const, 1), %val)
    if (match(Op0, m_Exact(m_LShr(m_ImmConstant(C), m_Value(X)))) &&
        match(Op1, m_One())) {
      Value *ConstCttz =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, C, Op1);
      return BinaryOperator::CreateSub(ConstCttz, X);
    }
  } else {
    // ctlz(lshr(%const, %val), 1) --> add(ctlz(%const, 1), %val)
    if (match(Op0, m_LShr(m_ImmConstant(C), m_Value(X))) &&
        match(Op1, m_One())) {
      Value *ConstCtlz =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::ctlz, C, Op1);
      return BinaryOperator::CreateAdd(ConstCtlz, X);
    }

    // ctlz(shl nuw (%const, %val), 1) --> sub(ctlz(%const, 1), %val)
    if (match(Op0, m_NUWShl(m_ImmConstant(C), m_Value(X))) &&
        match(Op1, m_One())) {
      Value *ConstCtlz =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::ctlz, C, Op1);
      return BinaryOperator::CreateSub(ConstCtlz, X);
    }
  }

  KnownBits Known = IC.computeKnownBits(Op0, 0, &II);

  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
  unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
                                : Known.countMaxLeadingZeros();
  unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
                                : Known.countMinLeadingZeros();

  // If all bits above (ctlz) or below (cttz) the first known one are known
  // zero, this value is constant.
  // FIXME: This should be in InstSimplify because we're replacing an
  // instruction with a constant.
  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
    return IC.replaceInstUsesWith(II, C);
  }

  // If the input to cttz/ctlz is known to be non-zero,
  // then change the 'ZeroIsPoison' parameter to 'true'
  // because we know the zero behavior can't affect the result.
  if (!Known.One.isZero() ||
      isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
                     &IC.getDominatorTree())) {
    if (!match(II.getArgOperand(1), m_One()))
      return IC.replaceOperand(II, 1, IC.Builder.getTrue());
  }

  // Add range metadata since known bits can't completely reflect what we know.
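  // At this point the result is only known to lie in the range
  // [DefiniteZeros, PossibleZeros]; record that as a !range annotation.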
  auto *IT = cast<IntegerType>(Op0->getType()->getScalarType());
  if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
        ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  Type *Ty = II.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  Value *Op0 = II.getArgOperand(0);
  Value *X, *Y;

  // ctpop(bitreverse(x)) -> ctpop(x)
  // ctpop(bswap(x)) -> ctpop(x)
  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
    return IC.replaceOperand(II, 0, X);

  // ctpop(rot(x)) -> ctpop(x)
  if ((match(Op0, m_FShl(m_Value(X), m_Value(Y), m_Value())) ||
       match(Op0, m_FShr(m_Value(X), m_Value(Y), m_Value()))) &&
      X == Y)
    return IC.replaceOperand(II, 0, X);

  // ctpop(x | -x) -> bitwidth - cttz(x, false)
  if (Op0->hasOneUse() &&
      match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
    auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
    return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
  }

  // ctpop(~x & (x - 1)) -> cttz(x, false)
  if (match(Op0,
            m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    return CallInst::Create(F, {X, IC.Builder.getFalse()});
  }

  // Zext doesn't change the number of set bits, so narrow:
  // ctpop (zext X) --> zext (ctpop X)
  if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
    Value *NarrowPop = IC.Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, X);
    return CastInst::Create(Instruction::ZExt, NarrowPop, Ty);
  }

  KnownBits Known(BitWidth);
  IC.computeKnownBits(Op0, Known, 0, &II);

  // If all bits are zero except for exactly one fixed bit, then the result
  // must be 0 or 1, and we can get that answer by shifting to LSB:
  // ctpop (X & 32) --> (X & 32) >> 5
  // TODO: Investigate removing this as it's likely unnecessary given the below
  // `isKnownToBeAPowerOfTwo` check.
  if ((~Known.Zero).isPowerOf2())
    return BinaryOperator::CreateLShr(
        Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));

  // More generally we can also handle non-constant power of 2 patterns such as
  // shl/shr(Pow2, X), (X & -X), etc... by transforming:
  // ctpop(Pow2OrZero) --> icmp ne X, 0
  if (IC.isKnownToBeAPowerOfTwo(Op0, /* OrZero */ true))
    return CastInst::Create(Instruction::ZExt,
                            IC.Builder.CreateICmp(ICmpInst::ICMP_NE, Op0,
                                                  Constant::getNullValue(Ty)),
                            Ty);

  // Add range metadata since known bits can't completely reflect what we know.
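  // The population count is known to lie in [MinCount, MaxCount]; record that
  // as a !range annotation on the call.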
  auto *IT = cast<IntegerType>(Ty->getScalarType());
  unsigned MinCount = Known.countMinPopulation();
  unsigned MaxCount = Known.countMaxPopulation();
  if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
        ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

/// Convert a table lookup to shufflevector if the mask is constant.
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
/// which case we could lower the shufflevector with rev64 instructions
/// as it's actually a byte reverse.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<FixedVectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  int Indexes[8];

  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if ((unsigned)Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *V1 = II.getArgOperand(0);
  auto *V2 = Constant::getNullValue(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ArrayRef(Indexes));
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  assert(E.arg_size() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

// Remove trivially empty start/end intrinsic ranges, i.e. a start
// immediately followed by an end (ignoring debuginfo or other
// start/end intrinsics in between). As this handles only the most trivial
// cases, tracking the nesting level is not needed:
//
//   call @llvm.foo.start(i1 0)
//   call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
//   call @llvm.foo.end(i1 0)
//   call @llvm.foo.end(i1 0) ; &I
static bool
removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
                          std::function<bool(const IntrinsicInst &)> IsStart) {
  // We start from the end intrinsic and scan backwards, so that InstCombine
  // has already processed (and potentially removed) all the instructions
  // before the end intrinsic.
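  // Debug/pseudo instructions, other end intrinsics of the same kind, and
  // non-matching start intrinsics are skipped; any other instruction stops
  // the scan.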
  BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
      if (I->isDebugOrPseudoInst() ||
          I->getIntrinsicID() == EndI.getIntrinsicID())
        continue;
      if (IsStart(*I)) {
        if (haveSameOperands(EndI, *I, EndI.arg_size())) {
          IC.eraseInstFromFunction(*I);
          IC.eraseInstFromFunction(EndI);
          return true;
        }
        // Skip start intrinsics that don't pair with this end intrinsic.
        continue;
      }
    }
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
  removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
    return I.getIntrinsicID() == Intrinsic::vastart ||
           I.getIntrinsicID() == Intrinsic::vacopy;
  });
  return nullptr;
}

static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
  assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);
    return &Call;
  }
  return nullptr;
}

/// Creates a result tuple for an overflow intrinsic \p II with a given
/// \p Result and a constant \p Overflow value.
static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
                                        Constant *Overflow) {
  Constant *V[] = {PoisonValue::get(Result->getType()), Overflow};
  StructType *ST = cast<StructType>(II->getType());
  Constant *Struct = ConstantStruct::get(ST, V);
  return InsertValueInst::Create(Struct, Result, 0);
}

Instruction *
InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
  WithOverflowInst *WO = cast<WithOverflowInst>(II);
  Value *OperationResult = nullptr;
  Constant *OverflowResult = nullptr;
  if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
                            WO->getRHS(), *WO, OperationResult,
                            OverflowResult))
    return createOverflowTuple(WO, OperationResult, OverflowResult);
  return nullptr;
}

static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) {
  Ty = Ty->getScalarType();
  return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE;
}

static bool inputDenormalIsDAZ(const Function &F, const Type *Ty) {
  Ty = Ty->getScalarType();
  return F.getDenormalMode(Ty->getFltSemantics()).inputsAreZero();
}

/// \returns the compare predicate type if the test performed by
/// llvm.is.fpclass(x, \p Mask) is equivalent to fcmp o__ x, 0.0 with the
/// floating-point environment assumed for \p F for type \p Ty
static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask,
                                              const Function &F, Type *Ty) {
  switch (static_cast<unsigned>(Mask)) {
  case fcZero:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OEQ;
    break;
  case fcZero | fcSubnormal:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OEQ;
    break;
  case fcPositive | fcNegZero:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OGE;
    break;
  case fcPositive | fcNegZero | fcNegSubnormal:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OGE;
    break;
  case fcPosSubnormal | fcPosNormal | fcPosInf:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OGT;
    break;
  case fcNegative | fcPosZero:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OLE;
    break;
  case fcNegative | fcPosZero | fcPosSubnormal:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OLE;
    break;
  case fcNegSubnormal | fcNegNormal | fcNegInf:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OLT;
    break;
  case fcPosNormal | fcPosInf:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OGT;
    break;
  case fcNegNormal | fcNegInf:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OLT;
    break;
  case ~fcZero & ~fcNan:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_ONE;
    break;
  case ~(fcZero | fcSubnormal) & ~fcNan:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_ONE;
    break;
  default:
    break;
  }

  return FCmpInst::BAD_FCMP_PREDICATE;
}

Instruction *InstCombinerImpl::foldIntrinsicIsFPClass(IntrinsicInst &II) {
  Value *Src0 = II.getArgOperand(0);
  Value *Src1 = II.getArgOperand(1);
  const ConstantInt *CMask = cast<ConstantInt>(Src1);
  FPClassTest Mask = static_cast<FPClassTest>(CMask->getZExtValue());
  const bool IsUnordered = (Mask & fcNan) == fcNan;
  const bool IsOrdered = (Mask & fcNan) == fcNone;
  const FPClassTest OrderedMask = Mask & ~fcNan;
  const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;

  const bool IsStrict = II.isStrictFP();

  Value *FNegSrc;
  if (match(Src0, m_FNeg(m_Value(FNegSrc)))) {
    // is.fpclass (fneg x), mask -> is.fpclass x, (fneg mask)

    II.setArgOperand(1, ConstantInt::get(Src1->getType(), fneg(Mask)));
    return replaceOperand(II, 0, FNegSrc);
  }

  Value *FAbsSrc;
  if (match(Src0, m_FAbs(m_Value(FAbsSrc)))) {
    II.setArgOperand(1, ConstantInt::get(Src1->getType(), inverse_fabs(Mask)));
    return replaceOperand(II, 0, FAbsSrc);
  }

  if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
    // is.fpclass(x, fcInf) -> fcmp oeq fabs(x), +inf
    // is.fpclass(x, ~fcInf) -> fcmp one fabs(x), +inf
    // is.fpclass(x, fcInf|fcNan) -> fcmp ueq fabs(x), +inf
    // is.fpclass(x, ~(fcInf|fcNan)) -> fcmp une fabs(x), +inf
    Constant *Inf = ConstantFP::getInfinity(Src0->getType());
    FCmpInst::Predicate Pred =
        IsUnordered ? FCmpInst::FCMP_UEQ : FCmpInst::FCMP_OEQ;
    if (OrderedInvertedMask == fcInf)
      Pred = IsUnordered ? FCmpInst::FCMP_UNE : FCmpInst::FCMP_ONE;

    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Src0);
    Value *CmpInf = Builder.CreateFCmp(Pred, Fabs, Inf);
    CmpInf->takeName(&II);
    return replaceInstUsesWith(II, CmpInf);
  }

  if ((OrderedMask == fcPosInf || OrderedMask == fcNegInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
    // is.fpclass(x, fcPosInf) -> fcmp oeq x, +inf
    // is.fpclass(x, fcNegInf) -> fcmp oeq x, -inf
    // is.fpclass(x, fcPosInf|fcNan) -> fcmp ueq x, +inf
    // is.fpclass(x, fcNegInf|fcNan) -> fcmp ueq x, -inf
    Constant *Inf =
        ConstantFP::getInfinity(Src0->getType(), OrderedMask == fcNegInf);
    Value *EqInf = IsUnordered ? Builder.CreateFCmpUEQ(Src0, Inf)
                               : Builder.CreateFCmpOEQ(Src0, Inf);

    EqInf->takeName(&II);
    return replaceInstUsesWith(II, EqInf);
  }

  if ((OrderedInvertedMask == fcPosInf || OrderedInvertedMask == fcNegInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
    // is.fpclass(x, ~fcPosInf) -> fcmp one x, +inf
    // is.fpclass(x, ~fcNegInf) -> fcmp one x, -inf
    // is.fpclass(x, ~fcPosInf|fcNan) -> fcmp une x, +inf
    // is.fpclass(x, ~fcNegInf|fcNan) -> fcmp une x, -inf
    Constant *Inf = ConstantFP::getInfinity(Src0->getType(),
                                            OrderedInvertedMask == fcNegInf);
    Value *NeInf = IsUnordered ? Builder.CreateFCmpUNE(Src0, Inf)
                               : Builder.CreateFCmpONE(Src0, Inf);
    NeInf->takeName(&II);
    return replaceInstUsesWith(II, NeInf);
  }

  if (Mask == fcNan && !IsStrict) {
    // Equivalent of isnan. Replace with standard fcmp if we don't care about
    // FP exceptions.
    Value *IsNan =
        Builder.CreateFCmpUNO(Src0, ConstantFP::getZero(Src0->getType()));
    IsNan->takeName(&II);
    return replaceInstUsesWith(II, IsNan);
  }

  if (Mask == (~fcNan & fcAllFlags) && !IsStrict) {
    // Equivalent of !isnan. Replace with standard fcmp.
    Value *FCmp =
        Builder.CreateFCmpORD(Src0, ConstantFP::getZero(Src0->getType()));
    FCmp->takeName(&II);
    return replaceInstUsesWith(II, FCmp);
  }

  FCmpInst::Predicate PredType = FCmpInst::BAD_FCMP_PREDICATE;

  // Try to replace with an fcmp with 0
  //
  // is.fpclass(x, fcZero) -> fcmp oeq x, 0.0
  // is.fpclass(x, fcZero | fcNan) -> fcmp ueq x, 0.0
  // is.fpclass(x, ~fcZero & ~fcNan) -> fcmp one x, 0.0
  // is.fpclass(x, ~fcZero) -> fcmp une x, 0.0
  //
  // is.fpclass(x, fcPosSubnormal | fcPosNormal | fcPosInf) -> fcmp ogt x, 0.0
  // is.fpclass(x, fcPositive | fcNegZero) -> fcmp oge x, 0.0
  //
  // is.fpclass(x, fcNegSubnormal | fcNegNormal | fcNegInf) -> fcmp olt x, 0.0
  // is.fpclass(x, fcNegative | fcPosZero) -> fcmp ole x, 0.0
  //
  if (!IsStrict && (IsOrdered || IsUnordered) &&
      (PredType = fpclassTestIsFCmp0(OrderedMask, *II.getFunction(),
                                     Src0->getType())) !=
          FCmpInst::BAD_FCMP_PREDICATE) {
    Constant *Zero = ConstantFP::getZero(Src0->getType());
    // Equivalent of == 0.
    Value *FCmp = Builder.CreateFCmp(
        IsUnordered ? FCmpInst::getUnorderedPredicate(PredType) : PredType,
        Src0, Zero);

    FCmp->takeName(&II);
    return replaceInstUsesWith(II, FCmp);
  }

  KnownFPClass Known = computeKnownFPClass(Src0, Mask, &II);

  // Clear test bits we know must be false from the source value.
  // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
  // fp_class (ninf x), ninf|pinf|other -> fp_class (ninf x), other
  if ((Mask & Known.KnownFPClasses) != Mask) {
    II.setArgOperand(
        1, ConstantInt::get(Src1->getType(), Mask & Known.KnownFPClasses));
    return &II;
  }

  // If none of the tests which can return false are possible, fold to true.
  // fp_class (nnan x), ~(qnan|snan) -> true
  // fp_class (ninf x), ~(ninf|pinf) -> true
  if (Mask == Known.KnownFPClasses)
    return replaceInstUsesWith(II, ConstantInt::get(II.getType(), true));

  return nullptr;
}

static std::optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
                                        const DataLayout &DL,
                                        AssumptionCache *AC,
                                        DominatorTree *DT) {
  KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT);
  if (Known.isNonNegative())
    return false;
  if (Known.isNegative())
    return true;

  Value *X, *Y;
  if (match(Op, m_NSWSub(m_Value(X), m_Value(Y))))
    return isImpliedByDomCondition(ICmpInst::ICMP_SLT, X, Y, CxtI, DL);

  return isImpliedByDomCondition(
      ICmpInst::ICMP_SLT, Op, Constant::getNullValue(Op->getType()), CxtI, DL);
}

static std::optional<bool> getKnownSignOrZero(Value *Op, Instruction *CxtI,
                                              const DataLayout &DL,
                                              AssumptionCache *AC,
                                              DominatorTree *DT) {
  if (std::optional<bool> Sign = getKnownSign(Op, CxtI, DL, AC, DT))
    return Sign;

  Value *X, *Y;
  if (match(Op, m_NSWSub(m_Value(X), m_Value(Y))))
    return isImpliedByDomCondition(ICmpInst::ICMP_SLE, X, Y, CxtI, DL);

  return std::nullopt;
}

/// Return true if two values \p Op0 and \p Op1 are known to have the same sign.
static bool signBitMustBeTheSame(Value *Op0, Value *Op1, Instruction *CxtI,
                                 const DataLayout &DL, AssumptionCache *AC,
                                 DominatorTree *DT) {
  std::optional<bool> Known1 = getKnownSign(Op1, CxtI, DL, AC, DT);
  if (!Known1)
    return false;
  std::optional<bool> Known0 = getKnownSign(Op0, CxtI, DL, AC, DT);
  if (!Known0)
    return false;
  return *Known0 == *Known1;
}

/// Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0. This
/// can trigger other combines.
static Instruction *moveAddAfterMinMax(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
          MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
         "Expected a min or max intrinsic");

  // TODO: Match vectors with undef elements, but undef may not propagate.
  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C0)))) ||
      !match(Op1, m_APInt(C1)))
    return nullptr;

  // Check for necessary no-wrap and overflow constraints.
  bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
  auto *Add = cast<BinaryOperator>(Op0);
  if ((IsSigned && !Add->hasNoSignedWrap()) ||
      (!IsSigned && !Add->hasNoUnsignedWrap()))
    return nullptr;

  // If the constant difference overflows, then instsimplify should reduce the
  // min/max to the add or C1.
  bool Overflow;
  APInt CDiff =
      IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
  assert(!Overflow && "Expected simplify of min/max");

  // min/max (add X, C0), C1 --> add (min/max X, C1 - C0), C0
  // Note: the "mismatched" no-overflow setting does not propagate.
  Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
  Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
  return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                  : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
}

/// Match a sadd_sat or ssub_sat which is using min/max to clamp the value.
Instruction *InstCombinerImpl::matchSAddSubSat(IntrinsicInst &MinMax1) {
  Type *Ty = MinMax1.getType();

  // We are looking for a tree of:
  // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B))))
  // Where the min and max could be reversed
  Instruction *MinMax2;
  BinaryOperator *AddSub;
  const APInt *MinValue, *MaxValue;
  if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) {
    if (!match(MinMax2, m_SMax(m_BinOp(AddSub), m_APInt(MinValue))))
      return nullptr;
  } else if (match(&MinMax1,
                   m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) {
    if (!match(MinMax2, m_SMin(m_BinOp(AddSub), m_APInt(MaxValue))))
      return nullptr;
  } else
    return nullptr;

  // Check that the constants clamp a saturate, and that the new type would be
  // sensible to convert to.
  if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
    return nullptr;
  // In what bitwidth can this be treated as saturating arithmetic?
  unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
  // FIXME: This isn't quite right for vectors, but using the scalar type is a
  // good first approximation for what should be done there.
  if (!shouldChangeType(Ty->getScalarType()->getIntegerBitWidth(), NewBitWidth))
    return nullptr;

  // Also make sure that the inner min/max and the add/sub have one use.
  if (!MinMax2->hasOneUse() || !AddSub->hasOneUse())
    return nullptr;

  // Create the new type (which can be a vector type)
  Type *NewTy = Ty->getWithNewBitWidth(NewBitWidth);

  Intrinsic::ID IntrinsicID;
  if (AddSub->getOpcode() == Instruction::Add)
    IntrinsicID = Intrinsic::sadd_sat;
  else if (AddSub->getOpcode() == Instruction::Sub)
    IntrinsicID = Intrinsic::ssub_sat;
  else
    return nullptr;

  // The two operands of the add/sub must be nsw-truncatable to the NewTy. This
  // is usually achieved via a sext from a smaller type.
  if (ComputeMaxSignificantBits(AddSub->getOperand(0), 0, AddSub) >
          NewBitWidth ||
      ComputeMaxSignificantBits(AddSub->getOperand(1), 0, AddSub) > NewBitWidth)
    return nullptr;

  // Finally create and return the sat intrinsic, truncated to the new type
  Function *F =
      Intrinsic::getDeclaration(MinMax1.getModule(), IntrinsicID, NewTy);
  Value *AT = Builder.CreateTrunc(AddSub->getOperand(0), NewTy);
  Value *BT = Builder.CreateTrunc(AddSub->getOperand(1), NewTy);
  Value *Sat = Builder.CreateCall(F, {AT, BT});
  return CastInst::Create(Instruction::SExt, Sat, Ty);
}

/// If we have a clamp pattern like max (min X, 42), 41 -- where the output
/// can only be one of two possible constant values -- turn that into a select
/// of constants.
static Instruction *foldClampRangeOfTwo(IntrinsicInst *II,
                                        InstCombiner::BuilderTy &Builder) {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(I1, m_APInt(C1)) || !I0->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
    if (match(I0, m_SMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_SGT;
    break;
  case Intrinsic::smin:
    if (match(I0, m_SMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_SLT;
    break;
  case Intrinsic::umax:
    if (match(I0, m_UMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_UGT;
    break;
  case Intrinsic::umin:
    if (match(I0, m_UMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_ULT;
    break;
  default:
    llvm_unreachable("Expected min/max intrinsic");
  }
  if (Pred == CmpInst::BAD_ICMP_PREDICATE)
    return nullptr;

  // max (min X, 42), 41 --> X > 41 ? 42 : 41
  // min (max X, 42), 43 --> X < 43 ? 42 : 43
  Value *Cmp = Builder.CreateICmp(Pred, X, I1);
  return SelectInst::Create(Cmp, ConstantInt::get(II->getType(), *C0), I1);
}

/// If this min/max has a constant operand and an operand that is a matching
/// min/max with a constant operand, constant-fold the 2 constant operands.
static Value *reassociateMinMaxWithConstants(IntrinsicInst *II,
                                             IRBuilderBase &Builder) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  if (!LHS || LHS->getIntrinsicID() != MinMaxID)
    return nullptr;

  Constant *C0, *C1;
  if (!match(LHS->getArgOperand(1), m_ImmConstant(C0)) ||
      !match(II->getArgOperand(1), m_ImmConstant(C1)))
    return nullptr;

  // max (max X, C0), C1 --> max X, (max C0, C1) --> max X, NewC
  ICmpInst::Predicate Pred = MinMaxIntrinsic::getPredicate(MinMaxID);
  Value *CondC = Builder.CreateICmp(Pred, C0, C1);
  Value *NewC = Builder.CreateSelect(CondC, C0, C1);
  return Builder.CreateIntrinsic(MinMaxID, II->getType(),
                                 {LHS->getArgOperand(0), NewC});
}

/// If this min/max has a matching min/max operand with a constant, try to push
/// the constant operand into this instruction. This can enable more folds.
static Instruction *
reassociateMinMaxWithConstantInOperand(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  // Match and capture a min/max operand candidate.
  Value *X, *Y;
  Constant *C;
  Instruction *Inner;
  if (!match(II, m_c_MaxOrMin(m_OneUse(m_CombineAnd(
                                  m_Instruction(Inner),
                                  m_MaxOrMin(m_Value(X), m_ImmConstant(C)))),
                              m_Value(Y))))
    return nullptr;

  // The inner op must match. Check for constants to avoid infinite loops.
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
  if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
      match(X, m_ImmConstant()) || match(Y, m_ImmConstant()))
    return nullptr;

  // max (max X, C), Y --> max (max X, Y), C
  Function *MinMax =
      Intrinsic::getDeclaration(II->getModule(), MinMaxID, II->getType());
  Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);
  NewInner->takeName(Inner);
  return CallInst::Create(MinMax, {NewInner, C});
}

/// Reduce a sequence of min/max intrinsics with a common operand.
static Instruction *factorizeMinMaxTree(IntrinsicInst *II) {
  // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
      RHS->getIntrinsicID() != MinMaxID ||
      (!LHS->hasOneUse() && !RHS->hasOneUse()))
    return nullptr;

  Value *A = LHS->getArgOperand(0);
  Value *B = LHS->getArgOperand(1);
  Value *C = RHS->getArgOperand(0);
  Value *D = RHS->getArgOperand(1);

  // Look for a common operand.
  Value *MinMaxOp = nullptr;
  Value *ThirdOp = nullptr;
  if (LHS->hasOneUse()) {
    // If the LHS is only used in this chain and the RHS is used outside of it,
    // reuse the RHS min/max because that will eliminate the LHS.
    if (D == A || C == A) {
      // min(min(a, b), min(c, a)) --> min(min(c, a), b)
      // min(min(a, b), min(a, d)) --> min(min(a, d), b)
      MinMaxOp = RHS;
      ThirdOp = B;
    } else if (D == B || C == B) {
      // min(min(a, b), min(c, b)) --> min(min(c, b), a)
      // min(min(a, b), min(b, d)) --> min(min(b, d), a)
      MinMaxOp = RHS;
      ThirdOp = A;
    }
  } else {
    assert(RHS->hasOneUse() && "Expected one-use operand");
    // Reuse the LHS. This will eliminate the RHS.
    if (D == A || D == B) {
      // min(min(a, b), min(c, a)) --> min(min(a, b), c)
      // min(min(a, b), min(c, b)) --> min(min(a, b), c)
      MinMaxOp = LHS;
      ThirdOp = C;
    } else if (C == A || C == B) {
      // min(min(a, b), min(b, d)) --> min(min(a, b), d)
      // min(min(a, b), min(c, b)) --> min(min(a, b), d)
      MinMaxOp = LHS;
      ThirdOp = D;
    }
  }

  if (!MinMaxOp || !ThirdOp)
    return nullptr;

  Module *Mod = II->getModule();
  Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType());
  return CallInst::Create(MinMax, { MinMaxOp, ThirdOp });
}

/// If all arguments of the intrinsic are unary shuffles with the same mask,
/// try to shuffle after the intrinsic.
static Instruction *
foldShuffledIntrinsicOperands(IntrinsicInst *II,
                              InstCombiner::BuilderTy &Builder) {
  // TODO: This should be extended to handle other intrinsics like fshl, ctpop,
  //       etc. Use llvm::isTriviallyVectorizable() and related to determine
  //       which intrinsics are safe to shuffle?
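  // The intrinsics handled here operate lane-wise, so applying them before or
  // after an identical shuffle of every operand produces the same result.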
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::fma:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
    break;
  default:
    return nullptr;
  }

  Value *X;
  ArrayRef<int> Mask;
  if (!match(II->getArgOperand(0),
             m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))
    return nullptr;

  // At least 1 operand must have 1 use because we are creating 2 instructions.
  if (none_of(II->args(), [](Value *V) { return V->hasOneUse(); }))
    return nullptr;

  // See if all arguments are shuffled with the same mask.
  SmallVector<Value *, 4> NewArgs(II->arg_size());
  NewArgs[0] = X;
  Type *SrcTy = X->getType();
  for (unsigned i = 1, e = II->arg_size(); i != e; ++i) {
    if (!match(II->getArgOperand(i),
               m_Shuffle(m_Value(X), m_Undef(), m_SpecificMask(Mask))) ||
        X->getType() != SrcTy)
      return nullptr;
    NewArgs[i] = X;
  }

  // intrinsic (shuf X, M), (shuf Y, M), ... --> shuf (intrinsic X, Y, ...), M
  Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
  Value *NewIntrinsic =
      Builder.CreateIntrinsic(II->getIntrinsicID(), SrcTy, NewArgs, FPI);
  return new ShuffleVectorInst(NewIntrinsic, Mask);
}

/// Fold the following cases; this accepts bswap and bitreverse intrinsics:
///   bswap(logic_op(bswap(x), y)) --> logic_op(x, bswap(y))
///   bswap(logic_op(bswap(x), bswap(y))) --> logic_op(x, y) (ignores multiuse)
template <Intrinsic::ID IntrID>
static Instruction *foldBitOrderCrossLogicOp(Value *V,
                                             InstCombiner::BuilderTy &Builder) {
  static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
                "This helper only supports BSWAP and BITREVERSE intrinsics");

  Value *X, *Y;
  // Find the bitwise logic op. Check that it is a BinaryOperator explicitly so
  // we don't match ConstantExprs that aren't meaningful for this transform.
  if (match(V, m_OneUse(m_BitwiseLogic(m_Value(X), m_Value(Y)))) &&
      isa<BinaryOperator>(V)) {
    Value *OldReorderX, *OldReorderY;
    BinaryOperator::BinaryOps Op = cast<BinaryOperator>(V)->getOpcode();

    // If both X and Y are bswap/bitreverse, the transform reduces the number
    // of instructions even if there's multiuse.
    // If only one operand is bswap/bitreverse, we need to ensure the operand
    // has only one use.
    if (match(X, m_Intrinsic<IntrID>(m_Value(OldReorderX))) &&
        match(Y, m_Intrinsic<IntrID>(m_Value(OldReorderY)))) {
      return BinaryOperator::Create(Op, OldReorderX, OldReorderY);
    }

    if (match(X, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderX))))) {
      Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y);
      return BinaryOperator::Create(Op, OldReorderX, NewReorder);
    }

    if (match(Y, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderY))))) {
      Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X);
      return BinaryOperator::Create(Op, NewReorder, OldReorderY);
    }
  }
  return nullptr;
}

/// CallInst simplification. This mostly only handles folding of intrinsic
/// instructions. For normal calls, it allows visitCallBase to do the heavy
/// lifting.
Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
  // Don't try to simplify calls without uses. It will not do anything useful,
  // but will result in the following folds being skipped.
It will not do anything useful, 1435 // but will result in the following folds being skipped. 1436 if (!CI.use_empty()) { 1437 SmallVector<Value *, 4> Args; 1438 Args.reserve(CI.arg_size()); 1439 for (Value *Op : CI.args()) 1440 Args.push_back(Op); 1441 if (Value *V = simplifyCall(&CI, CI.getCalledOperand(), Args, 1442 SQ.getWithInstruction(&CI))) 1443 return replaceInstUsesWith(CI, V); 1444 } 1445 1446 if (Value *FreedOp = getFreedOperand(&CI, &TLI)) 1447 return visitFree(CI, FreedOp); 1448 1449 // If the caller function (i.e. us, the function that contains this CallInst) 1450 // is nounwind, mark the call as nounwind, even if the callee isn't. 1451 if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) { 1452 CI.setDoesNotThrow(); 1453 return &CI; 1454 } 1455 1456 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI); 1457 if (!II) return visitCallBase(CI); 1458 1459 // For atomic unordered mem intrinsics if len is not a positive or 1460 // not a multiple of element size then behavior is undefined. 1461 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II)) 1462 if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength())) 1463 if (NumBytes->isNegative() || 1464 (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) { 1465 CreateNonTerminatorUnreachable(AMI); 1466 assert(AMI->getType()->isVoidTy() && 1467 "non void atomic unordered mem intrinsic"); 1468 return eraseInstFromFunction(*AMI); 1469 } 1470 1471 // Intrinsics cannot occur in an invoke or a callbr, so handle them here 1472 // instead of in visitCallBase. 1473 if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) { 1474 bool Changed = false; 1475 1476 // memmove/cpy/set of zero bytes is a noop. 1477 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) { 1478 if (NumBytes->isNullValue()) 1479 return eraseInstFromFunction(CI); 1480 } 1481 1482 // No other transformations apply to volatile transfers. 1483 if (auto *M = dyn_cast<MemIntrinsic>(MI)) 1484 if (M->isVolatile()) 1485 return nullptr; 1486 1487 // If we have a memmove and the source operation is a constant global, 1488 // then the source and dest pointers can't alias, so we can change this 1489 // into a call to memcpy. 1490 if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) { 1491 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource())) 1492 if (GVSrc->isConstant()) { 1493 Module *M = CI.getModule(); 1494 Intrinsic::ID MemCpyID = 1495 isa<AtomicMemMoveInst>(MMI) 1496 ? Intrinsic::memcpy_element_unordered_atomic 1497 : Intrinsic::memcpy; 1498 Type *Tys[3] = { CI.getArgOperand(0)->getType(), 1499 CI.getArgOperand(1)->getType(), 1500 CI.getArgOperand(2)->getType() }; 1501 CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys)); 1502 Changed = true; 1503 } 1504 } 1505 1506 if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) { 1507 // memmove(x,x,size) -> noop. 1508 if (MTI->getSource() == MTI->getDest()) 1509 return eraseInstFromFunction(CI); 1510 } 1511 1512 // If we can determine a pointer alignment that is bigger than currently 1513 // set, update the alignment. 1514 if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) { 1515 if (Instruction *I = SimplifyAnyMemTransfer(MTI)) 1516 return I; 1517 } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) { 1518 if (Instruction *I = SimplifyAnyMemSet(MSI)) 1519 return I; 1520 } 1521 1522 if (Changed) return II; 1523 } 1524 1525 // For fixed width vector result intrinsics, use the generic demanded vector 1526 // support. 
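  // Illustrative: if later folds only read lane 0 of a <4 x i32> result,
  // SimplifyDemandedVectorElts may be able to simplify the operands and report
  // the unused lanes as poison.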
1527 if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) { 1528 auto VWidth = IIFVTy->getNumElements(); 1529 APInt PoisonElts(VWidth, 0); 1530 APInt AllOnesEltMask(APInt::getAllOnes(VWidth)); 1531 if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, PoisonElts)) { 1532 if (V != II) 1533 return replaceInstUsesWith(*II, V); 1534 return II; 1535 } 1536 } 1537 1538 if (II->isCommutative()) { 1539 if (auto Pair = matchSymmetricPair(II->getOperand(0), II->getOperand(1))) { 1540 replaceOperand(*II, 0, Pair->first); 1541 replaceOperand(*II, 1, Pair->second); 1542 return II; 1543 } 1544 1545 if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI)) 1546 return NewCall; 1547 } 1548 1549 // Unused constrained FP intrinsic calls may have declared side effect, which 1550 // prevents it from being removed. In some cases however the side effect is 1551 // actually absent. To detect this case, call SimplifyConstrainedFPCall. If it 1552 // returns a replacement, the call may be removed. 1553 if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) { 1554 if (simplifyConstrainedFPCall(&CI, SQ.getWithInstruction(&CI))) 1555 return eraseInstFromFunction(CI); 1556 } 1557 1558 Intrinsic::ID IID = II->getIntrinsicID(); 1559 switch (IID) { 1560 case Intrinsic::objectsize: { 1561 SmallVector<Instruction *> InsertedInstructions; 1562 if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false, 1563 &InsertedInstructions)) { 1564 for (Instruction *Inserted : InsertedInstructions) 1565 Worklist.add(Inserted); 1566 return replaceInstUsesWith(CI, V); 1567 } 1568 return nullptr; 1569 } 1570 case Intrinsic::abs: { 1571 Value *IIOperand = II->getArgOperand(0); 1572 bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue(); 1573 1574 // abs(-x) -> abs(x) 1575 // TODO: Copy nsw if it was present on the neg? 1576 Value *X; 1577 if (match(IIOperand, m_Neg(m_Value(X)))) 1578 return replaceOperand(*II, 0, X); 1579 if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X))))) 1580 return replaceOperand(*II, 0, X); 1581 if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X)))) 1582 return replaceOperand(*II, 0, X); 1583 1584 if (std::optional<bool> Known = 1585 getKnownSignOrZero(IIOperand, II, DL, &AC, &DT)) { 1586 // abs(x) -> x if x >= 0 (include abs(x-y) --> x - y where x >= y) 1587 // abs(x) -> x if x > 0 (include abs(x-y) --> x - y where x > y) 1588 if (!*Known) 1589 return replaceInstUsesWith(*II, IIOperand); 1590 1591 // abs(x) -> -x if x < 0 1592 // abs(x) -> -x if x < = 0 (include abs(x-y) --> y - x where x <= y) 1593 if (IntMinIsPoison) 1594 return BinaryOperator::CreateNSWNeg(IIOperand); 1595 return BinaryOperator::CreateNeg(IIOperand); 1596 } 1597 1598 // abs (sext X) --> zext (abs X*) 1599 // Clear the IsIntMin (nsw) bit on the abs to allow narrowing. 
1600 if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) { 1601 Value *NarrowAbs = 1602 Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse()); 1603 return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType()); 1604 } 1605 1606 // Match a complicated way to check if a number is odd/even: 1607 // abs (srem X, 2) --> and X, 1 1608 const APInt *C; 1609 if (match(IIOperand, m_SRem(m_Value(X), m_APInt(C))) && *C == 2) 1610 return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1)); 1611 1612 break; 1613 } 1614 case Intrinsic::umin: { 1615 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1); 1616 // umin(x, 1) == zext(x != 0) 1617 if (match(I1, m_One())) { 1618 assert(II->getType()->getScalarSizeInBits() != 1 && 1619 "Expected simplify of umin with max constant"); 1620 Value *Zero = Constant::getNullValue(I0->getType()); 1621 Value *Cmp = Builder.CreateICmpNE(I0, Zero); 1622 return CastInst::Create(Instruction::ZExt, Cmp, II->getType()); 1623 } 1624 [[fallthrough]]; 1625 } 1626 case Intrinsic::umax: { 1627 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1); 1628 Value *X, *Y; 1629 if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) && 1630 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) { 1631 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y); 1632 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType()); 1633 } 1634 Constant *C; 1635 if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_Constant(C)) && 1636 I0->hasOneUse()) { 1637 if (Constant *NarrowC = getLosslessUnsignedTrunc(C, X->getType())) { 1638 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC); 1639 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType()); 1640 } 1641 } 1642 // If both operands of unsigned min/max are sign-extended, it is still ok 1643 // to narrow the operation. 1644 [[fallthrough]]; 1645 } 1646 case Intrinsic::smax: 1647 case Intrinsic::smin: { 1648 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1); 1649 Value *X, *Y; 1650 if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) && 1651 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) { 1652 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y); 1653 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType()); 1654 } 1655 1656 Constant *C; 1657 if (match(I0, m_SExt(m_Value(X))) && match(I1, m_Constant(C)) && 1658 I0->hasOneUse()) { 1659 if (Constant *NarrowC = getLosslessSignedTrunc(C, X->getType())) { 1660 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC); 1661 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType()); 1662 } 1663 } 1664 1665 // umin(i1 X, i1 Y) -> and i1 X, Y 1666 // smax(i1 X, i1 Y) -> and i1 X, Y 1667 if ((IID == Intrinsic::umin || IID == Intrinsic::smax) && 1668 II->getType()->isIntOrIntVectorTy(1)) { 1669 return BinaryOperator::CreateAnd(I0, I1); 1670 } 1671 1672 // umax(i1 X, i1 Y) -> or i1 X, Y 1673 // smin(i1 X, i1 Y) -> or i1 X, Y 1674 if ((IID == Intrinsic::umax || IID == Intrinsic::smin) && 1675 II->getType()->isIntOrIntVectorTy(1)) { 1676 return BinaryOperator::CreateOr(I0, I1); 1677 } 1678 1679 if (IID == Intrinsic::smax || IID == Intrinsic::smin) { 1680 // smax (neg nsw X), (neg nsw Y) --> neg nsw (smin X, Y) 1681 // smin (neg nsw X), (neg nsw Y) --> neg nsw (smax X, Y) 1682 // TODO: Canonicalize neg after min/max if I1 is constant. 
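      // Illustrative: smax(-x, -y) == -smin(x, y) when the negations are nsw,
      // e.g. smax(-3, -7) = -3 and -(smin(3, 7)) = -3.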
1683 if (match(I0, m_NSWNeg(m_Value(X))) && match(I1, m_NSWNeg(m_Value(Y))) && 1684 (I0->hasOneUse() || I1->hasOneUse())) { 1685 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID); 1686 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y); 1687 return BinaryOperator::CreateNSWNeg(InvMaxMin); 1688 } 1689 } 1690 1691 // (umax X, (xor X, Pow2)) 1692 // -> (or X, Pow2) 1693 // (umin X, (xor X, Pow2)) 1694 // -> (and X, ~Pow2) 1695 // (smax X, (xor X, Pos_Pow2)) 1696 // -> (or X, Pos_Pow2) 1697 // (smin X, (xor X, Pos_Pow2)) 1698 // -> (and X, ~Pos_Pow2) 1699 // (smax X, (xor X, Neg_Pow2)) 1700 // -> (and X, ~Neg_Pow2) 1701 // (smin X, (xor X, Neg_Pow2)) 1702 // -> (or X, Neg_Pow2) 1703 if ((match(I0, m_c_Xor(m_Specific(I1), m_Value(X))) || 1704 match(I1, m_c_Xor(m_Specific(I0), m_Value(X)))) && 1705 isKnownToBeAPowerOfTwo(X, /* OrZero */ true)) { 1706 bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax; 1707 bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin; 1708 1709 if (IID == Intrinsic::smax || IID == Intrinsic::smin) { 1710 auto KnownSign = getKnownSign(X, II, DL, &AC, &DT); 1711 if (KnownSign == std::nullopt) { 1712 UseOr = false; 1713 UseAndN = false; 1714 } else if (*KnownSign /* true is Signed. */) { 1715 UseOr ^= true; 1716 UseAndN ^= true; 1717 Type *Ty = I0->getType(); 1718 // Negative power of 2 must be IntMin. It's possible to be able to 1719 // prove negative / power of 2 without actually having known bits, so 1720 // just get the value by hand. 1721 X = Constant::getIntegerValue( 1722 Ty, APInt::getSignedMinValue(Ty->getScalarSizeInBits())); 1723 } 1724 } 1725 if (UseOr) 1726 return BinaryOperator::CreateOr(I0, X); 1727 else if (UseAndN) 1728 return BinaryOperator::CreateAnd(I0, Builder.CreateNot(X)); 1729 } 1730 1731 // If we can eliminate ~A and Y is free to invert: 1732 // max ~A, Y --> ~(min A, ~Y) 1733 // 1734 // Examples: 1735 // max ~A, ~Y --> ~(min A, Y) 1736 // max ~A, C --> ~(min A, ~C) 1737 // max ~A, (max ~Y, ~Z) --> ~min( A, (min Y, Z)) 1738 auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * { 1739 Value *A; 1740 if (match(X, m_OneUse(m_Not(m_Value(A)))) && 1741 !isFreeToInvert(A, A->hasOneUse())) { 1742 if (Value *NotY = getFreelyInverted(Y, Y->hasOneUse(), &Builder)) { 1743 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID); 1744 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY); 1745 return BinaryOperator::CreateNot(InvMaxMin); 1746 } 1747 } 1748 return nullptr; 1749 }; 1750 1751 if (Instruction *I = moveNotAfterMinMax(I0, I1)) 1752 return I; 1753 if (Instruction *I = moveNotAfterMinMax(I1, I0)) 1754 return I; 1755 1756 if (Instruction *I = moveAddAfterMinMax(II, Builder)) 1757 return I; 1758 1759 // smax(X, -X) --> abs(X) 1760 // smin(X, -X) --> -abs(X) 1761 // umax(X, -X) --> -abs(X) 1762 // umin(X, -X) --> abs(X) 1763 if (isKnownNegation(I0, I1)) { 1764 // We can choose either operand as the input to abs(), but if we can 1765 // eliminate the only use of a value, that's better for subsequent 1766 // transforms/analysis. 1767 if (I0->hasOneUse() && !I1->hasOneUse()) 1768 std::swap(I0, I1); 1769 1770 // This is some variant of abs(). See if we can propagate 'nsw' to the abs 1771 // operation and potentially its negation. 
1772 bool IntMinIsPoison = isKnownNegation(I0, I1, /* NeedNSW */ true); 1773 Value *Abs = Builder.CreateBinaryIntrinsic( 1774 Intrinsic::abs, I0, 1775 ConstantInt::getBool(II->getContext(), IntMinIsPoison)); 1776 1777 // We don't have a "nabs" intrinsic, so negate if needed based on the 1778 // max/min operation. 1779 if (IID == Intrinsic::smin || IID == Intrinsic::umax) 1780 Abs = Builder.CreateNeg(Abs, "nabs", /* NUW */ false, IntMinIsPoison); 1781 return replaceInstUsesWith(CI, Abs); 1782 } 1783 1784 if (Instruction *Sel = foldClampRangeOfTwo(II, Builder)) 1785 return Sel; 1786 1787 if (Instruction *SAdd = matchSAddSubSat(*II)) 1788 return SAdd; 1789 1790 if (Value *NewMinMax = reassociateMinMaxWithConstants(II, Builder)) 1791 return replaceInstUsesWith(*II, NewMinMax); 1792 1793 if (Instruction *R = reassociateMinMaxWithConstantInOperand(II, Builder)) 1794 return R; 1795 1796 if (Instruction *NewMinMax = factorizeMinMaxTree(II)) 1797 return NewMinMax; 1798 1799 // Try to fold minmax with constant RHS based on range information 1800 const APInt *RHSC; 1801 if (match(I1, m_APIntAllowUndef(RHSC))) { 1802 ICmpInst::Predicate Pred = 1803 ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID)); 1804 bool IsSigned = MinMaxIntrinsic::isSigned(IID); 1805 ConstantRange LHS_CR = computeConstantRangeIncludingKnownBits( 1806 I0, IsSigned, SQ.getWithInstruction(II)); 1807 if (!LHS_CR.isFullSet()) { 1808 if (LHS_CR.icmp(Pred, *RHSC)) 1809 return replaceInstUsesWith(*II, I0); 1810 if (LHS_CR.icmp(ICmpInst::getSwappedPredicate(Pred), *RHSC)) 1811 return replaceInstUsesWith(*II, 1812 ConstantInt::get(II->getType(), *RHSC)); 1813 } 1814 } 1815 1816 break; 1817 } 1818 case Intrinsic::bitreverse: { 1819 Value *IIOperand = II->getArgOperand(0); 1820 // bitrev (zext i1 X to ?) --> X ? SignBitC : 0 1821 Value *X; 1822 if (match(IIOperand, m_ZExt(m_Value(X))) && 1823 X->getType()->isIntOrIntVectorTy(1)) { 1824 Type *Ty = II->getType(); 1825 APInt SignBit = APInt::getSignMask(Ty->getScalarSizeInBits()); 1826 return SelectInst::Create(X, ConstantInt::get(Ty, SignBit), 1827 ConstantInt::getNullValue(Ty)); 1828 } 1829 1830 if (Instruction *crossLogicOpFold = 1831 foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand, Builder)) 1832 return crossLogicOpFold; 1833 1834 break; 1835 } 1836 case Intrinsic::bswap: { 1837 Value *IIOperand = II->getArgOperand(0); 1838 1839 // Try to canonicalize bswap-of-logical-shift-by-8-bit-multiple as 1840 // inverse-shift-of-bswap: 1841 // bswap (shl X, Y) --> lshr (bswap X), Y 1842 // bswap (lshr X, Y) --> shl (bswap X), Y 1843 Value *X, *Y; 1844 if (match(IIOperand, m_OneUse(m_LogicalShift(m_Value(X), m_Value(Y))))) { 1845 // The transform allows undef vector elements, so try a constant match 1846 // first. If knownbits can handle that case, that clause could be removed. 1847 unsigned BitWidth = IIOperand->getType()->getScalarSizeInBits(); 1848 const APInt *C; 1849 if ((match(Y, m_APIntAllowUndef(C)) && (*C & 7) == 0) || 1850 MaskedValueIsZero(Y, APInt::getLowBitsSet(BitWidth, 3))) { 1851 Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X); 1852 BinaryOperator::BinaryOps InverseShift = 1853 cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl 1854 ? 
Instruction::LShr 1855 : Instruction::Shl; 1856 return BinaryOperator::Create(InverseShift, NewSwap, Y); 1857 } 1858 } 1859 1860 KnownBits Known = computeKnownBits(IIOperand, 0, II); 1861 uint64_t LZ = alignDown(Known.countMinLeadingZeros(), 8); 1862 uint64_t TZ = alignDown(Known.countMinTrailingZeros(), 8); 1863 unsigned BW = Known.getBitWidth(); 1864 1865 // bswap(x) -> shift(x) if x has exactly one "active byte" 1866 if (BW - LZ - TZ == 8) { 1867 assert(LZ != TZ && "active byte cannot be in the middle"); 1868 if (LZ > TZ) // -> shl(x) if the "active byte" is in the low part of x 1869 return BinaryOperator::CreateNUWShl( 1870 IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ)); 1871 // -> lshr(x) if the "active byte" is in the high part of x 1872 return BinaryOperator::CreateExactLShr( 1873 IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ)); 1874 } 1875 1876 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c)) 1877 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) { 1878 unsigned C = X->getType()->getScalarSizeInBits() - BW; 1879 Value *CV = ConstantInt::get(X->getType(), C); 1880 Value *V = Builder.CreateLShr(X, CV); 1881 return new TruncInst(V, IIOperand->getType()); 1882 } 1883 1884 if (Instruction *crossLogicOpFold = 1885 foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand, Builder)) { 1886 return crossLogicOpFold; 1887 } 1888 1889 // Try to fold into bitreverse if bswap is the root of the expression tree. 1890 if (Instruction *BitOp = matchBSwapOrBitReverse(*II, /*MatchBSwaps*/ false, 1891 /*MatchBitReversals*/ true)) 1892 return BitOp; 1893 break; 1894 } 1895 case Intrinsic::masked_load: 1896 if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II)) 1897 return replaceInstUsesWith(CI, SimplifiedMaskedOp); 1898 break; 1899 case Intrinsic::masked_store: 1900 return simplifyMaskedStore(*II); 1901 case Intrinsic::masked_gather: 1902 return simplifyMaskedGather(*II); 1903 case Intrinsic::masked_scatter: 1904 return simplifyMaskedScatter(*II); 1905 case Intrinsic::launder_invariant_group: 1906 case Intrinsic::strip_invariant_group: 1907 if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this)) 1908 return replaceInstUsesWith(*II, SkippedBarrier); 1909 break; 1910 case Intrinsic::powi: 1911 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) { 1912 // 0 and 1 are handled in instsimplify 1913 // powi(x, -1) -> 1/x 1914 if (Power->isMinusOne()) 1915 return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0), 1916 II->getArgOperand(0), II); 1917 // powi(x, 2) -> x*x 1918 if (Power->equalsInt(2)) 1919 return BinaryOperator::CreateFMulFMF(II->getArgOperand(0), 1920 II->getArgOperand(0), II); 1921 1922 if (!Power->getValue()[0]) { 1923 Value *X; 1924 // If power is even: 1925 // powi(-x, p) -> powi(x, p) 1926 // powi(fabs(x), p) -> powi(x, p) 1927 // powi(copysign(x, y), p) -> powi(x, p) 1928 if (match(II->getArgOperand(0), m_FNeg(m_Value(X))) || 1929 match(II->getArgOperand(0), m_FAbs(m_Value(X))) || 1930 match(II->getArgOperand(0), 1931 m_Intrinsic<Intrinsic::copysign>(m_Value(X), m_Value()))) 1932 return replaceOperand(*II, 0, X); 1933 } 1934 } 1935 break; 1936 1937 case Intrinsic::cttz: 1938 case Intrinsic::ctlz: 1939 if (auto *I = foldCttzCtlz(*II, *this)) 1940 return I; 1941 break; 1942 1943 case Intrinsic::ctpop: 1944 if (auto *I = foldCtpop(*II, *this)) 1945 return I; 1946 break; 1947 1948 case Intrinsic::fshl: 1949 case Intrinsic::fshr: { 1950 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1); 1951 Type *Ty = 
II->getType(); 1952 unsigned BitWidth = Ty->getScalarSizeInBits(); 1953 Constant *ShAmtC; 1954 if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC))) { 1955 // Canonicalize a shift amount constant operand to modulo the bit-width. 1956 Constant *WidthC = ConstantInt::get(Ty, BitWidth); 1957 Constant *ModuloC = 1958 ConstantFoldBinaryOpOperands(Instruction::URem, ShAmtC, WidthC, DL); 1959 if (!ModuloC) 1960 return nullptr; 1961 if (ModuloC != ShAmtC) 1962 return replaceOperand(*II, 2, ModuloC); 1963 1964 assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) == 1965 ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) && 1966 "Shift amount expected to be modulo bitwidth"); 1967 1968 // Canonicalize funnel shift right by constant to funnel shift left. This 1969 // is not entirely arbitrary. For historical reasons, the backend may 1970 // recognize rotate left patterns but miss rotate right patterns. 1971 if (IID == Intrinsic::fshr) { 1972 // fshr X, Y, C --> fshl X, Y, (BitWidth - C) 1973 Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC); 1974 Module *Mod = II->getModule(); 1975 Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty); 1976 return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC }); 1977 } 1978 assert(IID == Intrinsic::fshl && 1979 "All funnel shifts by simple constants should go left"); 1980 1981 // fshl(X, 0, C) --> shl X, C 1982 // fshl(X, undef, C) --> shl X, C 1983 if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef())) 1984 return BinaryOperator::CreateShl(Op0, ShAmtC); 1985 1986 // fshl(0, X, C) --> lshr X, (BW-C) 1987 // fshl(undef, X, C) --> lshr X, (BW-C) 1988 if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef())) 1989 return BinaryOperator::CreateLShr(Op1, 1990 ConstantExpr::getSub(WidthC, ShAmtC)); 1991 1992 // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form) 1993 if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) { 1994 Module *Mod = II->getModule(); 1995 Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty); 1996 return CallInst::Create(Bswap, { Op0 }); 1997 } 1998 if (Instruction *BitOp = 1999 matchBSwapOrBitReverse(*II, /*MatchBSwaps*/ true, 2000 /*MatchBitReversals*/ true)) 2001 return BitOp; 2002 } 2003 2004 // Left or right might be masked. 2005 if (SimplifyDemandedInstructionBits(*II)) 2006 return &CI; 2007 2008 // The shift amount (operand 2) of a funnel shift is modulo the bitwidth, 2009 // so only the low bits of the shift amount are demanded if the bitwidth is 2010 // a power-of-2. 2011 if (!isPowerOf2_32(BitWidth)) 2012 break; 2013 APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth)); 2014 KnownBits Op2Known(BitWidth); 2015 if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known)) 2016 return &CI; 2017 break; 2018 } 2019 case Intrinsic::ptrmask: { 2020 unsigned BitWidth = DL.getPointerTypeSizeInBits(II->getType()); 2021 KnownBits Known(BitWidth); 2022 if (SimplifyDemandedInstructionBits(*II, Known)) 2023 return II; 2024 2025 Value *InnerPtr, *InnerMask; 2026 bool Changed = false; 2027 // Combine: 2028 // (ptrmask (ptrmask p, A), B) 2029 // -> (ptrmask p, (and A, B)) 2030 if (match(II->getArgOperand(0), 2031 m_OneUse(m_Intrinsic<Intrinsic::ptrmask>(m_Value(InnerPtr), 2032 m_Value(InnerMask))))) { 2033 assert(II->getArgOperand(1)->getType() == InnerMask->getType() && 2034 "Mask types must match"); 2035 // TODO: If InnerMask == Op1, we could copy attributes from inner 2036 // callsite -> outer callsite. 
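      // Illustrative: inner/outer masks of -8 and -16 (align-8 then align-16)
      // combine to -8 & -16 == -16, i.e. one ptrmask with the stricter mask.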
2037 Value *NewMask = Builder.CreateAnd(II->getArgOperand(1), InnerMask); 2038 replaceOperand(CI, 0, InnerPtr); 2039 replaceOperand(CI, 1, NewMask); 2040 Changed = true; 2041 } 2042 2043 // See if we can deduce non-null. 2044 if (!CI.hasRetAttr(Attribute::NonNull) && 2045 (Known.isNonZero() || 2046 isKnownNonZero(II, DL, /*Depth*/ 0, &AC, II, &DT))) { 2047 CI.addRetAttr(Attribute::NonNull); 2048 Changed = true; 2049 } 2050 2051 unsigned NewAlignmentLog = 2052 std::min(Value::MaxAlignmentExponent, 2053 std::min(BitWidth - 1, Known.countMinTrailingZeros())); 2054 // Known bits will capture if we had alignment information associated with 2055 // the pointer argument. 2056 if (NewAlignmentLog > Log2(CI.getRetAlign().valueOrOne())) { 2057 CI.addRetAttr(Attribute::getWithAlignment( 2058 CI.getContext(), Align(uint64_t(1) << NewAlignmentLog))); 2059 Changed = true; 2060 } 2061 if (Changed) 2062 return &CI; 2063 break; 2064 } 2065 case Intrinsic::uadd_with_overflow: 2066 case Intrinsic::sadd_with_overflow: { 2067 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 2068 return I; 2069 2070 // Given 2 constant operands whose sum does not overflow: 2071 // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1 2072 // saddo (X +nsw C0), C1 -> saddo X, C0 + C1 2073 Value *X; 2074 const APInt *C0, *C1; 2075 Value *Arg0 = II->getArgOperand(0); 2076 Value *Arg1 = II->getArgOperand(1); 2077 bool IsSigned = IID == Intrinsic::sadd_with_overflow; 2078 bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0))) 2079 : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0))); 2080 if (HasNWAdd && match(Arg1, m_APInt(C1))) { 2081 bool Overflow; 2082 APInt NewC = 2083 IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow); 2084 if (!Overflow) 2085 return replaceInstUsesWith( 2086 *II, Builder.CreateBinaryIntrinsic( 2087 IID, X, ConstantInt::get(Arg1->getType(), NewC))); 2088 } 2089 break; 2090 } 2091 2092 case Intrinsic::umul_with_overflow: 2093 case Intrinsic::smul_with_overflow: 2094 case Intrinsic::usub_with_overflow: 2095 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 2096 return I; 2097 break; 2098 2099 case Intrinsic::ssub_with_overflow: { 2100 if (Instruction *I = foldIntrinsicWithOverflowCommon(II)) 2101 return I; 2102 2103 Constant *C; 2104 Value *Arg0 = II->getArgOperand(0); 2105 Value *Arg1 = II->getArgOperand(1); 2106 // Given a constant C that is not the minimum signed value 2107 // for an integer of a given bit width: 2108 // 2109 // ssubo X, C -> saddo X, -C 2110 if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) { 2111 Value *NegVal = ConstantExpr::getNeg(C); 2112 // Build a saddo call that is equivalent to the discovered 2113 // ssubo call. 2114 return replaceInstUsesWith( 2115 *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow, 2116 Arg0, NegVal)); 2117 } 2118 2119 break; 2120 } 2121 2122 case Intrinsic::uadd_sat: 2123 case Intrinsic::sadd_sat: 2124 case Intrinsic::usub_sat: 2125 case Intrinsic::ssub_sat: { 2126 SaturatingInst *SI = cast<SaturatingInst>(II); 2127 Type *Ty = SI->getType(); 2128 Value *Arg0 = SI->getLHS(); 2129 Value *Arg1 = SI->getRHS(); 2130 2131 // Make use of known overflow information. 
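    // Illustrative: uadd.sat(i8 200, i8 100) always overflows high and folds
    // to i8 255, while operands proven never to overflow become a plain
    // add/sub with nuw/nsw.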
2132 OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(), 2133 Arg0, Arg1, SI); 2134 switch (OR) { 2135 case OverflowResult::MayOverflow: 2136 break; 2137 case OverflowResult::NeverOverflows: 2138 if (SI->isSigned()) 2139 return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1); 2140 else 2141 return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1); 2142 case OverflowResult::AlwaysOverflowsLow: { 2143 unsigned BitWidth = Ty->getScalarSizeInBits(); 2144 APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned()); 2145 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min)); 2146 } 2147 case OverflowResult::AlwaysOverflowsHigh: { 2148 unsigned BitWidth = Ty->getScalarSizeInBits(); 2149 APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned()); 2150 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max)); 2151 } 2152 } 2153 2154 // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN 2155 Constant *C; 2156 if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) && 2157 C->isNotMinSignedValue()) { 2158 Value *NegVal = ConstantExpr::getNeg(C); 2159 return replaceInstUsesWith( 2160 *II, Builder.CreateBinaryIntrinsic( 2161 Intrinsic::sadd_sat, Arg0, NegVal)); 2162 } 2163 2164 // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2)) 2165 // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2)) 2166 // if Val and Val2 have the same sign 2167 if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) { 2168 Value *X; 2169 const APInt *Val, *Val2; 2170 APInt NewVal; 2171 bool IsUnsigned = 2172 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat; 2173 if (Other->getIntrinsicID() == IID && 2174 match(Arg1, m_APInt(Val)) && 2175 match(Other->getArgOperand(0), m_Value(X)) && 2176 match(Other->getArgOperand(1), m_APInt(Val2))) { 2177 if (IsUnsigned) 2178 NewVal = Val->uadd_sat(*Val2); 2179 else if (Val->isNonNegative() == Val2->isNonNegative()) { 2180 bool Overflow; 2181 NewVal = Val->sadd_ov(*Val2, Overflow); 2182 if (Overflow) { 2183 // Both adds together may add more than SignedMaxValue 2184 // without saturating the final result. 2185 break; 2186 } 2187 } else { 2188 // Cannot fold saturated addition with different signs. 
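          // (Illustrative: with X = INT_MAX, sadd.sat(sadd.sat(X, 3), -5) is
          //  INT_MAX - 5, but the merged sadd.sat(X, -2) would be INT_MAX - 2.)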
2189 break; 2190 } 2191 2192 return replaceInstUsesWith( 2193 *II, Builder.CreateBinaryIntrinsic( 2194 IID, X, ConstantInt::get(II->getType(), NewVal))); 2195 } 2196 } 2197 break; 2198 } 2199 2200 case Intrinsic::minnum: 2201 case Intrinsic::maxnum: 2202 case Intrinsic::minimum: 2203 case Intrinsic::maximum: { 2204 Value *Arg0 = II->getArgOperand(0); 2205 Value *Arg1 = II->getArgOperand(1); 2206 Value *X, *Y; 2207 if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) && 2208 (Arg0->hasOneUse() || Arg1->hasOneUse())) { 2209 // If both operands are negated, invert the call and negate the result: 2210 // min(-X, -Y) --> -(max(X, Y)) 2211 // max(-X, -Y) --> -(min(X, Y)) 2212 Intrinsic::ID NewIID; 2213 switch (IID) { 2214 case Intrinsic::maxnum: 2215 NewIID = Intrinsic::minnum; 2216 break; 2217 case Intrinsic::minnum: 2218 NewIID = Intrinsic::maxnum; 2219 break; 2220 case Intrinsic::maximum: 2221 NewIID = Intrinsic::minimum; 2222 break; 2223 case Intrinsic::minimum: 2224 NewIID = Intrinsic::maximum; 2225 break; 2226 default: 2227 llvm_unreachable("unexpected intrinsic ID"); 2228 } 2229 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II); 2230 Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall); 2231 FNeg->copyIRFlags(II); 2232 return FNeg; 2233 } 2234 2235 // m(m(X, C2), C1) -> m(X, C) 2236 const APFloat *C1, *C2; 2237 if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) { 2238 if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) && 2239 ((match(M->getArgOperand(0), m_Value(X)) && 2240 match(M->getArgOperand(1), m_APFloat(C2))) || 2241 (match(M->getArgOperand(1), m_Value(X)) && 2242 match(M->getArgOperand(0), m_APFloat(C2))))) { 2243 APFloat Res(0.0); 2244 switch (IID) { 2245 case Intrinsic::maxnum: 2246 Res = maxnum(*C1, *C2); 2247 break; 2248 case Intrinsic::minnum: 2249 Res = minnum(*C1, *C2); 2250 break; 2251 case Intrinsic::maximum: 2252 Res = maximum(*C1, *C2); 2253 break; 2254 case Intrinsic::minimum: 2255 Res = minimum(*C1, *C2); 2256 break; 2257 default: 2258 llvm_unreachable("unexpected intrinsic ID"); 2259 } 2260 Instruction *NewCall = Builder.CreateBinaryIntrinsic( 2261 IID, X, ConstantFP::get(Arg0->getType(), Res), II); 2262 // TODO: Conservatively intersecting FMF. If Res == C2, the transform 2263 // was a simplification (so Arg0 and its original flags could 2264 // propagate?) 2265 NewCall->andIRFlags(M); 2266 return replaceInstUsesWith(*II, NewCall); 2267 } 2268 } 2269 2270 // m((fpext X), (fpext Y)) -> fpext (m(X, Y)) 2271 if (match(Arg0, m_OneUse(m_FPExt(m_Value(X)))) && 2272 match(Arg1, m_OneUse(m_FPExt(m_Value(Y)))) && 2273 X->getType() == Y->getType()) { 2274 Value *NewCall = 2275 Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName()); 2276 return new FPExtInst(NewCall, II->getType()); 2277 } 2278 2279 // max X, -X --> fabs X 2280 // min X, -X --> -(fabs X) 2281 // TODO: Remove one-use limitation? That is obviously better for max. 2282 // It would be an extra instruction for min (fnabs), but that is 2283 // still likely better for analysis and codegen. 2284 if ((match(Arg0, m_OneUse(m_FNeg(m_Value(X)))) && Arg1 == X) || 2285 (match(Arg1, m_OneUse(m_FNeg(m_Value(X)))) && Arg0 == X)) { 2286 Value *R = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II); 2287 if (IID == Intrinsic::minimum || IID == Intrinsic::minnum) 2288 R = Builder.CreateFNegFMF(R, II); 2289 return replaceInstUsesWith(*II, R); 2290 } 2291 2292 break; 2293 } 2294 case Intrinsic::matrix_multiply: { 2295 // Optimize negation in matrix multiplication. 
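    // The general idea: move the fneg to whichever value (the other operand or
    // the result) has the fewest elements, so the negation is cheapest.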
2296 2297 // -A * -B -> A * B 2298 Value *A, *B; 2299 if (match(II->getArgOperand(0), m_FNeg(m_Value(A))) && 2300 match(II->getArgOperand(1), m_FNeg(m_Value(B)))) { 2301 replaceOperand(*II, 0, A); 2302 replaceOperand(*II, 1, B); 2303 return II; 2304 } 2305 2306 Value *Op0 = II->getOperand(0); 2307 Value *Op1 = II->getOperand(1); 2308 Value *OpNotNeg, *NegatedOp; 2309 unsigned NegatedOpArg, OtherOpArg; 2310 if (match(Op0, m_FNeg(m_Value(OpNotNeg)))) { 2311 NegatedOp = Op0; 2312 NegatedOpArg = 0; 2313 OtherOpArg = 1; 2314 } else if (match(Op1, m_FNeg(m_Value(OpNotNeg)))) { 2315 NegatedOp = Op1; 2316 NegatedOpArg = 1; 2317 OtherOpArg = 0; 2318 } else 2319 // Multiplication doesn't have a negated operand. 2320 break; 2321 2322 // Only optimize if the negated operand has only one use. 2323 if (!NegatedOp->hasOneUse()) 2324 break; 2325 2326 Value *OtherOp = II->getOperand(OtherOpArg); 2327 VectorType *RetTy = cast<VectorType>(II->getType()); 2328 VectorType *NegatedOpTy = cast<VectorType>(NegatedOp->getType()); 2329 VectorType *OtherOpTy = cast<VectorType>(OtherOp->getType()); 2330 ElementCount NegatedCount = NegatedOpTy->getElementCount(); 2331 ElementCount OtherCount = OtherOpTy->getElementCount(); 2332 ElementCount RetCount = RetTy->getElementCount(); 2333 // (-A) * B -> A * (-B), if it is cheaper to negate B and vice versa. 2334 if (ElementCount::isKnownGT(NegatedCount, OtherCount) && 2335 ElementCount::isKnownLT(OtherCount, RetCount)) { 2336 Value *InverseOtherOp = Builder.CreateFNeg(OtherOp); 2337 replaceOperand(*II, NegatedOpArg, OpNotNeg); 2338 replaceOperand(*II, OtherOpArg, InverseOtherOp); 2339 return II; 2340 } 2341 // (-A) * B -> -(A * B), if it is cheaper to negate the result 2342 if (ElementCount::isKnownGT(NegatedCount, RetCount)) { 2343 SmallVector<Value *, 5> NewArgs(II->args()); 2344 NewArgs[NegatedOpArg] = OpNotNeg; 2345 Instruction *NewMul = 2346 Builder.CreateIntrinsic(II->getType(), IID, NewArgs, II); 2347 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(NewMul, II)); 2348 } 2349 break; 2350 } 2351 case Intrinsic::fmuladd: { 2352 // Canonicalize fast fmuladd to the separate fmul + fadd. 2353 if (II->isFast()) { 2354 BuilderTy::FastMathFlagGuard Guard(Builder); 2355 Builder.setFastMathFlags(II->getFastMathFlags()); 2356 Value *Mul = Builder.CreateFMul(II->getArgOperand(0), 2357 II->getArgOperand(1)); 2358 Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2)); 2359 Add->takeName(II); 2360 return replaceInstUsesWith(*II, Add); 2361 } 2362 2363 // Try to simplify the underlying FMul. 2364 if (Value *V = simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1), 2365 II->getFastMathFlags(), 2366 SQ.getWithInstruction(II))) { 2367 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2)); 2368 FAdd->copyFastMathFlags(II); 2369 return FAdd; 2370 } 2371 2372 [[fallthrough]]; 2373 } 2374 case Intrinsic::fma: { 2375 // fma fneg(x), fneg(y), z -> fma x, y, z 2376 Value *Src0 = II->getArgOperand(0); 2377 Value *Src1 = II->getArgOperand(1); 2378 Value *X, *Y; 2379 if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) { 2380 replaceOperand(*II, 0, X); 2381 replaceOperand(*II, 1, Y); 2382 return II; 2383 } 2384 2385 // fma fabs(x), fabs(x), z -> fma x, x, z 2386 if (match(Src0, m_FAbs(m_Value(X))) && 2387 match(Src1, m_FAbs(m_Specific(X)))) { 2388 replaceOperand(*II, 0, X); 2389 replaceOperand(*II, 1, X); 2390 return II; 2391 } 2392 2393 // Try to simplify the underlying FMul. We can only apply simplifications 2394 // that do not require rounding. 
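    // Illustrative: in fma(%x, 1.0, %z) the multiply simplifies exactly to %x,
    // so the whole call can become fadd %x, %z.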
2395 if (Value *V = simplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1), 2396 II->getFastMathFlags(), 2397 SQ.getWithInstruction(II))) { 2398 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2)); 2399 FAdd->copyFastMathFlags(II); 2400 return FAdd; 2401 } 2402 2403 // fma x, y, 0 -> fmul x, y 2404 // This is always valid for -0.0, but requires nsz for +0.0 as 2405 // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own. 2406 if (match(II->getArgOperand(2), m_NegZeroFP()) || 2407 (match(II->getArgOperand(2), m_PosZeroFP()) && 2408 II->getFastMathFlags().noSignedZeros())) 2409 return BinaryOperator::CreateFMulFMF(Src0, Src1, II); 2410 2411 break; 2412 } 2413 case Intrinsic::copysign: { 2414 Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1); 2415 if (SignBitMustBeZero(Sign, DL, &TLI)) { 2416 // If we know that the sign argument is positive, reduce to FABS: 2417 // copysign Mag, +Sign --> fabs Mag 2418 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II); 2419 return replaceInstUsesWith(*II, Fabs); 2420 } 2421 // TODO: There should be a ValueTracking sibling like SignBitMustBeOne. 2422 const APFloat *C; 2423 if (match(Sign, m_APFloat(C)) && C->isNegative()) { 2424 // If we know that the sign argument is negative, reduce to FNABS: 2425 // copysign Mag, -Sign --> fneg (fabs Mag) 2426 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II); 2427 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II)); 2428 } 2429 2430 // Propagate sign argument through nested calls: 2431 // copysign Mag, (copysign ?, X) --> copysign Mag, X 2432 Value *X; 2433 if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X)))) 2434 return replaceOperand(*II, 1, X); 2435 2436 // Peek through changes of magnitude's sign-bit. 
This call rewrites those: 2437 // copysign (fabs X), Sign --> copysign X, Sign 2438 // copysign (fneg X), Sign --> copysign X, Sign 2439 if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X)))) 2440 return replaceOperand(*II, 0, X); 2441 2442 break; 2443 } 2444 case Intrinsic::fabs: { 2445 Value *Cond, *TVal, *FVal; 2446 if (match(II->getArgOperand(0), 2447 m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) { 2448 // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF 2449 if (isa<Constant>(TVal) && isa<Constant>(FVal)) { 2450 CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal}); 2451 CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal}); 2452 return SelectInst::Create(Cond, AbsT, AbsF); 2453 } 2454 // fabs (select Cond, -FVal, FVal) --> fabs FVal 2455 if (match(TVal, m_FNeg(m_Specific(FVal)))) 2456 return replaceOperand(*II, 0, FVal); 2457 // fabs (select Cond, TVal, -TVal) --> fabs TVal 2458 if (match(FVal, m_FNeg(m_Specific(TVal)))) 2459 return replaceOperand(*II, 0, TVal); 2460 } 2461 2462 Value *Magnitude, *Sign; 2463 if (match(II->getArgOperand(0), 2464 m_CopySign(m_Value(Magnitude), m_Value(Sign)))) { 2465 // fabs (copysign x, y) -> (fabs x) 2466 CallInst *AbsSign = 2467 Builder.CreateCall(II->getCalledFunction(), {Magnitude}); 2468 AbsSign->copyFastMathFlags(II); 2469 return replaceInstUsesWith(*II, AbsSign); 2470 } 2471 2472 [[fallthrough]]; 2473 } 2474 case Intrinsic::ceil: 2475 case Intrinsic::floor: 2476 case Intrinsic::round: 2477 case Intrinsic::roundeven: 2478 case Intrinsic::nearbyint: 2479 case Intrinsic::rint: 2480 case Intrinsic::trunc: { 2481 Value *ExtSrc; 2482 if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) { 2483 // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x) 2484 Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II); 2485 return new FPExtInst(NarrowII, II->getType()); 2486 } 2487 break; 2488 } 2489 case Intrinsic::cos: 2490 case Intrinsic::amdgcn_cos: { 2491 Value *X; 2492 Value *Src = II->getArgOperand(0); 2493 if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) { 2494 // cos(-x) -> cos(x) 2495 // cos(fabs(x)) -> cos(x) 2496 return replaceOperand(*II, 0, X); 2497 } 2498 break; 2499 } 2500 case Intrinsic::sin: { 2501 Value *X; 2502 if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) { 2503 // sin(-x) --> -sin(x) 2504 Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II); 2505 Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin); 2506 FNeg->copyFastMathFlags(II); 2507 return FNeg; 2508 } 2509 break; 2510 } 2511 case Intrinsic::ldexp: { 2512 // ldexp(ldexp(x, a), b) -> ldexp(x, a + b) 2513 // 2514 // The danger is if the first ldexp would overflow to infinity or underflow 2515 // to zero, but the combined exponent avoids it. We ignore this with 2516 // reassoc. 2517 // 2518 // It's also safe to fold if we know both exponents are >= 0 or <= 0 since 2519 // it would just double down on the overflow/underflow which would occur 2520 // anyway. 2521 // 2522 // TODO: Could do better if we had range tracking for the input value 2523 // exponent. Also could broaden sign check to cover == 0 case. 
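    // Illustrative: ldexp(ldexp(%x, 3), 5) --> ldexp(%x, 8) when the
    // conditions below (reassoc on both, or same-signed exponents) hold.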
2524 Value *Src = II->getArgOperand(0); 2525 Value *Exp = II->getArgOperand(1); 2526 Value *InnerSrc; 2527 Value *InnerExp; 2528 if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ldexp>( 2529 m_Value(InnerSrc), m_Value(InnerExp)))) && 2530 Exp->getType() == InnerExp->getType()) { 2531 FastMathFlags FMF = II->getFastMathFlags(); 2532 FastMathFlags InnerFlags = cast<FPMathOperator>(Src)->getFastMathFlags(); 2533 2534 if ((FMF.allowReassoc() && InnerFlags.allowReassoc()) || 2535 signBitMustBeTheSame(Exp, InnerExp, II, DL, &AC, &DT)) { 2536 // TODO: Add nsw/nuw probably safe if integer type exceeds exponent 2537 // width. 2538 Value *NewExp = Builder.CreateAdd(InnerExp, Exp); 2539 II->setArgOperand(1, NewExp); 2540 II->setFastMathFlags(InnerFlags); // Or the inner flags. 2541 return replaceOperand(*II, 0, InnerSrc); 2542 } 2543 } 2544 2545 break; 2546 } 2547 case Intrinsic::ptrauth_auth: 2548 case Intrinsic::ptrauth_resign: { 2549 // (sign|resign) + (auth|resign) can be folded by omitting the middle 2550 // sign+auth component if the key and discriminator match. 2551 bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign; 2552 Value *Key = II->getArgOperand(1); 2553 Value *Disc = II->getArgOperand(2); 2554 2555 // AuthKey will be the key we need to end up authenticating against in 2556 // whatever we replace this sequence with. 2557 Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr; 2558 if (auto CI = dyn_cast<CallBase>(II->getArgOperand(0))) { 2559 BasePtr = CI->getArgOperand(0); 2560 if (CI->getIntrinsicID() == Intrinsic::ptrauth_sign) { 2561 if (CI->getArgOperand(1) != Key || CI->getArgOperand(2) != Disc) 2562 break; 2563 } else if (CI->getIntrinsicID() == Intrinsic::ptrauth_resign) { 2564 if (CI->getArgOperand(3) != Key || CI->getArgOperand(4) != Disc) 2565 break; 2566 AuthKey = CI->getArgOperand(1); 2567 AuthDisc = CI->getArgOperand(2); 2568 } else 2569 break; 2570 } else 2571 break; 2572 2573 unsigned NewIntrin; 2574 if (AuthKey && NeedSign) { 2575 // resign(0,1) + resign(1,2) = resign(0, 2) 2576 NewIntrin = Intrinsic::ptrauth_resign; 2577 } else if (AuthKey) { 2578 // resign(0,1) + auth(1) = auth(0) 2579 NewIntrin = Intrinsic::ptrauth_auth; 2580 } else if (NeedSign) { 2581 // sign(0) + resign(0, 1) = sign(1) 2582 NewIntrin = Intrinsic::ptrauth_sign; 2583 } else { 2584 // sign(0) + auth(0) = nop 2585 replaceInstUsesWith(*II, BasePtr); 2586 eraseInstFromFunction(*II); 2587 return nullptr; 2588 } 2589 2590 SmallVector<Value *, 4> CallArgs; 2591 CallArgs.push_back(BasePtr); 2592 if (AuthKey) { 2593 CallArgs.push_back(AuthKey); 2594 CallArgs.push_back(AuthDisc); 2595 } 2596 2597 if (NeedSign) { 2598 CallArgs.push_back(II->getArgOperand(3)); 2599 CallArgs.push_back(II->getArgOperand(4)); 2600 } 2601 2602 Function *NewFn = Intrinsic::getDeclaration(II->getModule(), NewIntrin); 2603 return CallInst::Create(NewFn, CallArgs); 2604 } 2605 case Intrinsic::arm_neon_vtbl1: 2606 case Intrinsic::aarch64_neon_tbl1: 2607 if (Value *V = simplifyNeonTbl1(*II, Builder)) 2608 return replaceInstUsesWith(*II, V); 2609 break; 2610 2611 case Intrinsic::arm_neon_vmulls: 2612 case Intrinsic::arm_neon_vmullu: 2613 case Intrinsic::aarch64_neon_smull: 2614 case Intrinsic::aarch64_neon_umull: { 2615 Value *Arg0 = II->getArgOperand(0); 2616 Value *Arg1 = II->getArgOperand(1); 2617 2618 // Handle mul by zero first: 2619 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) { 2620 return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType())); 2621 } 2622 2623 // Check for 
constant LHS & RHS - in this case we just simplify. 2624 bool Zext = (IID == Intrinsic::arm_neon_vmullu || 2625 IID == Intrinsic::aarch64_neon_umull); 2626 VectorType *NewVT = cast<VectorType>(II->getType()); 2627 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) { 2628 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) { 2629 Value *V0 = Builder.CreateIntCast(CV0, NewVT, /*isSigned=*/!Zext); 2630 Value *V1 = Builder.CreateIntCast(CV1, NewVT, /*isSigned=*/!Zext); 2631 return replaceInstUsesWith(CI, Builder.CreateMul(V0, V1)); 2632 } 2633 2634 // Couldn't simplify - canonicalize constant to the RHS. 2635 std::swap(Arg0, Arg1); 2636 } 2637 2638 // Handle mul by one: 2639 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) 2640 if (ConstantInt *Splat = 2641 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) 2642 if (Splat->isOne()) 2643 return CastInst::CreateIntegerCast(Arg0, II->getType(), 2644 /*isSigned=*/!Zext); 2645 2646 break; 2647 } 2648 case Intrinsic::arm_neon_aesd: 2649 case Intrinsic::arm_neon_aese: 2650 case Intrinsic::aarch64_crypto_aesd: 2651 case Intrinsic::aarch64_crypto_aese: { 2652 Value *DataArg = II->getArgOperand(0); 2653 Value *KeyArg = II->getArgOperand(1); 2654 2655 // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR 2656 Value *Data, *Key; 2657 if (match(KeyArg, m_ZeroInt()) && 2658 match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) { 2659 replaceOperand(*II, 0, Data); 2660 replaceOperand(*II, 1, Key); 2661 return II; 2662 } 2663 break; 2664 } 2665 case Intrinsic::hexagon_V6_vandvrt: 2666 case Intrinsic::hexagon_V6_vandvrt_128B: { 2667 // Simplify Q -> V -> Q conversion. 2668 if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) { 2669 Intrinsic::ID ID0 = Op0->getIntrinsicID(); 2670 if (ID0 != Intrinsic::hexagon_V6_vandqrt && 2671 ID0 != Intrinsic::hexagon_V6_vandqrt_128B) 2672 break; 2673 Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1); 2674 uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue(); 2675 uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue(); 2676 // Check if every byte has common bits in Bytes and Mask. 2677 uint64_t C = Bytes1 & Mask1; 2678 if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000)) 2679 return replaceInstUsesWith(*II, Op0->getArgOperand(0)); 2680 } 2681 break; 2682 } 2683 case Intrinsic::stackrestore: { 2684 enum class ClassifyResult { 2685 None, 2686 Alloca, 2687 StackRestore, 2688 CallWithSideEffects, 2689 }; 2690 auto Classify = [](const Instruction *I) { 2691 if (isa<AllocaInst>(I)) 2692 return ClassifyResult::Alloca; 2693 2694 if (auto *CI = dyn_cast<CallInst>(I)) { 2695 if (auto *II = dyn_cast<IntrinsicInst>(CI)) { 2696 if (II->getIntrinsicID() == Intrinsic::stackrestore) 2697 return ClassifyResult::StackRestore; 2698 2699 if (II->mayHaveSideEffects()) 2700 return ClassifyResult::CallWithSideEffects; 2701 } else { 2702 // Consider all non-intrinsic calls to be side effects 2703 return ClassifyResult::CallWithSideEffects; 2704 } 2705 } 2706 2707 return ClassifyResult::None; 2708 }; 2709 2710 // If the stacksave and the stackrestore are in the same BB, and there is 2711 // no intervening call, alloca, or stackrestore of a different stacksave, 2712 // remove the restore. This can happen when variable allocas are DCE'd. 
2713 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) { 2714 if (SS->getIntrinsicID() == Intrinsic::stacksave && 2715 SS->getParent() == II->getParent()) { 2716 BasicBlock::iterator BI(SS); 2717 bool CannotRemove = false; 2718 for (++BI; &*BI != II; ++BI) { 2719 switch (Classify(&*BI)) { 2720 case ClassifyResult::None: 2721 // So far so good, look at next instructions. 2722 break; 2723 2724 case ClassifyResult::StackRestore: 2725 // If we found an intervening stackrestore for a different 2726 // stacksave, we can't remove the stackrestore. Otherwise, continue. 2727 if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS) 2728 CannotRemove = true; 2729 break; 2730 2731 case ClassifyResult::Alloca: 2732 case ClassifyResult::CallWithSideEffects: 2733 // If we found an alloca, a non-intrinsic call, or an intrinsic 2734 // call with side effects, we can't remove the stackrestore. 2735 CannotRemove = true; 2736 break; 2737 } 2738 if (CannotRemove) 2739 break; 2740 } 2741 2742 if (!CannotRemove) 2743 return eraseInstFromFunction(CI); 2744 } 2745 } 2746 2747 // Scan down this block to see if there is another stack restore in the 2748 // same block without an intervening call/alloca. 2749 BasicBlock::iterator BI(II); 2750 Instruction *TI = II->getParent()->getTerminator(); 2751 bool CannotRemove = false; 2752 for (++BI; &*BI != TI; ++BI) { 2753 switch (Classify(&*BI)) { 2754 case ClassifyResult::None: 2755 // So far so good, look at next instructions. 2756 break; 2757 2758 case ClassifyResult::StackRestore: 2759 // If there is a stackrestore below this one, remove this one. 2760 return eraseInstFromFunction(CI); 2761 2762 case ClassifyResult::Alloca: 2763 case ClassifyResult::CallWithSideEffects: 2764 // If we found an alloca, a non-intrinsic call, or an intrinsic call 2765 // with side effects (such as llvm.stacksave and llvm.read_register), 2766 // we can't remove the stack restore. 2767 CannotRemove = true; 2768 break; 2769 } 2770 if (CannotRemove) 2771 break; 2772 } 2773 2774 // If the stack restore is in a return, resume, or unwind block and if there 2775 // are no allocas or calls between the restore and the return, nuke the 2776 // restore. 2777 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI))) 2778 return eraseInstFromFunction(CI); 2779 break; 2780 } 2781 case Intrinsic::lifetime_end: 2782 // Asan needs to poison memory to detect invalid access which is possible 2783 // even for empty lifetime range. 2784 if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) || 2785 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) || 2786 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress)) 2787 break; 2788 2789 if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) { 2790 return I.getIntrinsicID() == Intrinsic::lifetime_start; 2791 })) 2792 return nullptr; 2793 break; 2794 case Intrinsic::assume: { 2795 Value *IIOperand = II->getArgOperand(0); 2796 SmallVector<OperandBundleDef, 4> OpBundles; 2797 II->getOperandBundlesAsDefs(OpBundles); 2798 2799 /// This will remove the boolean Condition from the assume given as 2800 /// argument and remove the assume if it becomes useless. 2801 /// always returns nullptr for use as a return values. 
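    /// (Illustrative: an assume such as `assume(%c) ["align"(%p, i64 16)]`
    /// keeps its operand bundle but has %c replaced by `true`, while a
    /// bundle-free assume is erased outright.)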
2802 auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * { 2803 assert(isa<AssumeInst>(Assume)); 2804 if (isAssumeWithEmptyBundle(*cast<AssumeInst>(II))) 2805 return eraseInstFromFunction(CI); 2806 replaceUse(II->getOperandUse(0), ConstantInt::getTrue(II->getContext())); 2807 return nullptr; 2808 }; 2809 // Remove an assume if it is followed by an identical assume. 2810 // TODO: Do we need this? Unless there are conflicting assumptions, the 2811 // computeKnownBits(IIOperand) below here eliminates redundant assumes. 2812 Instruction *Next = II->getNextNonDebugInstruction(); 2813 if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand)))) 2814 return RemoveConditionFromAssume(Next); 2815 2816 // Canonicalize assume(a && b) -> assume(a); assume(b); 2817 // Note: New assumption intrinsics created here are registered by 2818 // the InstCombineIRInserter object. 2819 FunctionType *AssumeIntrinsicTy = II->getFunctionType(); 2820 Value *AssumeIntrinsic = II->getCalledOperand(); 2821 Value *A, *B; 2822 if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) { 2823 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles, 2824 II->getName()); 2825 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName()); 2826 return eraseInstFromFunction(*II); 2827 } 2828 // assume(!(a || b)) -> assume(!a); assume(!b); 2829 if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) { 2830 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, 2831 Builder.CreateNot(A), OpBundles, II->getName()); 2832 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, 2833 Builder.CreateNot(B), II->getName()); 2834 return eraseInstFromFunction(*II); 2835 } 2836 2837 // assume( (load addr) != null ) -> add 'nonnull' metadata to load 2838 // (if assume is valid at the load) 2839 CmpInst::Predicate Pred; 2840 Instruction *LHS; 2841 if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) && 2842 Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load && 2843 LHS->getType()->isPointerTy() && 2844 isValidAssumeForContext(II, LHS, &DT)) { 2845 MDNode *MD = MDNode::get(II->getContext(), std::nullopt); 2846 LHS->setMetadata(LLVMContext::MD_nonnull, MD); 2847 LHS->setMetadata(LLVMContext::MD_noundef, MD); 2848 return RemoveConditionFromAssume(II); 2849 2850 // TODO: apply nonnull return attributes to calls and invokes 2851 // TODO: apply range metadata for range check patterns? 2852 } 2853 2854 // Separate storage assumptions apply to the underlying allocations, not any 2855 // particular pointer within them. When evaluating the hints for AA purposes 2856 // we getUnderlyingObject them; by precomputing the answers here we can 2857 // avoid having to do so repeatedly there. 2858 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) { 2859 OperandBundleUse OBU = II->getOperandBundleAt(Idx); 2860 if (OBU.getTagName() == "separate_storage") { 2861 assert(OBU.Inputs.size() == 2); 2862 auto MaybeSimplifyHint = [&](const Use &U) { 2863 Value *Hint = U.get(); 2864 // Not having a limit is safe because InstCombine removes unreachable 2865 // code. 
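          // Illustrative: a hint on `getelementptr i8, ptr %base, i64 8` is
          // rewritten to a hint on `%base`, since separate_storage is a claim
          // about whole allocations rather than interior pointers.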
2866 Value *UnderlyingObject = getUnderlyingObject(Hint, /*MaxLookup*/ 0); 2867 if (Hint != UnderlyingObject) 2868 replaceUse(const_cast<Use &>(U), UnderlyingObject); 2869 }; 2870 MaybeSimplifyHint(OBU.Inputs[0]); 2871 MaybeSimplifyHint(OBU.Inputs[1]); 2872 } 2873 } 2874 2875 // Convert nonnull assume like: 2876 // %A = icmp ne i32* %PTR, null 2877 // call void @llvm.assume(i1 %A) 2878 // into 2879 // call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ] 2880 if (EnableKnowledgeRetention && 2881 match(IIOperand, m_Cmp(Pred, m_Value(A), m_Zero())) && 2882 Pred == CmpInst::ICMP_NE && A->getType()->isPointerTy()) { 2883 if (auto *Replacement = buildAssumeFromKnowledge( 2884 {RetainedKnowledge{Attribute::NonNull, 0, A}}, Next, &AC, &DT)) { 2885 2886 Replacement->insertBefore(Next); 2887 AC.registerAssumption(Replacement); 2888 return RemoveConditionFromAssume(II); 2889 } 2890 } 2891 2892 // Convert alignment assume like: 2893 // %B = ptrtoint i32* %A to i64 2894 // %C = and i64 %B, Constant 2895 // %D = icmp eq i64 %C, 0 2896 // call void @llvm.assume(i1 %D) 2897 // into 2898 // call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 Constant + 1)] 2899 uint64_t AlignMask; 2900 if (EnableKnowledgeRetention && 2901 match(IIOperand, 2902 m_Cmp(Pred, m_And(m_Value(A), m_ConstantInt(AlignMask)), 2903 m_Zero())) && 2904 Pred == CmpInst::ICMP_EQ) { 2905 if (isPowerOf2_64(AlignMask + 1)) { 2906 uint64_t Offset = 0; 2907 match(A, m_Add(m_Value(A), m_ConstantInt(Offset))); 2908 if (match(A, m_PtrToInt(m_Value(A)))) { 2909 /// Note: this doesn't preserve the offset information but merges 2910 /// offset and alignment. 2911 /// TODO: we can generate a GEP instead of merging the alignment with 2912 /// the offset. 2913 RetainedKnowledge RK{Attribute::Alignment, 2914 (unsigned)MinAlign(Offset, AlignMask + 1), A}; 2915 if (auto *Replacement = 2916 buildAssumeFromKnowledge(RK, Next, &AC, &DT)) { 2917 2918 Replacement->insertAfter(II); 2919 AC.registerAssumption(Replacement); 2920 } 2921 return RemoveConditionFromAssume(II); 2922 } 2923 } 2924 } 2925 2926 /// Canonicalize Knowledge in operand bundles. 2927 if (EnableKnowledgeRetention && II->hasOperandBundles()) { 2928 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) { 2929 auto &BOI = II->bundle_op_info_begin()[Idx]; 2930 RetainedKnowledge RK = 2931 llvm::getKnowledgeFromBundle(cast<AssumeInst>(*II), BOI); 2932 if (BOI.End - BOI.Begin > 2) 2933 continue; // Prevent reducing knowledge in an align with offset since 2934 // extracting a RetainedKnowledge from them looses offset 2935 // information 2936 RetainedKnowledge CanonRK = 2937 llvm::simplifyRetainedKnowledge(cast<AssumeInst>(II), RK, 2938 &getAssumptionCache(), 2939 &getDominatorTree()); 2940 if (CanonRK == RK) 2941 continue; 2942 if (!CanonRK) { 2943 if (BOI.End - BOI.Begin > 0) { 2944 Worklist.pushValue(II->op_begin()[BOI.Begin]); 2945 Value::dropDroppableUse(II->op_begin()[BOI.Begin]); 2946 } 2947 continue; 2948 } 2949 assert(RK.AttrKind == CanonRK.AttrKind); 2950 if (BOI.End - BOI.Begin > 0) 2951 II->op_begin()[BOI.Begin].set(CanonRK.WasOn); 2952 if (BOI.End - BOI.Begin > 1) 2953 II->op_begin()[BOI.Begin + 1].set(ConstantInt::get( 2954 Type::getInt64Ty(II->getContext()), CanonRK.ArgValue)); 2955 if (RK.WasOn) 2956 Worklist.pushValue(RK.WasOn); 2957 return II; 2958 } 2959 } 2960 2961 // If there is a dominating assume with the same condition as this one, 2962 // then this one is redundant, and should be removed. 
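    // (computeKnownBits on the i1 condition already folds in dominating
    //  assumes, so a condition known to be all-ones here adds no information
    //  and a bundle-free assume can be erased.)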
2963 KnownBits Known(1);
2964 computeKnownBits(IIOperand, Known, 0, II);
2965 if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II)))
2966 return eraseInstFromFunction(*II);
2967
2968 // assume(false) is unreachable.
2969 if (match(IIOperand, m_CombineOr(m_Zero(), m_Undef()))) {
2970 CreateNonTerminatorUnreachable(II);
2971 return eraseInstFromFunction(*II);
2972 }
2973
2974 // Update the cache of affected values for this assumption (we might be
2975 // here because we just simplified the condition).
2976 AC.updateAffectedValues(cast<AssumeInst>(II));
2977 break;
2978 }
2979 case Intrinsic::experimental_guard: {
2980 // Is this guard followed by another guard? We scan forward over a small
2981 // fixed window of instructions to handle common cases with conditions
2982 // computed between guards.
2983 Instruction *NextInst = II->getNextNonDebugInstruction();
2984 for (unsigned i = 0; i < GuardWideningWindow; i++) {
2985 // Note: Using context-free form to avoid compile time blow up
2986 if (!isSafeToSpeculativelyExecute(NextInst))
2987 break;
2988 NextInst = NextInst->getNextNonDebugInstruction();
2989 }
2990 Value *NextCond = nullptr;
2991 if (match(NextInst,
2992 m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
2993 Value *CurrCond = II->getArgOperand(0);
2994
2995 // Remove a guard that is immediately preceded by an identical guard.
2996 // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
2997 if (CurrCond != NextCond) {
2998 Instruction *MoveI = II->getNextNonDebugInstruction();
2999 while (MoveI != NextInst) {
3000 auto *Temp = MoveI;
3001 MoveI = MoveI->getNextNonDebugInstruction();
3002 Temp->moveBefore(II);
3003 }
3004 replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
3005 }
3006 eraseInstFromFunction(*NextInst);
3007 return II;
3008 }
3009 break;
3010 }
3011 case Intrinsic::vector_insert: {
3012 Value *Vec = II->getArgOperand(0);
3013 Value *SubVec = II->getArgOperand(1);
3014 Value *Idx = II->getArgOperand(2);
3015 auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
3016 auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
3017 auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());
3018
3019 // Only canonicalize if the destination vector, Vec, and SubVec are all
3020 // fixed vectors.
3021 if (DstTy && VecTy && SubVecTy) {
3022 unsigned DstNumElts = DstTy->getNumElements();
3023 unsigned VecNumElts = VecTy->getNumElements();
3024 unsigned SubVecNumElts = SubVecTy->getNumElements();
3025 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
3026
3027 // An insert that entirely overwrites Vec with SubVec is a nop.
3028 if (VecNumElts == SubVecNumElts)
3029 return replaceInstUsesWith(CI, SubVec);
3030
3031 // Widen SubVec into a vector of the same width as Vec, since
3032 // shufflevector requires the two input vectors to be the same width.
3033 // Elements beyond the bounds of SubVec within the widened vector are
3034 // undefined.
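// Illustrative example: inserting a <2 x i32> SubVec into a <4 x i32> Vec at
// index 2 first widens SubVec with mask <0, 1, poison, poison> and then
// blends Vec with the widened value using mask <0, 1, 4, 5>.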
3035 SmallVector<int, 8> WidenMask;
3036 unsigned i;
3037 for (i = 0; i != SubVecNumElts; ++i)
3038 WidenMask.push_back(i);
3039 for (; i != VecNumElts; ++i)
3040 WidenMask.push_back(PoisonMaskElem);
3041
3042 Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);
3043
3044 SmallVector<int, 8> Mask;
3045 for (unsigned i = 0; i != IdxN; ++i)
3046 Mask.push_back(i);
3047 for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
3048 Mask.push_back(i);
3049 for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
3050 Mask.push_back(i);
3051
3052 Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
3053 return replaceInstUsesWith(CI, Shuffle);
3054 }
3055 break;
3056 }
3057 case Intrinsic::vector_extract: {
3058 Value *Vec = II->getArgOperand(0);
3059 Value *Idx = II->getArgOperand(1);
3060
3061 Type *ReturnType = II->getType();
3062 // (extract_vector (insert_vector InsertTuple, InsertValue, InsertIdx),
3063 // ExtractIdx)
3064 unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue();
3065 Value *InsertTuple, *InsertIdx, *InsertValue;
3066 if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(m_Value(InsertTuple),
3067 m_Value(InsertValue),
3068 m_Value(InsertIdx))) &&
3069 InsertValue->getType() == ReturnType) {
3070 unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();
3071 // Case where we get the same index right after setting it.
3072 // extract.vector(insert.vector(InsertTuple, InsertValue, Idx), Idx) -->
3073 // InsertValue
3074 if (ExtractIdx == Index)
3075 return replaceInstUsesWith(CI, InsertValue);
3076 // If we are getting a different index than what was set in the
3077 // insert.vector intrinsic, we can just set the input tuple to the one up
3078 // in the chain. extract.vector(insert.vector(InsertTuple, InsertValue,
3079 // InsertIndex), ExtractIndex)
3080 // --> extract.vector(InsertTuple, ExtractIndex)
3081 else
3082 return replaceOperand(CI, 0, InsertTuple);
3083 }
3084
3085 auto *DstTy = dyn_cast<VectorType>(ReturnType);
3086 auto *VecTy = dyn_cast<VectorType>(Vec->getType());
3087
3088 if (DstTy && VecTy) {
3089 auto DstEltCnt = DstTy->getElementCount();
3090 auto VecEltCnt = VecTy->getElementCount();
3091 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
3092
3093 // Extracting the entirety of Vec is a nop.
3094 if (DstEltCnt == VecTy->getElementCount()) {
3095 replaceInstUsesWith(CI, Vec);
3096 return eraseInstFromFunction(CI);
3097 }
3098
3099 // Only canonicalize to shufflevector if the destination vector and
3100 // Vec are fixed vectors.
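// Illustrative example: extracting a <2 x i32> at index 2 from a <4 x i32>
// Vec becomes a single-source shufflevector of Vec with mask <2, 3>.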
3101 if (VecEltCnt.isScalable() || DstEltCnt.isScalable()) 3102 break; 3103 3104 SmallVector<int, 8> Mask; 3105 for (unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i) 3106 Mask.push_back(IdxN + i); 3107 3108 Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask); 3109 return replaceInstUsesWith(CI, Shuffle); 3110 } 3111 break; 3112 } 3113 case Intrinsic::experimental_vector_reverse: { 3114 Value *BO0, *BO1, *X, *Y; 3115 Value *Vec = II->getArgOperand(0); 3116 if (match(Vec, m_OneUse(m_BinOp(m_Value(BO0), m_Value(BO1))))) { 3117 auto *OldBinOp = cast<BinaryOperator>(Vec); 3118 if (match(BO0, m_VecReverse(m_Value(X)))) { 3119 // rev(binop rev(X), rev(Y)) --> binop X, Y 3120 if (match(BO1, m_VecReverse(m_Value(Y)))) 3121 return replaceInstUsesWith(CI, 3122 BinaryOperator::CreateWithCopiedFlags( 3123 OldBinOp->getOpcode(), X, Y, OldBinOp, 3124 OldBinOp->getName(), II)); 3125 // rev(binop rev(X), BO1Splat) --> binop X, BO1Splat 3126 if (isSplatValue(BO1)) 3127 return replaceInstUsesWith(CI, 3128 BinaryOperator::CreateWithCopiedFlags( 3129 OldBinOp->getOpcode(), X, BO1, 3130 OldBinOp, OldBinOp->getName(), II)); 3131 } 3132 // rev(binop BO0Splat, rev(Y)) --> binop BO0Splat, Y 3133 if (match(BO1, m_VecReverse(m_Value(Y))) && isSplatValue(BO0)) 3134 return replaceInstUsesWith(CI, BinaryOperator::CreateWithCopiedFlags( 3135 OldBinOp->getOpcode(), BO0, Y, 3136 OldBinOp, OldBinOp->getName(), II)); 3137 } 3138 // rev(unop rev(X)) --> unop X 3139 if (match(Vec, m_OneUse(m_UnOp(m_VecReverse(m_Value(X)))))) { 3140 auto *OldUnOp = cast<UnaryOperator>(Vec); 3141 auto *NewUnOp = UnaryOperator::CreateWithCopiedFlags( 3142 OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(), II); 3143 return replaceInstUsesWith(CI, NewUnOp); 3144 } 3145 break; 3146 } 3147 case Intrinsic::vector_reduce_or: 3148 case Intrinsic::vector_reduce_and: { 3149 // Canonicalize logical or/and reductions: 3150 // Or reduction for i1 is represented as: 3151 // %val = bitcast <ReduxWidth x i1> to iReduxWidth 3152 // %res = cmp ne iReduxWidth %val, 0 3153 // And reduction for i1 is represented as: 3154 // %val = bitcast <ReduxWidth x i1> to iReduxWidth 3155 // %res = cmp eq iReduxWidth %val, 11111 3156 Value *Arg = II->getArgOperand(0); 3157 Value *Vect; 3158 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3159 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3160 if (FTy->getElementType() == Builder.getInt1Ty()) { 3161 Value *Res = Builder.CreateBitCast( 3162 Vect, Builder.getIntNTy(FTy->getNumElements())); 3163 if (IID == Intrinsic::vector_reduce_and) { 3164 Res = Builder.CreateICmpEQ( 3165 Res, ConstantInt::getAllOnesValue(Res->getType())); 3166 } else { 3167 assert(IID == Intrinsic::vector_reduce_or && 3168 "Expected or reduction."); 3169 Res = Builder.CreateIsNotNull(Res); 3170 } 3171 if (Arg != Vect) 3172 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 3173 II->getType()); 3174 return replaceInstUsesWith(CI, Res); 3175 } 3176 } 3177 [[fallthrough]]; 3178 } 3179 case Intrinsic::vector_reduce_add: { 3180 if (IID == Intrinsic::vector_reduce_add) { 3181 // Convert vector_reduce_add(ZExt(<n x i1>)) to 3182 // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)). 3183 // Convert vector_reduce_add(SExt(<n x i1>)) to 3184 // -ZExtOrTrunc(ctpop(bitcast <n x i1> to in)). 3185 // Convert vector_reduce_add(<n x i1>) to 3186 // Trunc(ctpop(bitcast <n x i1> to in)). 
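// For example, vector_reduce_add(<4 x i1> %x) is the parity of %x, i.e.
// trunc(ctpop(bitcast <4 x i1> %x to i4)) to i1, while the zext'd form
// simply counts the set lanes.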
3187 Value *Arg = II->getArgOperand(0); 3188 Value *Vect; 3189 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3190 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3191 if (FTy->getElementType() == Builder.getInt1Ty()) { 3192 Value *V = Builder.CreateBitCast( 3193 Vect, Builder.getIntNTy(FTy->getNumElements())); 3194 Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V); 3195 if (Res->getType() != II->getType()) 3196 Res = Builder.CreateZExtOrTrunc(Res, II->getType()); 3197 if (Arg != Vect && 3198 cast<Instruction>(Arg)->getOpcode() == Instruction::SExt) 3199 Res = Builder.CreateNeg(Res); 3200 return replaceInstUsesWith(CI, Res); 3201 } 3202 } 3203 } 3204 [[fallthrough]]; 3205 } 3206 case Intrinsic::vector_reduce_xor: { 3207 if (IID == Intrinsic::vector_reduce_xor) { 3208 // Exclusive disjunction reduction over the vector with 3209 // (potentially-extended) i1 element type is actually a 3210 // (potentially-extended) arithmetic `add` reduction over the original 3211 // non-extended value: 3212 // vector_reduce_xor(?ext(<n x i1>)) 3213 // --> 3214 // ?ext(vector_reduce_add(<n x i1>)) 3215 Value *Arg = II->getArgOperand(0); 3216 Value *Vect; 3217 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3218 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3219 if (FTy->getElementType() == Builder.getInt1Ty()) { 3220 Value *Res = Builder.CreateAddReduce(Vect); 3221 if (Arg != Vect) 3222 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 3223 II->getType()); 3224 return replaceInstUsesWith(CI, Res); 3225 } 3226 } 3227 } 3228 [[fallthrough]]; 3229 } 3230 case Intrinsic::vector_reduce_mul: { 3231 if (IID == Intrinsic::vector_reduce_mul) { 3232 // Multiplicative reduction over the vector with (potentially-extended) 3233 // i1 element type is actually a (potentially zero-extended) 3234 // logical `and` reduction over the original non-extended value: 3235 // vector_reduce_mul(?ext(<n x i1>)) 3236 // --> 3237 // zext(vector_reduce_and(<n x i1>)) 3238 Value *Arg = II->getArgOperand(0); 3239 Value *Vect; 3240 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3241 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3242 if (FTy->getElementType() == Builder.getInt1Ty()) { 3243 Value *Res = Builder.CreateAndReduce(Vect); 3244 if (Res->getType() != II->getType()) 3245 Res = Builder.CreateZExt(Res, II->getType()); 3246 return replaceInstUsesWith(CI, Res); 3247 } 3248 } 3249 } 3250 [[fallthrough]]; 3251 } 3252 case Intrinsic::vector_reduce_umin: 3253 case Intrinsic::vector_reduce_umax: { 3254 if (IID == Intrinsic::vector_reduce_umin || 3255 IID == Intrinsic::vector_reduce_umax) { 3256 // UMin/UMax reduction over the vector with (potentially-extended) 3257 // i1 element type is actually a (potentially-extended) 3258 // logical `and`/`or` reduction over the original non-extended value: 3259 // vector_reduce_u{min,max}(?ext(<n x i1>)) 3260 // --> 3261 // ?ext(vector_reduce_{and,or}(<n x i1>)) 3262 Value *Arg = II->getArgOperand(0); 3263 Value *Vect; 3264 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3265 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3266 if (FTy->getElementType() == Builder.getInt1Ty()) { 3267 Value *Res = IID == Intrinsic::vector_reduce_umin 3268 ? 
Builder.CreateAndReduce(Vect) 3269 : Builder.CreateOrReduce(Vect); 3270 if (Arg != Vect) 3271 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 3272 II->getType()); 3273 return replaceInstUsesWith(CI, Res); 3274 } 3275 } 3276 } 3277 [[fallthrough]]; 3278 } 3279 case Intrinsic::vector_reduce_smin: 3280 case Intrinsic::vector_reduce_smax: { 3281 if (IID == Intrinsic::vector_reduce_smin || 3282 IID == Intrinsic::vector_reduce_smax) { 3283 // SMin/SMax reduction over the vector with (potentially-extended) 3284 // i1 element type is actually a (potentially-extended) 3285 // logical `and`/`or` reduction over the original non-extended value: 3286 // vector_reduce_s{min,max}(<n x i1>) 3287 // --> 3288 // vector_reduce_{or,and}(<n x i1>) 3289 // and 3290 // vector_reduce_s{min,max}(sext(<n x i1>)) 3291 // --> 3292 // sext(vector_reduce_{or,and}(<n x i1>)) 3293 // and 3294 // vector_reduce_s{min,max}(zext(<n x i1>)) 3295 // --> 3296 // zext(vector_reduce_{and,or}(<n x i1>)) 3297 Value *Arg = II->getArgOperand(0); 3298 Value *Vect; 3299 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 3300 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 3301 if (FTy->getElementType() == Builder.getInt1Ty()) { 3302 Instruction::CastOps ExtOpc = Instruction::CastOps::CastOpsEnd; 3303 if (Arg != Vect) 3304 ExtOpc = cast<CastInst>(Arg)->getOpcode(); 3305 Value *Res = ((IID == Intrinsic::vector_reduce_smin) == 3306 (ExtOpc == Instruction::CastOps::ZExt)) 3307 ? Builder.CreateAndReduce(Vect) 3308 : Builder.CreateOrReduce(Vect); 3309 if (Arg != Vect) 3310 Res = Builder.CreateCast(ExtOpc, Res, II->getType()); 3311 return replaceInstUsesWith(CI, Res); 3312 } 3313 } 3314 } 3315 [[fallthrough]]; 3316 } 3317 case Intrinsic::vector_reduce_fmax: 3318 case Intrinsic::vector_reduce_fmin: 3319 case Intrinsic::vector_reduce_fadd: 3320 case Intrinsic::vector_reduce_fmul: { 3321 bool CanBeReassociated = (IID != Intrinsic::vector_reduce_fadd && 3322 IID != Intrinsic::vector_reduce_fmul) || 3323 II->hasAllowReassoc(); 3324 const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd || 3325 IID == Intrinsic::vector_reduce_fmul) 3326 ? 1 3327 : 0; 3328 Value *Arg = II->getArgOperand(ArgIdx); 3329 Value *V; 3330 ArrayRef<int> Mask; 3331 if (!isa<FixedVectorType>(Arg->getType()) || !CanBeReassociated || 3332 !match(Arg, m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))) || 3333 !cast<ShuffleVectorInst>(Arg)->isSingleSource()) 3334 break; 3335 int Sz = Mask.size(); 3336 SmallBitVector UsedIndices(Sz); 3337 for (int Idx : Mask) { 3338 if (Idx == PoisonMaskElem || UsedIndices.test(Idx)) 3339 break; 3340 UsedIndices.set(Idx); 3341 } 3342 // Can remove shuffle iff just shuffled elements, no repeats, undefs, or 3343 // other changes. 3344 if (UsedIndices.all()) { 3345 replaceUse(II->getOperandUse(ArgIdx), V); 3346 return nullptr; 3347 } 3348 break; 3349 } 3350 case Intrinsic::is_fpclass: { 3351 if (Instruction *I = foldIntrinsicIsFPClass(*II)) 3352 return I; 3353 break; 3354 } 3355 default: { 3356 // Handle target specific intrinsics 3357 std::optional<Instruction *> V = targetInstCombineIntrinsic(*II); 3358 if (V) 3359 return *V; 3360 break; 3361 } 3362 } 3363 3364 // Try to fold intrinsic into select operands. This is legal if: 3365 // * The intrinsic is speculatable. 3366 // * The select condition is not a vector, or the intrinsic does not 3367 // perform cross-lane operations. 
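// For example, ctpop(select i1 %c, i32 %x, i32 8) may be folded to
// select i1 %c, i32 ctpop(%x), i32 1, since ctpop is speculatable and
// does not operate across lanes.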
3368 switch (IID) {
3369 case Intrinsic::ctlz:
3370 case Intrinsic::cttz:
3371 case Intrinsic::ctpop:
3372 case Intrinsic::umin:
3373 case Intrinsic::umax:
3374 case Intrinsic::smin:
3375 case Intrinsic::smax:
3376 case Intrinsic::usub_sat:
3377 case Intrinsic::uadd_sat:
3378 case Intrinsic::ssub_sat:
3379 case Intrinsic::sadd_sat:
3380 for (Value *Op : II->args())
3381 if (auto *Sel = dyn_cast<SelectInst>(Op))
3382 if (Instruction *R = FoldOpIntoSelect(*II, Sel))
3383 return R;
3384 [[fallthrough]];
3385 default:
3386 break;
3387 }
3388
3389 if (Instruction *Shuf = foldShuffledIntrinsicOperands(II, Builder))
3390 return Shuf;
3391
3392 // Some intrinsics (like experimental_gc_statepoint) can be used in invoke
3393 // context, so they are handled in visitCallBase, which we invoke here.
3394 return visitCallBase(*II);
3395 }
3396
3397 // Fence instruction simplification
3398 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
3399 auto *NFI = dyn_cast<FenceInst>(FI.getNextNonDebugInstruction());
3400 // This check is solely here to handle arbitrary target-dependent syncscopes.
3401 // TODO: Can remove if this does not matter in practice.
3402 if (NFI && FI.isIdenticalTo(NFI))
3403 return eraseInstFromFunction(FI);
3404
3405 // Returns true if FI1 is identical to or a stronger fence than FI2.
3406 auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) {
3407 auto FI1SyncScope = FI1->getSyncScopeID();
3408 // Consider same scope, where scope is global or single-thread.
3409 if (FI1SyncScope != FI2->getSyncScopeID() ||
3410 (FI1SyncScope != SyncScope::System &&
3411 FI1SyncScope != SyncScope::SingleThread))
3412 return false;
3413
3414 return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering());
3415 };
3416 if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
3417 return eraseInstFromFunction(FI);
3418
3419 if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNonDebugInstruction()))
3420 if (isIdenticalOrStrongerFence(PFI, &FI))
3421 return eraseInstFromFunction(FI);
3422 return nullptr;
3423 }
3424
3425 // InvokeInst simplification
3426 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
3427 return visitCallBase(II);
3428 }
3429
3430 // CallBrInst simplification
3431 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
3432 return visitCallBase(CBI);
3433 }
3434
3435 Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
3436 if (!CI->getCalledFunction()) return nullptr;
3437
3438 // Skip optimizing notail and musttail calls so
3439 // LibCallSimplifier::optimizeCall doesn't have to preserve those invariants.
3440 // LibCallSimplifier::optimizeCall should try to preserve tail calls though.
3441 if (CI->isMustTailCall() || CI->isNoTailCall())
3442 return nullptr;
3443
3444 auto InstCombineRAUW = [this](Instruction *From, Value *With) {
3445 replaceInstUsesWith(*From, With);
3446 };
3447 auto InstCombineErase = [this](Instruction *I) {
3448 eraseInstFromFunction(*I);
3449 };
3450 LibCallSimplifier Simplifier(DL, &TLI, &AC, ORE, BFI, PSI, InstCombineRAUW,
3451 InstCombineErase);
3452 if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
3453 ++NumSimplified;
3454 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
3455 }
3456
3457 return nullptr;
3458 }
3459
3460 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
3461 // Strip off at most one level of pointer casts, looking for an alloca. This
3462 // is good enough in practice and simpler than handling any number of casts.
3463 Value *Underlying = TrampMem->stripPointerCasts(); 3464 if (Underlying != TrampMem && 3465 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem)) 3466 return nullptr; 3467 if (!isa<AllocaInst>(Underlying)) 3468 return nullptr; 3469 3470 IntrinsicInst *InitTrampoline = nullptr; 3471 for (User *U : TrampMem->users()) { 3472 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 3473 if (!II) 3474 return nullptr; 3475 if (II->getIntrinsicID() == Intrinsic::init_trampoline) { 3476 if (InitTrampoline) 3477 // More than one init_trampoline writes to this value. Give up. 3478 return nullptr; 3479 InitTrampoline = II; 3480 continue; 3481 } 3482 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline) 3483 // Allow any number of calls to adjust.trampoline. 3484 continue; 3485 return nullptr; 3486 } 3487 3488 // No call to init.trampoline found. 3489 if (!InitTrampoline) 3490 return nullptr; 3491 3492 // Check that the alloca is being used in the expected way. 3493 if (InitTrampoline->getOperand(0) != TrampMem) 3494 return nullptr; 3495 3496 return InitTrampoline; 3497 } 3498 3499 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp, 3500 Value *TrampMem) { 3501 // Visit all the previous instructions in the basic block, and try to find a 3502 // init.trampoline which has a direct path to the adjust.trampoline. 3503 for (BasicBlock::iterator I = AdjustTramp->getIterator(), 3504 E = AdjustTramp->getParent()->begin(); 3505 I != E;) { 3506 Instruction *Inst = &*--I; 3507 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 3508 if (II->getIntrinsicID() == Intrinsic::init_trampoline && 3509 II->getOperand(0) == TrampMem) 3510 return II; 3511 if (Inst->mayWriteToMemory()) 3512 return nullptr; 3513 } 3514 return nullptr; 3515 } 3516 3517 // Given a call to llvm.adjust.trampoline, find and return the corresponding 3518 // call to llvm.init.trampoline if the call to the trampoline can be optimized 3519 // to a direct call to a function. Otherwise return NULL. 3520 static IntrinsicInst *findInitTrampoline(Value *Callee) { 3521 Callee = Callee->stripPointerCasts(); 3522 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee); 3523 if (!AdjustTramp || 3524 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline) 3525 return nullptr; 3526 3527 Value *TrampMem = AdjustTramp->getOperand(0); 3528 3529 if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem)) 3530 return IT; 3531 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem)) 3532 return IT; 3533 return nullptr; 3534 } 3535 3536 bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call, 3537 const TargetLibraryInfo *TLI) { 3538 // Note: We only handle cases which can't be driven from generic attributes 3539 // here. So, for example, nonnull and noalias (which are common properties 3540 // of some allocation functions) are expected to be handled via annotation 3541 // of the respective allocator declaration with generic attributes. 3542 bool Changed = false; 3543 3544 if (!Call.getType()->isPointerTy()) 3545 return Changed; 3546 3547 std::optional<APInt> Size = getAllocSize(&Call, TLI); 3548 if (Size && *Size != 0) { 3549 // TODO: We really should just emit deref_or_null here and then 3550 // let the generic inference code combine that with nonnull. 
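// E.g. a call to malloc(i64 16) gets a dereferenceable_or_null(16) return
// attribute, or dereferenceable(16) when the return value is already known
// to be nonnull.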
3551 if (Call.hasRetAttr(Attribute::NonNull)) { 3552 Changed = !Call.hasRetAttr(Attribute::Dereferenceable); 3553 Call.addRetAttr(Attribute::getWithDereferenceableBytes( 3554 Call.getContext(), Size->getLimitedValue())); 3555 } else { 3556 Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull); 3557 Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes( 3558 Call.getContext(), Size->getLimitedValue())); 3559 } 3560 } 3561 3562 // Add alignment attribute if alignment is a power of two constant. 3563 Value *Alignment = getAllocAlignment(&Call, TLI); 3564 if (!Alignment) 3565 return Changed; 3566 3567 ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment); 3568 if (AlignOpC && AlignOpC->getValue().ult(llvm::Value::MaximumAlignment)) { 3569 uint64_t AlignmentVal = AlignOpC->getZExtValue(); 3570 if (llvm::isPowerOf2_64(AlignmentVal)) { 3571 Align ExistingAlign = Call.getRetAlign().valueOrOne(); 3572 Align NewAlign = Align(AlignmentVal); 3573 if (NewAlign > ExistingAlign) { 3574 Call.addRetAttr( 3575 Attribute::getWithAlignment(Call.getContext(), NewAlign)); 3576 Changed = true; 3577 } 3578 } 3579 } 3580 return Changed; 3581 } 3582 3583 /// Improvements for call, callbr and invoke instructions. 3584 Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) { 3585 bool Changed = annotateAnyAllocSite(Call, &TLI); 3586 3587 // Mark any parameters that are known to be non-null with the nonnull 3588 // attribute. This is helpful for inlining calls to functions with null 3589 // checks on their arguments. 3590 SmallVector<unsigned, 4> ArgNos; 3591 unsigned ArgNo = 0; 3592 3593 for (Value *V : Call.args()) { 3594 if (V->getType()->isPointerTy() && 3595 !Call.paramHasAttr(ArgNo, Attribute::NonNull) && 3596 isKnownNonZero(V, DL, 0, &AC, &Call, &DT)) 3597 ArgNos.push_back(ArgNo); 3598 ArgNo++; 3599 } 3600 3601 assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly."); 3602 3603 if (!ArgNos.empty()) { 3604 AttributeList AS = Call.getAttributes(); 3605 LLVMContext &Ctx = Call.getContext(); 3606 AS = AS.addParamAttribute(Ctx, ArgNos, 3607 Attribute::get(Ctx, Attribute::NonNull)); 3608 Call.setAttributes(AS); 3609 Changed = true; 3610 } 3611 3612 // If the callee is a pointer to a function, attempt to move any casts to the 3613 // arguments of the call/callbr/invoke. 3614 Value *Callee = Call.getCalledOperand(); 3615 Function *CalleeF = dyn_cast<Function>(Callee); 3616 if ((!CalleeF || CalleeF->getFunctionType() != Call.getFunctionType()) && 3617 transformConstExprCastCall(Call)) 3618 return nullptr; 3619 3620 if (CalleeF) { 3621 // Remove the convergent attr on calls when the callee is not convergent. 3622 if (Call.isConvergent() && !CalleeF->isConvergent() && 3623 !CalleeF->isIntrinsic()) { 3624 LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call 3625 << "\n"); 3626 Call.setNotConvergent(); 3627 return &Call; 3628 } 3629 3630 // If the call and callee calling conventions don't match, and neither one 3631 // of the calling conventions is compatible with C calling convention 3632 // this call must be unreachable, as the call is undefined. 3633 if ((CalleeF->getCallingConv() != Call.getCallingConv() && 3634 !(CalleeF->getCallingConv() == llvm::CallingConv::C && 3635 TargetLibraryInfoImpl::isCallingConvCCompatible(&Call)) && 3636 !(Call.getCallingConv() == llvm::CallingConv::C && 3637 TargetLibraryInfoImpl::isCallingConvCCompatible(CalleeF))) && 3638 // Only do this for calls to a function with a body. 
A prototype may
3639 // not actually end up matching the implementation's calling conv for a
3640 // variety of reasons (e.g. it may be written in assembly).
3641 !CalleeF->isDeclaration()) {
3642 Instruction *OldCall = &Call;
3643 CreateNonTerminatorUnreachable(OldCall);
3644 // If OldCall does not return void then replace its uses with poison.
3645 // This allows ValueHandlers and custom metadata to adjust themselves.
3646 if (!OldCall->getType()->isVoidTy())
3647 replaceInstUsesWith(*OldCall, PoisonValue::get(OldCall->getType()));
3648 if (isa<CallInst>(OldCall))
3649 return eraseInstFromFunction(*OldCall);
3650
3651 // We cannot remove an invoke or a callbr because that would change the
3652 // CFG; just change the callee to a null pointer.
3653 cast<CallBase>(OldCall)->setCalledFunction(
3654 CalleeF->getFunctionType(),
3655 Constant::getNullValue(CalleeF->getType()));
3656 return nullptr;
3657 }
3658 }
3659
3660 // Calling a null function pointer is undefined if a null address isn't
3661 // dereferenceable.
3662 if ((isa<ConstantPointerNull>(Callee) &&
3663 !NullPointerIsDefined(Call.getFunction())) ||
3664 isa<UndefValue>(Callee)) {
3665 // If Call does not return void then replace its uses with poison.
3666 // This allows ValueHandlers and custom metadata to adjust themselves.
3667 if (!Call.getType()->isVoidTy())
3668 replaceInstUsesWith(Call, PoisonValue::get(Call.getType()));
3669
3670 if (Call.isTerminator()) {
3671 // Can't remove an invoke or callbr because we cannot change the CFG.
3672 return nullptr;
3673 }
3674
3675 // This instruction is not reachable; just remove it.
3676 CreateNonTerminatorUnreachable(&Call);
3677 return eraseInstFromFunction(Call);
3678 }
3679
3680 if (IntrinsicInst *II = findInitTrampoline(Callee))
3681 return transformCallThroughTrampoline(Call, *II);
3682
3683 if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
3684 InlineAsm *IA = cast<InlineAsm>(Callee);
3685 if (!IA->canThrow()) {
3686 // Normal inline asm calls cannot throw - mark them
3687 // 'nounwind'.
3688 Call.setDoesNotThrow();
3689 Changed = true;
3690 }
3691 }
3692
3693 // Try to optimize the call if possible; we require DataLayout for most of
3694 // this. None of these calls are seen as possibly dead, so go ahead and
3695 // delete the instruction now.
3696 if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
3697 Instruction *I = tryOptimizeCall(CI);
3698 // If we changed something, return the result; otherwise fall through
3699 // to the checks below.
3700 if (I) return eraseInstFromFunction(*I);
3701 }
3702
3703 if (!Call.use_empty() && !Call.isMustTailCall())
3704 if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
3705 Type *CallTy = Call.getType();
3706 Type *RetArgTy = ReturnedArg->getType();
3707 if (RetArgTy->canLosslesslyBitCastTo(CallTy))
3708 return replaceInstUsesWith(
3709 Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
3710 }
3711
3712 // Drop unnecessary kcfi operand bundles from calls that were converted
3713 // into direct calls.
3714 auto Bundle = Call.getOperandBundle(LLVMContext::OB_kcfi); 3715 if (Bundle && !Call.isIndirectCall()) { 3716 DEBUG_WITH_TYPE(DEBUG_TYPE "-kcfi", { 3717 if (CalleeF) { 3718 ConstantInt *FunctionType = nullptr; 3719 ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]); 3720 3721 if (MDNode *MD = CalleeF->getMetadata(LLVMContext::MD_kcfi_type)) 3722 FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0)); 3723 3724 if (FunctionType && 3725 FunctionType->getZExtValue() != ExpectedType->getZExtValue()) 3726 dbgs() << Call.getModule()->getName() 3727 << ": warning: kcfi: " << Call.getCaller()->getName() 3728 << ": call to " << CalleeF->getName() 3729 << " using a mismatching function pointer type\n"; 3730 } 3731 }); 3732 3733 return CallBase::removeOperandBundle(&Call, LLVMContext::OB_kcfi); 3734 } 3735 3736 if (isRemovableAlloc(&Call, &TLI)) 3737 return visitAllocSite(Call); 3738 3739 // Handle intrinsics which can be used in both call and invoke context. 3740 switch (Call.getIntrinsicID()) { 3741 case Intrinsic::experimental_gc_statepoint: { 3742 GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call); 3743 SmallPtrSet<Value *, 32> LiveGcValues; 3744 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) { 3745 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc); 3746 3747 // Remove the relocation if unused. 3748 if (GCR.use_empty()) { 3749 eraseInstFromFunction(GCR); 3750 continue; 3751 } 3752 3753 Value *DerivedPtr = GCR.getDerivedPtr(); 3754 Value *BasePtr = GCR.getBasePtr(); 3755 3756 // Undef is undef, even after relocation. 3757 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) { 3758 replaceInstUsesWith(GCR, UndefValue::get(GCR.getType())); 3759 eraseInstFromFunction(GCR); 3760 continue; 3761 } 3762 3763 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) { 3764 // The relocation of null will be null for most any collector. 3765 // TODO: provide a hook for this in GCStrategy. There might be some 3766 // weird collector this property does not hold for. 3767 if (isa<ConstantPointerNull>(DerivedPtr)) { 3768 // Use null-pointer of gc_relocate's type to replace it. 3769 replaceInstUsesWith(GCR, ConstantPointerNull::get(PT)); 3770 eraseInstFromFunction(GCR); 3771 continue; 3772 } 3773 3774 // isKnownNonNull -> nonnull attribute 3775 if (!GCR.hasRetAttr(Attribute::NonNull) && 3776 isKnownNonZero(DerivedPtr, DL, 0, &AC, &Call, &DT)) { 3777 GCR.addRetAttr(Attribute::NonNull); 3778 // We discovered new fact, re-check users. 3779 Worklist.pushUsersToWorkList(GCR); 3780 } 3781 } 3782 3783 // If we have two copies of the same pointer in the statepoint argument 3784 // list, canonicalize to one. This may let us common gc.relocates. 3785 if (GCR.getBasePtr() == GCR.getDerivedPtr() && 3786 GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) { 3787 auto *OpIntTy = GCR.getOperand(2)->getType(); 3788 GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex())); 3789 } 3790 3791 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p)) 3792 // Canonicalize on the type from the uses to the defs 3793 3794 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...) 3795 LiveGcValues.insert(BasePtr); 3796 LiveGcValues.insert(DerivedPtr); 3797 } 3798 std::optional<OperandBundleUse> Bundle = 3799 GCSP.getOperandBundle(LLVMContext::OB_gc_live); 3800 unsigned NumOfGCLives = LiveGcValues.size(); 3801 if (!Bundle || NumOfGCLives == Bundle->Inputs.size()) 3802 break; 3803 // We can reduce the size of gc live bundle. 
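// Assign each still-live value a fresh index, rewrite every gc.relocate's
// base/derived indices to point into the shrunken bundle, and then recreate
// the statepoint with the new "gc-live" bundle.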
3804 DenseMap<Value *, unsigned> Val2Idx; 3805 std::vector<Value *> NewLiveGc; 3806 for (Value *V : Bundle->Inputs) { 3807 if (Val2Idx.count(V)) 3808 continue; 3809 if (LiveGcValues.count(V)) { 3810 Val2Idx[V] = NewLiveGc.size(); 3811 NewLiveGc.push_back(V); 3812 } else 3813 Val2Idx[V] = NumOfGCLives; 3814 } 3815 // Update all gc.relocates 3816 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) { 3817 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc); 3818 Value *BasePtr = GCR.getBasePtr(); 3819 assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives && 3820 "Missed live gc for base pointer"); 3821 auto *OpIntTy1 = GCR.getOperand(1)->getType(); 3822 GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr])); 3823 Value *DerivedPtr = GCR.getDerivedPtr(); 3824 assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives && 3825 "Missed live gc for derived pointer"); 3826 auto *OpIntTy2 = GCR.getOperand(2)->getType(); 3827 GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr])); 3828 } 3829 // Create new statepoint instruction. 3830 OperandBundleDef NewBundle("gc-live", NewLiveGc); 3831 return CallBase::Create(&Call, NewBundle); 3832 } 3833 default: { break; } 3834 } 3835 3836 return Changed ? &Call : nullptr; 3837 } 3838 3839 /// If the callee is a constexpr cast of a function, attempt to move the cast to 3840 /// the arguments of the call/invoke. 3841 /// CallBrInst is not supported. 3842 bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) { 3843 auto *Callee = 3844 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts()); 3845 if (!Callee) 3846 return false; 3847 3848 assert(!isa<CallBrInst>(Call) && 3849 "CallBr's don't have a single point after a def to insert at"); 3850 3851 // If this is a call to a thunk function, don't remove the cast. Thunks are 3852 // used to transparently forward all incoming parameters and outgoing return 3853 // values, so it's important to leave the cast in place. 3854 if (Callee->hasFnAttribute("thunk")) 3855 return false; 3856 3857 // If this is a call to a naked function, the assembly might be 3858 // using an argument, or otherwise rely on the frame layout, 3859 // the function prototype will mismatch. 3860 if (Callee->hasFnAttribute(Attribute::Naked)) 3861 return false; 3862 3863 // If this is a musttail call, the callee's prototype must match the caller's 3864 // prototype with the exception of pointee types. The code below doesn't 3865 // implement that, so we can't do this transform. 3866 // TODO: Do the transform if it only requires adding pointer casts. 3867 if (Call.isMustTailCall()) 3868 return false; 3869 3870 Instruction *Caller = &Call; 3871 const AttributeList &CallerPAL = Call.getAttributes(); 3872 3873 // Okay, this is a cast from a function to a different type. Unless doing so 3874 // would cause a type conversion of one of our arguments, change this call to 3875 // be a direct call with arguments casted to the appropriate types. 3876 FunctionType *FT = Callee->getFunctionType(); 3877 Type *OldRetTy = Caller->getType(); 3878 Type *NewRetTy = FT->getReturnType(); 3879 3880 // Check to see if we are changing the return type... 3881 if (OldRetTy != NewRetTy) { 3882 3883 if (NewRetTy->isStructTy()) 3884 return false; // TODO: Handle multiple return values. 3885 3886 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) { 3887 if (Callee->isDeclaration()) 3888 return false; // Cannot transform this return value. 
3889 3890 if (!Caller->use_empty() && 3891 // void -> non-void is handled specially 3892 !NewRetTy->isVoidTy()) 3893 return false; // Cannot transform this return value. 3894 } 3895 3896 if (!CallerPAL.isEmpty() && !Caller->use_empty()) { 3897 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs()); 3898 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy))) 3899 return false; // Attribute not compatible with transformed value. 3900 } 3901 3902 // If the callbase is an invoke instruction, and the return value is 3903 // used by a PHI node in a successor, we cannot change the return type of 3904 // the call because there is no place to put the cast instruction (without 3905 // breaking the critical edge). Bail out in this case. 3906 if (!Caller->use_empty()) { 3907 BasicBlock *PhisNotSupportedBlock = nullptr; 3908 if (auto *II = dyn_cast<InvokeInst>(Caller)) 3909 PhisNotSupportedBlock = II->getNormalDest(); 3910 if (PhisNotSupportedBlock) 3911 for (User *U : Caller->users()) 3912 if (PHINode *PN = dyn_cast<PHINode>(U)) 3913 if (PN->getParent() == PhisNotSupportedBlock) 3914 return false; 3915 } 3916 } 3917 3918 unsigned NumActualArgs = Call.arg_size(); 3919 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs); 3920 3921 // Prevent us turning: 3922 // declare void @takes_i32_inalloca(i32* inalloca) 3923 // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0) 3924 // 3925 // into: 3926 // call void @takes_i32_inalloca(i32* null) 3927 // 3928 // Similarly, avoid folding away bitcasts of byval calls. 3929 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) || 3930 Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated)) 3931 return false; 3932 3933 auto AI = Call.arg_begin(); 3934 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) { 3935 Type *ParamTy = FT->getParamType(i); 3936 Type *ActTy = (*AI)->getType(); 3937 3938 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL)) 3939 return false; // Cannot transform this parameter value. 3940 3941 // Check if there are any incompatible attributes we cannot drop safely. 3942 if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i)) 3943 .overlaps(AttributeFuncs::typeIncompatible( 3944 ParamTy, AttributeFuncs::ASK_UNSAFE_TO_DROP))) 3945 return false; // Attribute not compatible with transformed value. 3946 3947 if (Call.isInAllocaArgument(i) || 3948 CallerPAL.hasParamAttr(i, Attribute::Preallocated)) 3949 return false; // Cannot transform to and from inalloca/preallocated. 3950 3951 if (CallerPAL.hasParamAttr(i, Attribute::SwiftError)) 3952 return false; 3953 3954 if (CallerPAL.hasParamAttr(i, Attribute::ByVal) != 3955 Callee->getAttributes().hasParamAttr(i, Attribute::ByVal)) 3956 return false; // Cannot transform to or from byval. 3957 } 3958 3959 if (Callee->isDeclaration()) { 3960 // Do not delete arguments unless we have a function body. 3961 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg()) 3962 return false; 3963 3964 // If the callee is just a declaration, don't change the varargsness of the 3965 // call. We don't want to introduce a varargs call where one doesn't 3966 // already exist. 3967 if (FT->isVarArg() != Call.getFunctionType()->isVarArg()) 3968 return false; 3969 3970 // If both the callee and the cast type are varargs, we still have to make 3971 // sure the number of fixed parameters are the same or we have the same 3972 // ABI issues as if we introduce a varargs call. 
3973 if (FT->isVarArg() && Call.getFunctionType()->isVarArg() && 3974 FT->getNumParams() != Call.getFunctionType()->getNumParams()) 3975 return false; 3976 } 3977 3978 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() && 3979 !CallerPAL.isEmpty()) { 3980 // In this case we have more arguments than the new function type, but we 3981 // won't be dropping them. Check that these extra arguments have attributes 3982 // that are compatible with being a vararg call argument. 3983 unsigned SRetIdx; 3984 if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) && 3985 SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams()) 3986 return false; 3987 } 3988 3989 // Okay, we decided that this is a safe thing to do: go ahead and start 3990 // inserting cast instructions as necessary. 3991 SmallVector<Value *, 8> Args; 3992 SmallVector<AttributeSet, 8> ArgAttrs; 3993 Args.reserve(NumActualArgs); 3994 ArgAttrs.reserve(NumActualArgs); 3995 3996 // Get any return attributes. 3997 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs()); 3998 3999 // If the return value is not being used, the type may not be compatible 4000 // with the existing attributes. Wipe out any problematic attributes. 4001 RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy)); 4002 4003 LLVMContext &Ctx = Call.getContext(); 4004 AI = Call.arg_begin(); 4005 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) { 4006 Type *ParamTy = FT->getParamType(i); 4007 4008 Value *NewArg = *AI; 4009 if ((*AI)->getType() != ParamTy) 4010 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy); 4011 Args.push_back(NewArg); 4012 4013 // Add any parameter attributes except the ones incompatible with the new 4014 // type. Note that we made sure all incompatible ones are safe to drop. 4015 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible( 4016 ParamTy, AttributeFuncs::ASK_SAFE_TO_DROP); 4017 ArgAttrs.push_back( 4018 CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs)); 4019 } 4020 4021 // If the function takes more arguments than the call was taking, add them 4022 // now. 4023 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) { 4024 Args.push_back(Constant::getNullValue(FT->getParamType(i))); 4025 ArgAttrs.push_back(AttributeSet()); 4026 } 4027 4028 // If we are removing arguments to the function, emit an obnoxious warning. 4029 if (FT->getNumParams() < NumActualArgs) { 4030 // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722 4031 if (FT->isVarArg()) { 4032 // Add all of the arguments in their promoted form to the arg list. 4033 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) { 4034 Type *PTy = getPromotedType((*AI)->getType()); 4035 Value *NewArg = *AI; 4036 if (PTy != (*AI)->getType()) { 4037 // Must promote to pass through va_arg area! 4038 Instruction::CastOps opcode = 4039 CastInst::getCastOpcode(*AI, false, PTy, false); 4040 NewArg = Builder.CreateCast(opcode, *AI, PTy); 4041 } 4042 Args.push_back(NewArg); 4043 4044 // Add any parameter attributes. 4045 ArgAttrs.push_back(CallerPAL.getParamAttrs(i)); 4046 } 4047 } 4048 } 4049 4050 AttributeSet FnAttrs = CallerPAL.getFnAttrs(); 4051 4052 if (NewRetTy->isVoidTy()) 4053 Caller->setName(""); // Void type should not have a name. 
4054 4055 assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) && 4056 "missing argument attributes"); 4057 AttributeList NewCallerPAL = AttributeList::get( 4058 Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs); 4059 4060 SmallVector<OperandBundleDef, 1> OpBundles; 4061 Call.getOperandBundlesAsDefs(OpBundles); 4062 4063 CallBase *NewCall; 4064 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) { 4065 NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(), 4066 II->getUnwindDest(), Args, OpBundles); 4067 } else { 4068 NewCall = Builder.CreateCall(Callee, Args, OpBundles); 4069 cast<CallInst>(NewCall)->setTailCallKind( 4070 cast<CallInst>(Caller)->getTailCallKind()); 4071 } 4072 NewCall->takeName(Caller); 4073 NewCall->setCallingConv(Call.getCallingConv()); 4074 NewCall->setAttributes(NewCallerPAL); 4075 4076 // Preserve prof metadata if any. 4077 NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof}); 4078 4079 // Insert a cast of the return type as necessary. 4080 Instruction *NC = NewCall; 4081 Value *NV = NC; 4082 if (OldRetTy != NV->getType() && !Caller->use_empty()) { 4083 if (!NV->getType()->isVoidTy()) { 4084 NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy); 4085 NC->setDebugLoc(Caller->getDebugLoc()); 4086 4087 auto OptInsertPt = NewCall->getInsertionPointAfterDef(); 4088 assert(OptInsertPt && "No place to insert cast"); 4089 InsertNewInstBefore(NC, *OptInsertPt); 4090 Worklist.pushUsersToWorkList(*Caller); 4091 } else { 4092 NV = PoisonValue::get(Caller->getType()); 4093 } 4094 } 4095 4096 if (!Caller->use_empty()) 4097 replaceInstUsesWith(*Caller, NV); 4098 else if (Caller->hasValueHandle()) { 4099 if (OldRetTy == NV->getType()) 4100 ValueHandleBase::ValueIsRAUWd(Caller, NV); 4101 else 4102 // We cannot call ValueIsRAUWd with a different type, and the 4103 // actual tracked value will disappear. 4104 ValueHandleBase::ValueIsDeleted(Caller); 4105 } 4106 4107 eraseInstFromFunction(*Caller); 4108 return true; 4109 } 4110 4111 /// Turn a call to a function created by init_trampoline / adjust_trampoline 4112 /// intrinsic pair into a direct call to the underlying function. 4113 Instruction * 4114 InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call, 4115 IntrinsicInst &Tramp) { 4116 FunctionType *FTy = Call.getFunctionType(); 4117 AttributeList Attrs = Call.getAttributes(); 4118 4119 // If the call already has the 'nest' attribute somewhere then give up - 4120 // otherwise 'nest' would occur twice after splicing in the chain. 4121 if (Attrs.hasAttrSomewhere(Attribute::Nest)) 4122 return nullptr; 4123 4124 Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts()); 4125 FunctionType *NestFTy = NestF->getFunctionType(); 4126 4127 AttributeList NestAttrs = NestF->getAttributes(); 4128 if (!NestAttrs.isEmpty()) { 4129 unsigned NestArgNo = 0; 4130 Type *NestTy = nullptr; 4131 AttributeSet NestAttr; 4132 4133 // Look for a parameter marked with the 'nest' attribute. 4134 for (FunctionType::param_iterator I = NestFTy->param_begin(), 4135 E = NestFTy->param_end(); 4136 I != E; ++NestArgNo, ++I) { 4137 AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo); 4138 if (AS.hasAttribute(Attribute::Nest)) { 4139 // Record the parameter type and any other attributes. 
4140 NestTy = *I; 4141 NestAttr = AS; 4142 break; 4143 } 4144 } 4145 4146 if (NestTy) { 4147 std::vector<Value*> NewArgs; 4148 std::vector<AttributeSet> NewArgAttrs; 4149 NewArgs.reserve(Call.arg_size() + 1); 4150 NewArgAttrs.reserve(Call.arg_size()); 4151 4152 // Insert the nest argument into the call argument list, which may 4153 // mean appending it. Likewise for attributes. 4154 4155 { 4156 unsigned ArgNo = 0; 4157 auto I = Call.arg_begin(), E = Call.arg_end(); 4158 do { 4159 if (ArgNo == NestArgNo) { 4160 // Add the chain argument and attributes. 4161 Value *NestVal = Tramp.getArgOperand(2); 4162 if (NestVal->getType() != NestTy) 4163 NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest"); 4164 NewArgs.push_back(NestVal); 4165 NewArgAttrs.push_back(NestAttr); 4166 } 4167 4168 if (I == E) 4169 break; 4170 4171 // Add the original argument and attributes. 4172 NewArgs.push_back(*I); 4173 NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo)); 4174 4175 ++ArgNo; 4176 ++I; 4177 } while (true); 4178 } 4179 4180 // The trampoline may have been bitcast to a bogus type (FTy). 4181 // Handle this by synthesizing a new function type, equal to FTy 4182 // with the chain parameter inserted. 4183 4184 std::vector<Type*> NewTypes; 4185 NewTypes.reserve(FTy->getNumParams()+1); 4186 4187 // Insert the chain's type into the list of parameter types, which may 4188 // mean appending it. 4189 { 4190 unsigned ArgNo = 0; 4191 FunctionType::param_iterator I = FTy->param_begin(), 4192 E = FTy->param_end(); 4193 4194 do { 4195 if (ArgNo == NestArgNo) 4196 // Add the chain's type. 4197 NewTypes.push_back(NestTy); 4198 4199 if (I == E) 4200 break; 4201 4202 // Add the original type. 4203 NewTypes.push_back(*I); 4204 4205 ++ArgNo; 4206 ++I; 4207 } while (true); 4208 } 4209 4210 // Replace the trampoline call with a direct call. Let the generic 4211 // code sort out any function type mismatches. 4212 FunctionType *NewFTy = 4213 FunctionType::get(FTy->getReturnType(), NewTypes, FTy->isVarArg()); 4214 AttributeList NewPAL = 4215 AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(), 4216 Attrs.getRetAttrs(), NewArgAttrs); 4217 4218 SmallVector<OperandBundleDef, 1> OpBundles; 4219 Call.getOperandBundlesAsDefs(OpBundles); 4220 4221 Instruction *NewCaller; 4222 if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) { 4223 NewCaller = InvokeInst::Create(NewFTy, NestF, II->getNormalDest(), 4224 II->getUnwindDest(), NewArgs, OpBundles); 4225 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv()); 4226 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL); 4227 } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) { 4228 NewCaller = 4229 CallBrInst::Create(NewFTy, NestF, CBI->getDefaultDest(), 4230 CBI->getIndirectDests(), NewArgs, OpBundles); 4231 cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv()); 4232 cast<CallBrInst>(NewCaller)->setAttributes(NewPAL); 4233 } else { 4234 NewCaller = CallInst::Create(NewFTy, NestF, NewArgs, OpBundles); 4235 cast<CallInst>(NewCaller)->setTailCallKind( 4236 cast<CallInst>(Call).getTailCallKind()); 4237 cast<CallInst>(NewCaller)->setCallingConv( 4238 cast<CallInst>(Call).getCallingConv()); 4239 cast<CallInst>(NewCaller)->setAttributes(NewPAL); 4240 } 4241 NewCaller->setDebugLoc(Call.getDebugLoc()); 4242 4243 return NewCaller; 4244 } 4245 } 4246 4247 // Replace the trampoline call with a direct call. Since there is no 'nest' 4248 // parameter, there is no need to adjust the argument list. Let the generic 4249 // code sort out any function type mismatches. 
4250 Call.setCalledFunction(FTy, NestF); 4251 return &Call; 4252 } 4253