//===- InstCombineCalls.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall, visitInvoke, and visitCallBr functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window",
    cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
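/// For example, an i8 or i16 argument is promoted to i32; wider integer types
/// and non-integer types are returned unchanged.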
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  MaybeAlign CopyDstAlign = MI->getDestAlign();
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  MaybeAlign CopySrcAlign = MI->getSourceAlign();
  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(MI->getDest())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for intrinsic. See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.

  // If it is an atomic and alignment is less than the size then we will
  // introduce the unaligned memory access which will be later transformed
  // into libcall in CodeGen. This is not an evident performance gain, so
  // disable it now.
  if (isa<AtomicMemTransferInst>(MI))
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)
      return nullptr;

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
      cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
      cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA tag describing our copy.
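  // A !tbaa.struct node whose single field starts at offset 0, covers the
  // whole copied size, and names an access-type node lets us reuse that access
  // type as the !tbaa tag on the new load and store.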
  MDNode *CopyMD = nullptr;
  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
    CopyMD = M;
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        M->getOperand(1) &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
            Size &&
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      CopyMD = cast<MDNode>(M->getOperand(2));
  }

  Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder.CreateLoad(IntType, Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(*CopySrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(*CopyDstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // non-atomics can be volatile
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
    // atomics have to be unordered
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  MaybeAlign MemSetAlign = MI->getDestAlign();
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(MI->getDest())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  const uint64_t Len = LenC->getLimitedValue();
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = assumeAligned(MI->getDestAlignment());

  // If it is an atomic and alignment is less than the size then we will
  // introduce the unaligned memory access which will be later transformed
  // into libcall in CodeGen. This is not an evident performance gain, so
  // disable it now.
  if (isa<AtomicMemSetInst>(MI))
    if (Alignment < Len)
      return nullptr;

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                       MI->isVolatile());
    S->setAlignment(Alignment);
    if (isa<AtomicMemSetInst>(MI))
      S->setOrdering(AtomicOrdering::Unordered);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Narrow width by halves excluding zero/undef lanes
Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  const Align Alignment =
      cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();

  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(2)))
    return Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                     "unmaskedload");

  // If we can unconditionally load from this address, replace with a
  // load/select idiom. TODO: use DT for context sensitive query
  if (isDereferenceablePointer(LoadPtr, II.getType(),
                               II.getModule()->getDataLayout(), &II, nullptr)) {
    Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                          "unmaskedload");
    return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Narrow width by halves excluding zero/undef lanes
Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
    return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
  }

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
    return replaceOperand(II, 0, V);

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane load -> load
// * Dereferenceable address & few lanes -> scalarize speculative load/selects
// * Adjacent vector addresses -> masked.load
// * Narrow width by halves excluding zero/undef lanes
// * Vector splat address w/known mask -> scalar load
// * Vector incrementing address -> vector masked load
Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Adjacent vector addresses -> masked.store
// * Narrow store width by halves excluding zero/undef lanes
// * Vector splat address w/known mask -> scalar store
// * Vector incrementing address -> vector masked store
Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, a scatter does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
    return replaceOperand(II, 0, V);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts, UndefElts))
    return replaceOperand(II, 1, V);

  return nullptr;
}

/// This function transforms launder.invariant.group and strip.invariant.group
/// like:
/// launder(launder(%x)) -> launder(%x) (the result is not the argument)
/// launder(strip(%x)) -> launder(%x)
/// strip(strip(%x)) -> strip(%x) (the result is not the argument)
/// strip(launder(%x)) -> strip(%x)
/// This is legal because it preserves the most recent information about
/// the presence or absence of invariant.group.
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombinerImpl &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = Arg->stripPointerCastsAndInvariantGroups();
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.

  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
  if (Result->getType() != II.getType())
    Result = IC.Builder.CreateBitCast(Result, II.getType());

  return cast<Instruction>(Result);
}

static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  Value *Op0 = II.getArgOperand(0);
  Value *X;
  // ctlz(bitreverse(x)) -> cttz(x)
  // cttz(bitreverse(x)) -> ctlz(x)
  if (match(Op0, m_BitReverse(m_Value(X)))) {
    Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
    Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
    return CallInst::Create(F, {X, II.getArgOperand(1)});
  }

  if (IsTZ) {
    // cttz(-x) -> cttz(x)
    if (match(Op0, m_Neg(m_Value(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(abs(x)) -> cttz(x)
    // cttz(nabs(x)) -> cttz(x)
    Value *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return IC.replaceOperand(II, 0, X);

    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
      return IC.replaceOperand(II, 0, X);
  }

  KnownBits Known = IC.computeKnownBits(Op0, 0, &II);

  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
  unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
                                : Known.countMaxLeadingZeros();
  unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
                                : Known.countMinLeadingZeros();

  // If all bits above (ctlz) or below (cttz) the first known one are known
  // zero, this value is constant.
  // FIXME: This should be in InstSimplify because we're replacing an
  // instruction with a constant.
  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
    return IC.replaceInstUsesWith(II, C);
  }

  // If the input to cttz/ctlz is known to be non-zero,
  // then change the 'ZeroIsUndef' parameter to 'true'
  // because we know the zero behavior can't affect the result.
  if (!Known.One.isNullValue() ||
      isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
                     &IC.getDominatorTree())) {
    if (!match(II.getArgOperand(1), m_One()))
      return IC.replaceOperand(II, 1, IC.Builder.getTrue());
  }

  // Add range metadata since known bits can't completely reflect what we know.
  // TODO: Handle splat vectors.
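  // The result is always in [DefiniteZeros, PossibleZeros], which we encode as
  // the half-open !range interval [DefiniteZeros, PossibleZeros + 1).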
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
  if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
        ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  Type *Ty = II.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  Value *Op0 = II.getArgOperand(0);
  Value *X;

  // ctpop(bitreverse(x)) -> ctpop(x)
  // ctpop(bswap(x)) -> ctpop(x)
  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
    return IC.replaceOperand(II, 0, X);

  // ctpop(x | -x) -> bitwidth - cttz(x, false)
  if (Op0->hasOneUse() &&
      match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
    auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
    return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
  }

  // ctpop(~x & (x - 1)) -> cttz(x, false)
  if (match(Op0,
            m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    return CallInst::Create(F, {X, IC.Builder.getFalse()});
  }

  // FIXME: Try to simplify vectors of integers.
  auto *IT = dyn_cast<IntegerType>(Ty);
  if (!IT)
    return nullptr;

  KnownBits Known(BitWidth);
  IC.computeKnownBits(Op0, Known, 0, &II);

  unsigned MinCount = Known.countMinPopulation();
  unsigned MaxCount = Known.countMaxPopulation();

  // Add range metadata since known bits can't completely reflect what we know.
  if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
        ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

/// Convert a table lookup to shufflevector if the mask is constant.
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
/// which case we could lower the shufflevector with rev64 instructions
/// as it's actually a byte reverse.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<FixedVectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  int Indexes[8];

  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if ((unsigned)Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *V1 = II.getArgOperand(0);
  auto *V2 = Constant::getNullValue(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, makeArrayRef(Indexes));
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
  assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

// Remove trivially empty start/end intrinsic ranges, i.e. a start
// immediately followed by an end (ignoring debuginfo or other
// start/end intrinsics in between). As this handles only the most trivial
// cases, tracking the nesting level is not needed:
//
//   call @llvm.foo.start(i1 0)
//   call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
//   call @llvm.foo.end(i1 0)
//   call @llvm.foo.end(i1 0) ; &I
static bool
removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
                          std::function<bool(const IntrinsicInst &)> IsStart) {
  // We start from the end intrinsic and scan backwards, so that InstCombine
  // has already processed (and potentially removed) all the instructions
  // before the end intrinsic.
  BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
      if (isa<DbgInfoIntrinsic>(I) ||
          I->getIntrinsicID() == EndI.getIntrinsicID())
        continue;
      if (IsStart(*I)) {
        if (haveSameOperands(EndI, *I, EndI.getNumArgOperands())) {
          IC.eraseInstFromFunction(*I);
          IC.eraseInstFromFunction(EndI);
          return true;
        }
        // Skip start intrinsics that don't pair with this end intrinsic.
        continue;
      }
    }
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
  removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
    return I.getIntrinsicID() == Intrinsic::vastart ||
           I.getIntrinsicID() == Intrinsic::vacopy;
  });
  return nullptr;
}

static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
  assert(Call.getNumArgOperands() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);
    return &Call;
  }
  return nullptr;
}

/// Creates a result tuple for an overflow intrinsic \p II with a given
/// \p Result and a constant \p Overflow value.
static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
                                        Constant *Overflow) {
  Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
  StructType *ST = cast<StructType>(II->getType());
  Constant *Struct = ConstantStruct::get(ST, V);
  return InsertValueInst::Create(Struct, Result, 0);
}

Instruction *
InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
  WithOverflowInst *WO = cast<WithOverflowInst>(II);
  Value *OperationResult = nullptr;
  Constant *OverflowResult = nullptr;
  if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
                            WO->getRHS(), *WO, OperationResult, OverflowResult))
    return createOverflowTuple(WO, OperationResult, OverflowResult);
  return nullptr;
}

static Optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
                                   const DataLayout &DL, AssumptionCache *AC,
                                   DominatorTree *DT) {
  KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT);
  if (Known.isNonNegative())
    return false;
  if (Known.isNegative())
    return true;

  return isImpliedByDomCondition(
      ICmpInst::ICMP_SLT, Op, Constant::getNullValue(Op->getType()), CxtI, DL);
}

/// CallInst simplification. This mostly only handles folding of intrinsic
/// instructions. For normal calls, it allows visitCallBase to do the heavy
/// lifting.
Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
  // Don't try to simplify calls without uses. It will not do anything useful,
  // but will result in the following folds being skipped.
  if (!CI.use_empty())
    if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
      return replaceInstUsesWith(CI, V);

  if (isFreeCall(&CI, &TLI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallBase(CI);

  // For atomic unordered mem intrinsics, if the length is negative or not a
  // multiple of the element size, then the behavior is undefined.
  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
    if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
      if (NumBytes->getSExtValue() < 0 ||
          (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
        CreateNonTerminatorUnreachable(AMI);
        assert(AMI->getType()->isVoidTy() &&
               "non void atomic unordered mem intrinsic");
        return eraseInstFromFunction(*AMI);
      }

  // Intrinsics cannot occur in an invoke or a callbr, so handle them here
  // instead of in visitCallBase.
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return eraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (auto *M = dyn_cast<MemIntrinsic>(MI))
      if (M->isVolatile())
        return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getModule();
          Intrinsic::ID MemCpyID =
              isa<AtomicMemMoveInst>(MMI)
                  ? Intrinsic::memcpy_element_unordered_atomic
                  : Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return eraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemTransfer(MTI))
        return I;
    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  // For fixed width vector result intrinsics, use the generic demanded vector
  // support.
  if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
    auto VWidth = IIFVTy->getNumElements();
    APInt UndefElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
      if (V != II)
        return replaceInstUsesWith(*II, V);
      return II;
    }
  }

  if (II->isCommutative()) {
    if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
      return NewCall;
  }

  Intrinsic::ID IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::objectsize:
    if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
      return replaceInstUsesWith(CI, V);
    return nullptr;
  case Intrinsic::abs: {
    Value *IIOperand = II->getArgOperand(0);
    bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();

    // abs(-x) -> abs(x)
    // TODO: Copy nsw if it was present on the neg?
    Value *X;
    if (match(IIOperand, m_Neg(m_Value(X))))
      return replaceOperand(*II, 0, X);
    if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X)))))
      return replaceOperand(*II, 0, X);
    if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X))))
      return replaceOperand(*II, 0, X);

    if (Optional<bool> Sign = getKnownSign(IIOperand, II, DL, &AC, &DT)) {
      // abs(x) -> x if x >= 0
      if (!*Sign)
        return replaceInstUsesWith(*II, IIOperand);

      // abs(x) -> -x if x < 0
      if (IntMinIsPoison)
        return BinaryOperator::CreateNSWNeg(IIOperand);
      return BinaryOperator::CreateNeg(IIOperand);
    }

    // abs (sext X) --> zext (abs X*)
    // Clear the IsIntMin (nsw) bit on the abs to allow narrowing.
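    // e.g. abs (sext i8 %x to i32) --> zext (abs i8 %x, false) to i32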
    if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) {
      Value *NarrowAbs =
          Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());
      return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType());
    }

    break;
  }
  case Intrinsic::umax:
  case Intrinsic::umin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) &&
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
    }
    // If both operands of unsigned min/max are sign-extended, it is still ok
    // to narrow the operation.
    LLVM_FALLTHROUGH;
  }
  case Intrinsic::smax:
  case Intrinsic::smin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) &&
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
    }
    break;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getScalarSizeInBits() -
                   IIOperand->getType()->getScalarSizeInBits();
      Value *CV = ConstantInt::get(X->getType(), C);
      Value *V = Builder.CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }
  case Intrinsic::masked_load:
    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
      return replaceInstUsesWith(CI, SimplifiedMaskedOp);
    break;
  case Intrinsic::masked_store:
    return simplifyMaskedStore(*II);
  case Intrinsic::masked_gather:
    return simplifyMaskedGather(*II);
  case Intrinsic::masked_scatter:
    return simplifyMaskedScatter(*II);
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
    if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
      return replaceInstUsesWith(*II, SkippedBarrier);
    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // 0 and 1 are handled in instsimplify
      // powi(x, -1) -> 1/x
      if (Power->isMinusOne())
        return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0),
                                             II->getArgOperand(0), II);
      // powi(x, 2) -> x*x
      if (Power->equalsInt(2))
        return BinaryOperator::CreateFMulFMF(II->getArgOperand(0),
                                             II->getArgOperand(0), II);
    }
    break;

  case Intrinsic::cttz:
  case Intrinsic::ctlz:
    if (auto *I = foldCttzCtlz(*II, *this))
      return I;
    break;

  case Intrinsic::ctpop:
    if (auto *I = foldCtpop(*II, *this))
      return I;
    break;

  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
    Type *Ty = II->getType();
    unsigned BitWidth = Ty->getScalarSizeInBits();
    Constant *ShAmtC;
    if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC)) &&
        !ShAmtC->containsConstantExpression()) {
      // Canonicalize a shift amount constant operand to modulo the bit-width.
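      // e.g. fshl i32 %x, %y, 37 --> fshl i32 %x, %y, 5 (37 urem 32 == 5).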
      Constant *WidthC = ConstantInt::get(Ty, BitWidth);
      Constant *ModuloC = ConstantExpr::getURem(ShAmtC, WidthC);
      if (ModuloC != ShAmtC)
        return replaceOperand(*II, 2, ModuloC);

      assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
                 ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) &&
             "Shift amount expected to be modulo bitwidth");

      // Canonicalize funnel shift right by constant to funnel shift left. This
      // is not entirely arbitrary. For historical reasons, the backend may
      // recognize rotate left patterns but miss rotate right patterns.
      if (IID == Intrinsic::fshr) {
        // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
        Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
        Module *Mod = II->getModule();
        Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
        return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
      }
      assert(IID == Intrinsic::fshl &&
             "All funnel shifts by simple constants should go left");

      // fshl(X, 0, C) --> shl X, C
      // fshl(X, undef, C) --> shl X, C
      if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
        return BinaryOperator::CreateShl(Op0, ShAmtC);

      // fshl(0, X, C) --> lshr X, (BW-C)
      // fshl(undef, X, C) --> lshr X, (BW-C)
      if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
        return BinaryOperator::CreateLShr(Op1,
                                          ConstantExpr::getSub(WidthC, ShAmtC));

      // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
      if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
        Module *Mod = II->getModule();
        Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
        return CallInst::Create(Bswap, { Op0 });
      }
    }

    // Left or right might be masked.
    if (SimplifyDemandedInstructionBits(*II))
      return &CI;

    // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
    // so only the low bits of the shift amount are demanded if the bitwidth is
    // a power-of-2.
    if (!isPowerOf2_32(BitWidth))
      break;
    APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
    KnownBits Op2Known(BitWidth);
    if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
      return &CI;
    break;
  }
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;

    // Given 2 constant operands whose sum does not overflow:
    // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
    // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
    Value *X;
    const APInt *C0, *C1;
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    bool IsSigned = IID == Intrinsic::sadd_with_overflow;
    bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
                             : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
    if (HasNWAdd && match(Arg1, m_APInt(C1))) {
      bool Overflow;
      APInt NewC = IsSigned ?
          C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
      if (!Overflow)
        return replaceInstUsesWith(
            *II, Builder.CreateBinaryIntrinsic(
                     IID, X, ConstantInt::get(Arg1->getType(), NewC)));
    }
    break;
  }

  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::usub_with_overflow:
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;
    break;

  case Intrinsic::ssub_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;

    Constant *C;
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    // Given a constant C that is not the minimum signed value
    // for an integer of a given bit width:
    //
    // ssubo X, C -> saddo X, -C
    if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
      Value *NegVal = ConstantExpr::getNeg(C);
      // Build a saddo call that is equivalent to the discovered
      // ssubo call.
      return replaceInstUsesWith(
          *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
                                             Arg0, NegVal));
    }

    break;
  }

  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat: {
    SaturatingInst *SI = cast<SaturatingInst>(II);
    Type *Ty = SI->getType();
    Value *Arg0 = SI->getLHS();
    Value *Arg1 = SI->getRHS();

    // Make use of known overflow information.
    OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
                                        Arg0, Arg1, SI);
    switch (OR) {
    case OverflowResult::MayOverflow:
      break;
    case OverflowResult::NeverOverflows:
      if (SI->isSigned())
        return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
      else
        return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
    case OverflowResult::AlwaysOverflowsLow: {
      unsigned BitWidth = Ty->getScalarSizeInBits();
      APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
      return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
    }
    case OverflowResult::AlwaysOverflowsHigh: {
      unsigned BitWidth = Ty->getScalarSizeInBits();
      APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
      return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
    }
    }

    // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
    Constant *C;
    if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
        C->isNotMinSignedValue()) {
      Value *NegVal = ConstantExpr::getNeg(C);
      return replaceInstUsesWith(
          *II, Builder.CreateBinaryIntrinsic(
                   Intrinsic::sadd_sat, Arg0, NegVal));
    }

    // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
    // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
    // if Val and Val2 have the same sign
    if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
      Value *X;
      const APInt *Val, *Val2;
      APInt NewVal;
      bool IsUnsigned =
          IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
      if (Other->getIntrinsicID() == IID &&
          match(Arg1, m_APInt(Val)) &&
          match(Other->getArgOperand(0), m_Value(X)) &&
          match(Other->getArgOperand(1), m_APInt(Val2))) {
        if (IsUnsigned)
          NewVal = Val->uadd_sat(*Val2);
        else if (Val->isNonNegative() == Val2->isNonNegative()) {
          bool Overflow;
          NewVal = Val->sadd_ov(*Val2, Overflow);
          if (Overflow) {
            // Both adds together may add more than SignedMaxValue
            // without saturating the final result.
            break;
          }
        } else {
          // Cannot fold saturated addition with different signs.
          break;
        }

        return replaceInstUsesWith(
            *II, Builder.CreateBinaryIntrinsic(
                     IID, X, ConstantInt::get(II->getType(), NewVal)));
      }
    }
    break;
  }

  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
        (Arg0->hasOneUse() || Arg1->hasOneUse())) {
      // If both operands are negated, invert the call and negate the result:
      // min(-X, -Y) --> -(max(X, Y))
      // max(-X, -Y) --> -(min(X, Y))
      Intrinsic::ID NewIID;
      switch (IID) {
      case Intrinsic::maxnum:
        NewIID = Intrinsic::minnum;
        break;
      case Intrinsic::minnum:
        NewIID = Intrinsic::maxnum;
        break;
      case Intrinsic::maximum:
        NewIID = Intrinsic::minimum;
        break;
      case Intrinsic::minimum:
        NewIID = Intrinsic::maximum;
        break;
      default:
        llvm_unreachable("unexpected intrinsic ID");
      }
      Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
      Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
      FNeg->copyIRFlags(II);
      return FNeg;
    }

    // m(m(X, C2), C1) -> m(X, C)
    const APFloat *C1, *C2;
    if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
      if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
          ((match(M->getArgOperand(0), m_Value(X)) &&
            match(M->getArgOperand(1), m_APFloat(C2))) ||
           (match(M->getArgOperand(1), m_Value(X)) &&
            match(M->getArgOperand(0), m_APFloat(C2))))) {
        APFloat Res(0.0);
        switch (IID) {
        case Intrinsic::maxnum:
          Res = maxnum(*C1, *C2);
          break;
        case Intrinsic::minnum:
          Res = minnum(*C1, *C2);
          break;
        case Intrinsic::maximum:
          Res = maximum(*C1, *C2);
          break;
        case Intrinsic::minimum:
          Res = minimum(*C1, *C2);
          break;
        default:
          llvm_unreachable("unexpected intrinsic ID");
        }
        Instruction *NewCall = Builder.CreateBinaryIntrinsic(
            IID, X, ConstantFP::get(Arg0->getType(), Res), II);
        // TODO: Conservatively intersecting FMF. If Res == C2, the transform
        //       was a simplification (so Arg0 and its original flags could
        //       propagate?)
        NewCall->andIRFlags(M);
        return replaceInstUsesWith(*II, NewCall);
      }
    }

    Value *ExtSrc0;
    Value *ExtSrc1;

    // minnum (fpext x), (fpext y) -> minnum x, y
    // maxnum (fpext x), (fpext y) -> maxnum x, y
    if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc0)))) &&
        match(II->getArgOperand(1), m_OneUse(m_FPExt(m_Value(ExtSrc1)))) &&
        ExtSrc0->getType() == ExtSrc1->getType()) {
      Function *F = Intrinsic::getDeclaration(
          II->getModule(), II->getIntrinsicID(), {ExtSrc0->getType()});
      CallInst *NewCall = Builder.CreateCall(F, { ExtSrc0, ExtSrc1 });
      NewCall->copyFastMathFlags(II);
      NewCall->takeName(II);
      return new FPExtInst(NewCall, II->getType());
    }

    break;
  }
  case Intrinsic::fmuladd: {
    // Canonicalize fast fmuladd to the separate fmul + fadd.
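    // The split fmul and fadd inherit the fmuladd's fast-math flags, so later
    // FP folds can work on each operation independently.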
    if (II->isFast()) {
      BuilderTy::FastMathFlagGuard Guard(Builder);
      Builder.setFastMathFlags(II->getFastMathFlags());
      Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
                                      II->getArgOperand(1));
      Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
      Add->takeName(II);
      return replaceInstUsesWith(*II, Add);
    }

    // Try to simplify the underlying FMul.
    if (Value *V = SimplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
                                    II->getFastMathFlags(),
                                    SQ.getWithInstruction(II))) {
      auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
      FAdd->copyFastMathFlags(II);
      return FAdd;
    }

    LLVM_FALLTHROUGH;
  }
  case Intrinsic::fma: {
    // fma fneg(x), fneg(y), z -> fma x, y, z
    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
      replaceOperand(*II, 0, X);
      replaceOperand(*II, 1, Y);
      return II;
    }

    // fma fabs(x), fabs(x), z -> fma x, x, z
    if (match(Src0, m_FAbs(m_Value(X))) &&
        match(Src1, m_FAbs(m_Specific(X)))) {
      replaceOperand(*II, 0, X);
      replaceOperand(*II, 1, X);
      return II;
    }

    // Try to simplify the underlying FMul. We can only apply simplifications
    // that do not require rounding.
    if (Value *V = SimplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1),
                                   II->getFastMathFlags(),
                                   SQ.getWithInstruction(II))) {
      auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
      FAdd->copyFastMathFlags(II);
      return FAdd;
    }

    // fma x, y, 0 -> fmul x, y
    // This is always valid for -0.0, but requires nsz for +0.0 as
    // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own.
    if (match(II->getArgOperand(2), m_NegZeroFP()) ||
        (match(II->getArgOperand(2), m_PosZeroFP()) &&
         II->getFastMathFlags().noSignedZeros()))
      return BinaryOperator::CreateFMulFMF(Src0, Src1, II);

    break;
  }
  case Intrinsic::copysign: {
    Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
    if (SignBitMustBeZero(Sign, &TLI)) {
      // If we know that the sign argument is positive, reduce to FABS:
      // copysign Mag, +Sign --> fabs Mag
      Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
      return replaceInstUsesWith(*II, Fabs);
    }
    // TODO: There should be a ValueTracking sibling like SignBitMustBeOne.
    const APFloat *C;
    if (match(Sign, m_APFloat(C)) && C->isNegative()) {
      // If we know that the sign argument is negative, reduce to FNABS:
      // copysign Mag, -Sign --> fneg (fabs Mag)
      Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
      return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II));
    }

    // Propagate sign argument through nested calls:
    // copysign Mag, (copysign ?, X) --> copysign Mag, X
    Value *X;
    if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X))))
      return replaceOperand(*II, 1, X);

    // Peek through changes of magnitude's sign-bit. This call rewrites those:
    // copysign (fabs X), Sign --> copysign X, Sign
    // copysign (fneg X), Sign --> copysign X, Sign
    if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X))))
      return replaceOperand(*II, 0, X);

    break;
  }
  case Intrinsic::fabs: {
    Value *Cond, *TVal, *FVal;
    if (match(II->getArgOperand(0),
              m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) {
      // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF
      if (isa<Constant>(TVal) && isa<Constant>(FVal)) {
        CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal});
        CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal});
        return SelectInst::Create(Cond, AbsT, AbsF);
      }
      // fabs (select Cond, -FVal, FVal) --> fabs FVal
      if (match(TVal, m_FNeg(m_Specific(FVal))))
        return replaceOperand(*II, 0, FVal);
      // fabs (select Cond, TVal, -TVal) --> fabs TVal
      if (match(FVal, m_FNeg(m_Specific(TVal))))
        return replaceOperand(*II, 0, TVal);
    }

    LLVM_FALLTHROUGH;
  }
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::trunc: {
    Value *ExtSrc;
    if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
      // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
      Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
      return new FPExtInst(NarrowII, II->getType());
    }
    break;
  }
  case Intrinsic::cos:
  case Intrinsic::amdgcn_cos: {
    Value *X;
    Value *Src = II->getArgOperand(0);
    if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
      // cos(-x) -> cos(x)
      // cos(fabs(x)) -> cos(x)
      return replaceOperand(*II, 0, X);
    }
    break;
  }
  case Intrinsic::sin: {
    Value *X;
    if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
      // sin(-x) --> -sin(x)
      Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
      Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin);
      FNeg->copyFastMathFlags(II);
      return FNeg;
    }
    break;
  }

  case Intrinsic::arm_neon_vtbl1:
  case Intrinsic::aarch64_neon_tbl1:
    if (Value *V = simplifyNeonTbl1(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
                 IID == Intrinsic::aarch64_neon_umull);
    VectorType *NewVT = cast<VectorType>(II->getType());
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);

        return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);

    break;
  }
  case Intrinsic::arm_neon_aesd:
  case Intrinsic::arm_neon_aese:
  case Intrinsic::aarch64_crypto_aesd:
  case Intrinsic::aarch64_crypto_aese: {
    Value *DataArg = II->getArgOperand(0);
    Value *KeyArg = II->getArgOperand(1);

    // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
    Value *Data, *Key;
    if (match(KeyArg, m_ZeroInt()) &&
        match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
      replaceOperand(*II, 0, Data);
      replaceOperand(*II, 1, Key);
      return II;
    }
    break;
  }
  case Intrinsic::hexagon_V6_vandvrt:
  case Intrinsic::hexagon_V6_vandvrt_128B: {
    // Simplify Q -> V -> Q conversion.
    if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      Intrinsic::ID ID0 = Op0->getIntrinsicID();
      if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
          ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
        break;
      Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
      uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue();
      uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue();
      // Check if every byte has common bits in Bytes and Mask.
      uint64_t C = Bytes1 & Mask1;
      if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
        return replaceInstUsesWith(*II, Op0->getArgOperand(0));
    }
    break;
  }
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        // Skip over debug info.
        if (SS->getNextNonDebugInstruction() == II) {
          return eraseInstFromFunction(CI);
        }
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI(II);
    Instruction *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (auto *II2 = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II2->getIntrinsicID() == Intrinsic::stackrestore)
            return eraseInstFromFunction(CI);

          // Bail if we cross over an intrinsic with side effects, such as
          // llvm.stacksave, or llvm.read_register.
          if (II2->mayHaveSideEffects()) {
            CannotRemove = true;
            break;
          }
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if there
    // are no allocas or calls between the restore and the return, nuke the
    // restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return eraseInstFromFunction(CI);
    break;
  }
  case Intrinsic::lifetime_end:
    // Asan needs to poison memory to detect invalid access which is possible
    // even for empty lifetime range.
    if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
        II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
        II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
      break;

    if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) {
          return I.getIntrinsicID() == Intrinsic::lifetime_start;
        }))
      return nullptr;
    break;
  case Intrinsic::assume: {
    Value *IIOperand = II->getArgOperand(0);
    SmallVector<OperandBundleDef, 4> OpBundles;
    II->getOperandBundlesAsDefs(OpBundles);
    bool HasOpBundles = !OpBundles.empty();
    // Remove an assume if it is followed by an identical assume.
    // TODO: Do we need this? Unless there are conflicting assumptions, the
    // computeKnownBits(IIOperand) below here eliminates redundant assumes.
    Instruction *Next = II->getNextNonDebugInstruction();
    if (HasOpBundles &&
        match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))) &&
        !cast<IntrinsicInst>(Next)->hasOperandBundles())
      return eraseInstFromFunction(CI);

    // Canonicalize assume(a && b) -> assume(a); assume(b);
    // Note: New assumption intrinsics created here are registered by
    // the InstCombineIRInserter object.
    FunctionType *AssumeIntrinsicTy = II->getFunctionType();
    Value *AssumeIntrinsic = II->getCalledOperand();
    Value *A, *B;
    if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) {
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
                         II->getName());
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
      return eraseInstFromFunction(*II);
    }
    // assume(!(a || b)) -> assume(!a); assume(!b);
    if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) {
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
                         Builder.CreateNot(A), OpBundles, II->getName());
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
                         Builder.CreateNot(B), II->getName());
      return eraseInstFromFunction(*II);
    }

    // assume( (load addr) != null ) -> add 'nonnull' metadata to load
    // (if assume is valid at the load)
    CmpInst::Predicate Pred;
    Instruction *LHS;
    if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
        Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
        LHS->getType()->isPointerTy() &&
        isValidAssumeForContext(II, LHS, &DT)) {
      MDNode *MD = MDNode::get(II->getContext(), None);
      LHS->setMetadata(LLVMContext::MD_nonnull, MD);
      if (!HasOpBundles)
        return eraseInstFromFunction(*II);

      // TODO: apply nonnull return attributes to calls and invokes
      // TODO: apply range metadata for range check patterns?
    }

    // If there is a dominating assume with the same condition as this one,
    // then this one is redundant, and should be removed.
    KnownBits Known(1);
    computeKnownBits(IIOperand, Known, 0, II);
    if (Known.isAllOnes() && isAssumeWithEmptyBundle(*II))
      return eraseInstFromFunction(*II);

    // Update the cache of affected values for this assumption (we might be
    // here because we just simplified the condition).
    AC.updateAffectedValues(II);
    break;
  }
  case Intrinsic::experimental_gc_statepoint: {
    GCStatepointInst &GCSP = *cast<GCStatepointInst>(II);
    SmallPtrSet<Value *, 32> LiveGcValues;
    for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
      GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);

      // Remove the relocation if unused.
      if (GCR.use_empty()) {
        eraseInstFromFunction(GCR);
        continue;
      }

      Value *DerivedPtr = GCR.getDerivedPtr();
      Value *BasePtr = GCR.getBasePtr();

      // Undef is undef, even after relocation.
      if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
        replaceInstUsesWith(GCR, UndefValue::get(GCR.getType()));
        eraseInstFromFunction(GCR);
        continue;
      }

      if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
        // The relocation of null will be null for almost any collector.
        // TODO: provide a hook for this in GCStrategy. There might be some
        // weird collector this property does not hold for.
        if (isa<ConstantPointerNull>(DerivedPtr)) {
          // Use null-pointer of gc_relocate's type to replace it.
          replaceInstUsesWith(GCR, ConstantPointerNull::get(PT));
          eraseInstFromFunction(GCR);
          continue;
        }

        // isKnownNonNull -> nonnull attribute
        if (!GCR.hasRetAttr(Attribute::NonNull) &&
            isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT)) {
          GCR.addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
          // We discovered a new fact; re-check the users.
          Worklist.pushUsersToWorkList(GCR);
        }
      }

      // If we have two copies of the same pointer in the statepoint argument
      // list, canonicalize to one. This may let us common gc.relocates.
      if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
          GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
        auto *OpIntTy = GCR.getOperand(2)->getType();
        GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
      }

      // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
      // Canonicalize on the type from the uses to the defs

      // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
      LiveGcValues.insert(BasePtr);
      LiveGcValues.insert(DerivedPtr);
    }
    Optional<OperandBundleUse> Bundle =
        GCSP.getOperandBundle(LLVMContext::OB_gc_live);
    unsigned NumOfGCLives = LiveGcValues.size();
    if (!Bundle.hasValue() || NumOfGCLives == Bundle->Inputs.size())
      break;
    // We can reduce the size of the gc-live bundle.
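    // Illustrative example: a statepoint carrying "gc-live"(%a, %b, %c) where
    // only %a and %c are still used by gc.relocates is rebuilt below with
    // "gc-live"(%a, %c), and the relocate index operands are remapped to
    // match.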
    DenseMap<Value *, unsigned> Val2Idx;
    std::vector<Value *> NewLiveGc;
    for (unsigned I = 0, E = Bundle->Inputs.size(); I < E; ++I) {
      Value *V = Bundle->Inputs[I];
      if (Val2Idx.count(V))
        continue;
      if (LiveGcValues.count(V)) {
        Val2Idx[V] = NewLiveGc.size();
        NewLiveGc.push_back(V);
      } else
        Val2Idx[V] = NumOfGCLives;
    }
    // Update all gc.relocates
    for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
      GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
      Value *BasePtr = GCR.getBasePtr();
      assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
             "Missed live gc for base pointer");
      auto *OpIntTy1 = GCR.getOperand(1)->getType();
      GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
      Value *DerivedPtr = GCR.getDerivedPtr();
      assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
             "Missed live gc for derived pointer");
      auto *OpIntTy2 = GCR.getOperand(2)->getType();
      GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
    }
    // Create new statepoint instruction.
    OperandBundleDef NewBundle("gc-live", NewLiveGc);
    if (isa<CallInst>(II))
      return CallInst::CreateWithReplacedBundle(cast<CallInst>(II), NewBundle);
    else
      return InvokeInst::CreateWithReplacedBundle(cast<InvokeInst>(II),
                                                  NewBundle);
    break;
  }
  case Intrinsic::experimental_guard: {
    // Is this guard followed by another guard? We scan forward over a small
    // fixed window of instructions to handle common cases with conditions
    // computed between guards.
    Instruction *NextInst = II->getNextNonDebugInstruction();
    for (unsigned i = 0; i < GuardWideningWindow; i++) {
      // Note: Using context-free form to avoid compile time blow up
      if (!isSafeToSpeculativelyExecute(NextInst))
        break;
      NextInst = NextInst->getNextNonDebugInstruction();
    }
    Value *NextCond = nullptr;
    if (match(NextInst,
              m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
      Value *CurrCond = II->getArgOperand(0);

      // Remove a guard that is immediately preceded by an identical guard.
      // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
      if (CurrCond != NextCond) {
        Instruction *MoveI = II->getNextNonDebugInstruction();
        while (MoveI != NextInst) {
          auto *Temp = MoveI;
          MoveI = MoveI->getNextNonDebugInstruction();
          Temp->moveBefore(II);
        }
        replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
      }
      eraseInstFromFunction(*NextInst);
      return II;
    }
    break;
  }
  case Intrinsic::experimental_vector_insert: {
    Value *Vec = II->getArgOperand(0);
    Value *SubVec = II->getArgOperand(1);
    Value *Idx = II->getArgOperand(2);
    auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
    auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
    auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());

    // Only canonicalize if the destination vector, Vec, and SubVec are all
    // fixed vectors.
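    // Illustrative example (hypothetical values): inserting a <2 x i32> %sub
    // at index 2 of a <4 x i32> %vec becomes
    //   %widen = shufflevector <2 x i32> %sub, <2 x i32> undef,
    //                          <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
    //   %res   = shufflevector <4 x i32> %vec, <4 x i32> %widen,
    //                          <4 x i32> <i32 0, i32 1, i32 4, i32 5>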
    if (DstTy && VecTy && SubVecTy) {
      unsigned DstNumElts = DstTy->getNumElements();
      unsigned VecNumElts = VecTy->getNumElements();
      unsigned SubVecNumElts = SubVecTy->getNumElements();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      // The result of this call is undefined if IdxN is not a constant multiple
      // of the SubVec's minimum vector length OR the insertion overruns Vec.
      if (IdxN % SubVecNumElts != 0 || IdxN + SubVecNumElts > VecNumElts) {
        replaceInstUsesWith(CI, UndefValue::get(CI.getType()));
        return eraseInstFromFunction(CI);
      }

      // An insert that entirely overwrites Vec with SubVec is a nop.
      if (VecNumElts == SubVecNumElts) {
        replaceInstUsesWith(CI, SubVec);
        return eraseInstFromFunction(CI);
      }

      // Widen SubVec into a vector of the same width as Vec, since
      // shufflevector requires the two input vectors to be the same width.
      // Elements beyond the bounds of SubVec within the widened vector are
      // undefined.
      SmallVector<int, 8> WidenMask;
      unsigned i;
      for (i = 0; i != SubVecNumElts; ++i)
        WidenMask.push_back(i);
      for (; i != VecNumElts; ++i)
        WidenMask.push_back(UndefMaskElem);

      Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);

      SmallVector<int, 8> Mask;
      for (unsigned i = 0; i != IdxN; ++i)
        Mask.push_back(i);
      for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
        Mask.push_back(i);
      for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
        Mask.push_back(i);

      Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
      replaceInstUsesWith(CI, Shuffle);
      return eraseInstFromFunction(CI);
    }
    break;
  }
  case Intrinsic::experimental_vector_extract: {
    Value *Vec = II->getArgOperand(0);
    Value *Idx = II->getArgOperand(1);

    auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
    auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());

    // Only canonicalize if the destination vector and Vec are fixed
    // vectors.
    if (DstTy && VecTy) {
      unsigned DstNumElts = DstTy->getNumElements();
      unsigned VecNumElts = VecTy->getNumElements();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      // The result of this call is undefined if IdxN is not a constant multiple
      // of the result type's minimum vector length OR the extraction overruns
      // Vec.
      if (IdxN % DstNumElts != 0 || IdxN + DstNumElts > VecNumElts) {
        replaceInstUsesWith(CI, UndefValue::get(CI.getType()));
        return eraseInstFromFunction(CI);
      }

      // Extracting the entirety of Vec is a nop.
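      // (e.g. extracting a <4 x i32> result from a <4 x i32> source simply
      // forwards Vec; smaller fixed-width extracts become a shufflevector
      // below.)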
      if (VecNumElts == DstNumElts) {
        replaceInstUsesWith(CI, Vec);
        return eraseInstFromFunction(CI);
      }

      SmallVector<int, 8> Mask;
      for (unsigned i = 0; i != DstNumElts; ++i)
        Mask.push_back(IdxN + i);

      Value *Shuffle =
          Builder.CreateShuffleVector(Vec, UndefValue::get(VecTy), Mask);
      replaceInstUsesWith(CI, Shuffle);
      return eraseInstFromFunction(CI);
    }
    break;
  }
  default: {
    // Handle target specific intrinsics
    Optional<Instruction *> V = targetInstCombineIntrinsic(*II);
    if (V.hasValue())
      return V.getValue();
    break;
  }
  }
  return visitCallBase(*II);
}

// Fence instruction simplification
Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
  // Remove identical consecutive fences.
  Instruction *Next = FI.getNextNonDebugInstruction();
  if (auto *NFI = dyn_cast<FenceInst>(Next))
    if (FI.isIdenticalTo(NFI))
      return eraseInstFromFunction(FI);
  return nullptr;
}

// InvokeInst simplification
Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
  return visitCallBase(II);
}

// CallBrInst simplification
Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
  return visitCallBase(CBI);
}

/// If this cast does not affect the value passed through the varargs area, we
/// can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallBase &Call,
                                         const DataLayout &DL,
                                         const CastInst *const CI,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // If this is a GC intrinsic, avoid munging types. We need types for
  // statepoint reconstruction in SelectionDAG.
  // TODO: This is probably something which should be expanded to all
  // intrinsics since the entire point of intrinsics is that
  // they are understandable by the optimizer.
  if (isa<GCStatepointInst>(Call) || isa<GCRelocateInst>(Call) ||
      isa<GCResultInst>(Call))
    return false;

  // The size of ByVal or InAlloca arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!Call.isPassPointeeByValueArgument(ix))
    return true;

  Type* SrcTy =
      cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type *DstTy = Call.isByValArgument(ix)
                    ? Call.getParamByValType(ix)
                    : cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
    return false;
  return true;
}

Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
  if (!CI->getCalledFunction()) return nullptr;

  auto InstCombineRAUW = [this](Instruction *From, Value *With) {
    replaceInstUsesWith(*From, With);
  };
  auto InstCombineErase = [this](Instruction *I) {
    eraseInstFromFunction(*I);
  };
  LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
                               InstCombineErase);
  if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
    ++NumSimplified;
    return CI->use_empty() ?
               CI : replaceInstUsesWith(*CI, With);
  }

  return nullptr;
}

static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca. This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  IntrinsicInst *InitTrampoline = nullptr;
  for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value. Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return nullptr;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}

static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find an
  // init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp->getIterator(),
                            E = AdjustTramp->getParent()->begin();
       I != E;) {
    Instruction *Inst = &*--I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return nullptr;
  }
  return nullptr;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function. Otherwise return NULL.
static IntrinsicInst *findInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return nullptr;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return nullptr;
}

static void annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) {
  unsigned NumArgs = Call.getNumArgOperands();
  ConstantInt *Op0C = dyn_cast<ConstantInt>(Call.getOperand(0));
  ConstantInt *Op1C =
      (NumArgs == 1) ? nullptr : dyn_cast<ConstantInt>(Call.getOperand(1));
  // Bail out if the allocation size is zero (or an invalid alignment of zero
  // with aligned_alloc).
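  // (e.g. malloc(0), or aligned_alloc with a zero alignment argument, gets no
  // dereferenceable annotation.)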
  if ((Op0C && Op0C->isNullValue()) || (Op1C && Op1C->isNullValue()))
    return;

  if (isMallocLikeFn(&Call, TLI) && Op0C) {
    if (isOpNewLikeFn(&Call, TLI))
      Call.addAttribute(AttributeList::ReturnIndex,
                        Attribute::getWithDereferenceableBytes(
                            Call.getContext(), Op0C->getZExtValue()));
    else
      Call.addAttribute(AttributeList::ReturnIndex,
                        Attribute::getWithDereferenceableOrNullBytes(
                            Call.getContext(), Op0C->getZExtValue()));
  } else if (isAlignedAllocLikeFn(&Call, TLI) && Op1C) {
    Call.addAttribute(AttributeList::ReturnIndex,
                      Attribute::getWithDereferenceableOrNullBytes(
                          Call.getContext(), Op1C->getZExtValue()));
    // Add alignment attribute if alignment is a power of two constant.
    if (Op0C && Op0C->getValue().ult(llvm::Value::MaximumAlignment)) {
      uint64_t AlignmentVal = Op0C->getZExtValue();
      if (llvm::isPowerOf2_64(AlignmentVal))
        Call.addAttribute(AttributeList::ReturnIndex,
                          Attribute::getWithAlignment(Call.getContext(),
                                                      Align(AlignmentVal)));
    }
  } else if (isReallocLikeFn(&Call, TLI) && Op1C) {
    Call.addAttribute(AttributeList::ReturnIndex,
                      Attribute::getWithDereferenceableOrNullBytes(
                          Call.getContext(), Op1C->getZExtValue()));
  } else if (isCallocLikeFn(&Call, TLI) && Op0C && Op1C) {
    bool Overflow;
    const APInt &N = Op0C->getValue();
    APInt Size = N.umul_ov(Op1C->getValue(), Overflow);
    if (!Overflow)
      Call.addAttribute(AttributeList::ReturnIndex,
                        Attribute::getWithDereferenceableOrNullBytes(
                            Call.getContext(), Size.getZExtValue()));
  } else if (isStrdupLikeFn(&Call, TLI)) {
    uint64_t Len = GetStringLength(Call.getOperand(0));
    if (Len) {
      // strdup
      if (NumArgs == 1)
        Call.addAttribute(AttributeList::ReturnIndex,
                          Attribute::getWithDereferenceableOrNullBytes(
                              Call.getContext(), Len));
      // strndup
      else if (NumArgs == 2 && Op1C)
        Call.addAttribute(
            AttributeList::ReturnIndex,
            Attribute::getWithDereferenceableOrNullBytes(
                Call.getContext(), std::min(Len, Op1C->getZExtValue() + 1)));
    }
  }
}

/// Improvements for call, callbr and invoke instructions.
Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
  if (isAllocationFn(&Call, &TLI))
    annotateAnyAllocSite(Call, &TLI);

  bool Changed = false;

  // Mark any parameters that are known to be non-null with the nonnull
  // attribute. This is helpful for inlining calls to functions with null
  // checks on their arguments.
  SmallVector<unsigned, 4> ArgNos;
  unsigned ArgNo = 0;

  for (Value *V : Call.args()) {
    if (V->getType()->isPointerTy() &&
        !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
        isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
      ArgNos.push_back(ArgNo);
    ArgNo++;
  }

  assert(ArgNo == Call.arg_size() && "sanity check");

  if (!ArgNos.empty()) {
    AttributeList AS = Call.getAttributes();
    LLVMContext &Ctx = Call.getContext();
    AS = AS.addParamAttribute(Ctx, ArgNos,
                              Attribute::get(Ctx, Attribute::NonNull));
    Call.setAttributes(AS);
    Changed = true;
  }

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/callbr/invoke.
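  // Illustrative example (hypothetical IR): a call through a constexpr cast
  // of the callee such as
  //   %r = call i32 bitcast (i32 (i8*)* @f to i32 (i32*)*)(i32* %p)
  // can usually be rewritten as a direct call to @f with %p bitcast to i8*.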
  Value *Callee = Call.getCalledOperand();
  if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
    return nullptr;

  if (Function *CalleeF = dyn_cast<Function>(Callee)) {
    // Remove the convergent attr on calls when the callee is not convergent.
    if (Call.isConvergent() && !CalleeF->isConvergent() &&
        !CalleeF->isIntrinsic()) {
      LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
                        << "\n");
      Call.setNotConvergent();
      return &Call;
    }

    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != Call.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = &Call;
      CreateNonTerminatorUnreachable(OldCall);
      // If OldCall does not return void then replaceInstUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return eraseInstFromFunction(*OldCall);

      // We cannot remove an invoke or a callbr because it would change the
      // CFG; just change the callee to a null pointer.
      cast<CallBase>(OldCall)->setCalledFunction(
          CalleeF->getFunctionType(),
          Constant::getNullValue(CalleeF->getType()));
      return nullptr;
    }
  }

  if ((isa<ConstantPointerNull>(Callee) &&
       !NullPointerIsDefined(Call.getFunction())) ||
      isa<UndefValue>(Callee)) {
    // If Call does not return void then replaceInstUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!Call.getType()->isVoidTy())
      replaceInstUsesWith(Call, UndefValue::get(Call.getType()));

    if (Call.isTerminator()) {
      // Can't remove an invoke or callbr because we cannot change the CFG.
      return nullptr;
    }

    // This instruction is not reachable, just remove it.
    CreateNonTerminatorUnreachable(&Call);
    return eraseInstFromFunction(Call);
  }

  if (IntrinsicInst *II = findInitTrampoline(Callee))
    return transformCallThroughTrampoline(Call, *II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
         I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
        replaceUse(*I, CI->getOperand(0));

        // Update the byval type to match the argument type.
        if (Call.isByValArgument(ix)) {
          Call.removeParamAttr(ix, Attribute::ByVal);
          Call.addParamAttr(
              ix, Attribute::getWithByValType(
                      Call.getContext(),
                      CI->getOperand(0)->getType()->getPointerElementType()));
        }
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    Call.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require DataLayout for most of
  // this. None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
    Instruction *I = tryOptimizeCall(CI);
    // If we changed something, return the result; otherwise let the
    // fallthrough checks below run.
    if (I) return eraseInstFromFunction(*I);
  }

  if (!Call.use_empty() && !Call.isMustTailCall())
    if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
      Type *CallTy = Call.getType();
      Type *RetArgTy = ReturnedArg->getType();
      if (RetArgTy->canLosslesslyBitCastTo(CallTy))
        return replaceInstUsesWith(
            Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
    }

  if (isAllocLikeFn(&Call, &TLI))
    return visitAllocSite(Call);

  return Changed ? &Call : nullptr;
}

/// If the callee is a constexpr cast of a function, attempt to move the cast to
/// the arguments of the call/callbr/invoke.
bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
  auto *Callee =
      dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
  if (!Callee)
    return false;

  // If this is a call to a thunk function, don't remove the cast. Thunks are
  // used to transparently forward all incoming parameters and outgoing return
  // values, so it's important to leave the cast in place.
  if (Callee->hasFnAttribute("thunk"))
    return false;

  // If this is a musttail call, the callee's prototype must match the caller's
  // prototype with the exception of pointee types. The code below doesn't
  // implement that, so we can't do this transform.
  // TODO: Do the transform if it only requires adding pointer casts.
  if (Call.isMustTailCall())
    return false;

  Instruction *Caller = &Call;
  const AttributeList &CallerPAL = Call.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {

    if (NewRetTy->isStructTy())
      return false; // TODO: Handle multiple return values.

    if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
      if (Callee->isDeclaration())
        return false; // Cannot transform this return value.

      if (!Caller->use_empty() &&
          // void -> non-void is handled specially
          !NewRetTy->isVoidTy())
        return false; // Cannot transform this return value.
    }

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
      if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
        return false; // Attribute not compatible with transformed value.
    }

    // If the callbase is an invoke/callbr instruction, and the return value is
    // used by a PHI node in a successor, we cannot change the return type of
    // the call because there is no place to put the cast instruction (without
    // breaking the critical edge).
    // Bail out in this case.
    if (!Caller->use_empty()) {
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (User *U : II->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
      // FIXME: Be conservative for callbr to avoid a quadratic search.
      if (isa<CallBrInst>(Caller))
        return false;
    }
  }

  unsigned NumActualArgs = Call.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  // Prevent us turning:
  // declare void @takes_i32_inalloca(i32* inalloca)
  //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
  //
  // into:
  //  call void @takes_i32_inalloca(i32* null)
  //
  // Similarly, avoid folding away bitcasts of byval calls.
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
    return false;

  auto AI = Call.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
      return false; // Cannot transform this parameter value.

    if (AttrBuilder(CallerPAL.getParamAttributes(i))
            .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
      return false; // Attribute not compatible with transformed value.

    if (Call.isInAllocaArgument(i))
      return false; // Cannot transform to and from inalloca.

    if (CallerPAL.hasParamAttribute(i, Attribute::SwiftError))
      return false;

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (!ParamPTy || !ParamPTy->getElementType()->isSized())
        return false;

      Type *CurElTy = Call.getParamByValType(i);
      if (DL.getTypeAllocSize(CurElTy) !=
          DL.getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call. We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(Call.getCalledOperand()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters is the same or we have the same
    // ABI issues as if we introduced a varargs call.
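    // (e.g. treating "void (i32, ...)" as "void (i32, i32, ...)" would change
    // which arguments are passed as fixed rather than variadic.)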
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
            cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty()) {
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    unsigned SRetIdx;
    if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
        SRetIdx > FT->getNumParams())
      return false;
  }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  SmallVector<Value *, 8> Args;
  SmallVector<AttributeSet, 8> ArgAttrs;
  Args.reserve(NumActualArgs);
  ArgAttrs.reserve(NumActualArgs);

  // Get any return attributes.
  AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));

  LLVMContext &Ctx = Call.getContext();
  AI = Call.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    Value *NewArg = *AI;
    if ((*AI)->getType() != ParamTy)
      NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
    Args.push_back(NewArg);

    // Add any parameter attributes.
    if (CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
      AttrBuilder AB(CallerPAL.getParamAttributes(i));
      AB.addByValAttr(NewArg->getType()->getPointerElementType());
      ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
    } else
      ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
    ArgAttrs.push_back(AttributeSet());
  }

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        Value *NewArg = *AI;
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
              CastInst::getCastOpcode(*AI, false, PTy, false);
          NewArg = Builder.CreateCast(opcode, *AI, PTy);
        }
        Args.push_back(NewArg);

        // Add any parameter attributes.
        ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
      }
    }
  }

  AttributeSet FnAttrs = CallerPAL.getFnAttributes();

  if (NewRetTy->isVoidTy())
    Caller->setName(""); // Void type should not have a name.

  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
         "missing argument attributes");
  AttributeList NewCallerPAL = AttributeList::get(
      Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);

  SmallVector<OperandBundleDef, 1> OpBundles;
  Call.getOperandBundlesAsDefs(OpBundles);

  CallBase *NewCall;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpBundles);
  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
    NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
                                   CBI->getIndirectDests(), Args, OpBundles);
  } else {
    NewCall = Builder.CreateCall(Callee, Args, OpBundles);
    cast<CallInst>(NewCall)->setTailCallKind(
        cast<CallInst>(Caller)->getTailCallKind());
  }
  NewCall->takeName(Caller);
  NewCall->setCallingConv(Call.getCallingConv());
  NewCall->setAttributes(NewCallerPAL);

  // Preserve prof metadata if any.
  NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});

  // Insert a cast of the return type as necessary.
  Instruction *NC = NewCall;
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke/callbr instruction, we should insert it after the
      // first non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
        BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.pushUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    replaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  eraseInstFromFunction(*Caller);
  return true;
}

/// Turn a call to a function created by init_trampoline / adjust_trampoline
/// intrinsic pair into a direct call to the underlying function.
Instruction *
InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
                                                 IntrinsicInst &Tramp) {
  Value *Callee = Call.getCalledOperand();
  Type *CalleeTy = Callee->getType();
  FunctionType *FTy = Call.getFunctionType();
  AttributeList Attrs = Call.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
  FunctionType *NestFTy = NestF->getFunctionType();

  AttributeList NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestArgNo = 0;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
                                      E = NestFTy->param_end();
         I != E; ++NestArgNo, ++I) {
      AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
      if (AS.hasAttribute(Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = AS;
        break;
      }
    }

    if (NestTy) {
      std::vector<Value*> NewArgs;
      std::vector<AttributeSet> NewArgAttrs;
      NewArgs.reserve(Call.arg_size() + 1);
      NewArgAttrs.reserve(Call.arg_size());

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      {
        unsigned ArgNo = 0;
        auto I = Call.arg_begin(), E = Call.arg_end();
        do {
          if (ArgNo == NestArgNo) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp.getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewArgAttrs.push_back(NestAttr);
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));

          ++ArgNo;
          ++I;
        } while (true);
      }

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned ArgNo = 0;
        FunctionType::param_iterator I = FTy->param_begin(),
                                     E = FTy->param_end();

        do {
          if (ArgNo == NestArgNo)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++ArgNo;
          ++I;
        } while (true);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
          NestF->getType() == PointerType::getUnqual(NewFTy) ?
              NestF : ConstantExpr::getBitCast(NestF,
                                               PointerType::getUnqual(NewFTy));
      AttributeList NewPAL =
          AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
                             Attrs.getRetAttributes(), NewArgAttrs);

      SmallVector<OperandBundleDef, 1> OpBundles;
      Call.getOperandBundlesAsDefs(OpBundles);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
        NewCaller = InvokeInst::Create(NewFTy, NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs, OpBundles);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
        NewCaller =
            CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
                               CBI->getIndirectDests(), NewArgs, OpBundles);
        cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
        cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
        cast<CallInst>(NewCaller)->setTailCallKind(
            cast<CallInst>(Call).getTailCallKind());
        cast<CallInst>(NewCaller)->setCallingConv(
            cast<CallInst>(Call).getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      NewCaller->setDebugLoc(Call.getDebugLoc());

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
  Call.setCalledFunction(FTy, NewCallee);
  return &Call;
}