//===- InstCombineCalls.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall, visitInvoke, and visitCallBr functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace PatternMatch;

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window",
    cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));

namespace llvm {
/// enable preservation of attributes in assume like:
/// call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
extern cl::opt<bool> EnableKnowledgeRetention;
} // namespace llvm

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  MaybeAlign CopyDstAlign = MI->getDestAlign();
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  MaybeAlign CopySrcAlign = MI->getSourceAlign();
  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(MI->getDest())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for intrinsic. See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.

  // If it is an atomic and the alignment is less than the size, then we would
  // introduce an unaligned memory access, which is later transformed into a
  // libcall in CodeGen. That is not an evident performance gain, so disable
  // this for now.
  if (isa<AtomicMemTransferInst>(MI))
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)
      return nullptr;

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
      cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
      cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA tag describing our copy.
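  // Illustrative sketch (not from this file): the !tbaa.struct node matched
  // below is expected to describe a single member that covers the whole copy,
  // e.g. for a 4-byte copy something like
  //   !tbaa.struct !{i64 0, i64 4, !<member tbaa tag>}
  // where offset 0 and size 4 (== the copy length) let us reuse the member's
  // TBAA tag on the scalar load/store we create.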
  MDNode *CopyMD = nullptr;
  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
    CopyMD = M;
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        M->getOperand(1) &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
            Size &&
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      CopyMD = cast<MDNode>(M->getOperand(2));
  }

  Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder.CreateLoad(IntType, Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(*CopySrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(*CopyDstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // non-atomics can be volatile
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
    // atomics have to be unordered
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  MaybeAlign MemSetAlign = MI->getDestAlign();
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(MI->getDest())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
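  // Illustrative sketch (not from this file): with a small constant length the
  // memset below becomes a single integer store of the splatted fill byte, e.g.
  //   call void @llvm.memset.p0i8.i64(i8* %p, i8 7, i64 4, i1 false)
  // is rewritten (conceptually) to
  //   store i32 117901063, i32* %q   ; 0x07070707, %q is a bitcast of %p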
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  const uint64_t Len = LenC->getLimitedValue();
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = assumeAligned(MI->getDestAlignment());

  // If it is an atomic and the alignment is less than the size, then we would
  // introduce an unaligned memory access, which is later transformed into a
  // libcall in CodeGen. That is not an evident performance gain, so disable
  // this for now.
  if (isa<AtomicMemSetInst>(MI))
    if (Alignment < Len)
      return nullptr;

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                       MI->isVolatile());
    S->setAlignment(Alignment);
    if (isa<AtomicMemSetInst>(MI))
      S->setOrdering(AtomicOrdering::Unordered);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Narrow width by halves excluding zero/undef lanes
Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  const Align Alignment =
      cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();

  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
    LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                            "unmaskedload");
    L->copyMetadata(II);
    return L;
  }

  // If we can unconditionally load from this address, replace with a
  // load/select idiom. TODO: use DT for context sensitive query
  if (isDereferenceablePointer(LoadPtr, II.getType(),
                               II.getModule()->getDataLayout(), &II, nullptr)) {
    LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                             "unmaskedload");
    LI->copyMetadata(II);
    return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Narrow width by halves excluding zero/undef lanes
Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
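  // Illustrative sketch (not from this file):
  //   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v, <4 x i32>* %p,
  //                                              i32 4, <4 x i1> <all true>)
  // with an all-ones mask is equivalent to a plain
  //   store <4 x i32> %v, <4 x i32>* %p, align 4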
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
    StoreInst *S =
        new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
    S->copyMetadata(II);
    return S;
  }

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
    return replaceOperand(II, 0, V);

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane load -> load
// * Dereferenceable address & few lanes -> scalarize speculative load/selects
// * Adjacent vector addresses -> masked.load
// * Narrow width by halves excluding zero/undef lanes
// * Vector incrementing address -> vector masked load
Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
  if (!ConstMask)
    return nullptr;

  // Vector splat address w/known mask -> scalar load
  // Fold the gather to load the source vector first lane
  // because it is reloading the same value each time
  if (ConstMask->isAllOnesValue())
    if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
      auto *VecTy = cast<VectorType>(II.getType());
      const Align Alignment =
          cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
      LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
                                              Alignment, "load.scalar");
      Value *Shuf =
          Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");
      return replaceInstUsesWith(II, cast<Instruction>(Shuf));
    }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Adjacent vector addresses -> masked.store
// * Narrow store width by halves excluding zero/undef lanes
// * Vector incrementing address -> vector masked store
Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, a scatter does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // Vector splat address -> scalar store
  if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) {
    // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr
    if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) {
      Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
      StoreInst *S =
          new StoreInst(SplatValue, SplatPtr, /*IsVolatile=*/false, Alignment);
      S->copyMetadata(II);
      return S;
    }
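    // Illustrative sketch (not from this file): when both the stored vector
    // and the pointer vector are splats, every active lane writes the same
    // scalar to the same address, so for any non-zero mask a single
    //   store i32 %val, i32* %ptr
    // (with the scatter's alignment) is equivalent.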
    // scatter(vector, splat(ptr), splat(true)) -> store extract(vector,
    // lastlane), ptr
    if (ConstMask->isAllOnesValue()) {
      Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
      VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType());
      ElementCount VF = WideLoadTy->getElementCount();
      Constant *EC =
          ConstantInt::get(Builder.getInt32Ty(), VF.getKnownMinValue());
      Value *RunTimeVF = VF.isScalable() ? Builder.CreateVScale(EC) : EC;
      Value *LastLane = Builder.CreateSub(RunTimeVF, Builder.getInt32(1));
      Value *Extract =
          Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
      StoreInst *S =
          new StoreInst(Extract, SplatPtr, /*IsVolatile=*/false, Alignment);
      S->copyMetadata(II);
      return S;
    }
  }
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
    return replaceOperand(II, 0, V);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts, UndefElts))
    return replaceOperand(II, 1, V);

  return nullptr;
}

/// This function transforms launder.invariant.group and strip.invariant.group
/// like:
/// launder(launder(%x)) -> launder(%x) (the result is not the argument)
/// launder(strip(%x)) -> launder(%x)
/// strip(strip(%x)) -> strip(%x) (the result is not the argument)
/// strip(launder(%x)) -> strip(%x)
/// This is legal because it preserves the most recent information about
/// the presence or absence of invariant.group.
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombinerImpl &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = StrippedArg;
  while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
    if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
        Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
      break;
    StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
  }
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.

  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
  if (Result->getType() != II.getType())
    Result = IC.Builder.CreateBitCast(Result, II.getType());

  return cast<Instruction>(Result);
}

static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  Value *Op0 = II.getArgOperand(0);
  Value *Op1 = II.getArgOperand(1);
  Value *X;
  // ctlz(bitreverse(x)) -> cttz(x)
  // cttz(bitreverse(x)) -> ctlz(x)
  if (match(Op0, m_BitReverse(m_Value(X)))) {
    Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
    Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
    return CallInst::Create(F, {X, II.getArgOperand(1)});
  }

  if (II.getType()->isIntOrIntVectorTy(1)) {
    // ctlz/cttz i1 Op0 --> not Op0
    if (match(Op1, m_Zero()))
      return BinaryOperator::CreateNot(Op0);
    // If zero is poison, then the input can be assumed to be "true", so the
    // instruction simplifies to "false".
    assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");
    return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(II.getType()));
  }

  // If the operand is a select with constant arm(s), try to hoist ctlz/cttz.
  if (auto *Sel = dyn_cast<SelectInst>(Op0))
    if (Instruction *R = IC.FoldOpIntoSelect(II, Sel))
      return R;

  if (IsTZ) {
    // cttz(-x) -> cttz(x)
    if (match(Op0, m_Neg(m_Value(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(sext(x)) -> cttz(zext(x))
    if (match(Op0, m_OneUse(m_SExt(m_Value(X))))) {
      auto *Zext = IC.Builder.CreateZExt(X, II.getType());
      auto *CttzZext =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Zext, Op1);
      return IC.replaceInstUsesWith(II, CttzZext);
    }

    // Zext doesn't change the number of trailing zeros, so narrow:
    // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsPoison' parameter is 'true'.
    if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) {
      auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X,
                                                    IC.Builder.getTrue());
      auto *ZextCttz = IC.Builder.CreateZExt(Cttz, II.getType());
      return IC.replaceInstUsesWith(II, ZextCttz);
    }

    // cttz(abs(x)) -> cttz(x)
    // cttz(nabs(x)) -> cttz(x)
    Value *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return IC.replaceOperand(II, 0, X);

    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
      return IC.replaceOperand(II, 0, X);
  }

  KnownBits Known = IC.computeKnownBits(Op0, 0, &II);

  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
  unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
                                : Known.countMaxLeadingZeros();
  unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
                                : Known.countMinLeadingZeros();

  // If all bits above (ctlz) or below (cttz) the first known one are known
  // zero, this value is constant.
  // FIXME: This should be in InstSimplify because we're replacing an
  // instruction with a constant.
  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
    return IC.replaceInstUsesWith(II, C);
  }

  // If the input to cttz/ctlz is known to be non-zero,
  // then change the 'ZeroIsPoison' parameter to 'true'
  // because we know the zero behavior can't affect the result.
  if (!Known.One.isZero() ||
      isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
                     &IC.getDominatorTree())) {
    if (!match(II.getArgOperand(1), m_One()))
      return IC.replaceOperand(II, 1, IC.Builder.getTrue());
  }

  // Add range metadata since known bits can't completely reflect what we know.
  // TODO: Handle splat vectors.
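  // Illustrative sketch (not from this file): for an i32 cttz whose input is
  // known to have at least 2 and at most 20 trailing zeros, the metadata
  // attached below would be
  //   !range !{i32 2, i32 21}
  // i.e. the half-open interval [DefiniteZeros, PossibleZeros + 1).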
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
  if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
        ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  Type *Ty = II.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  Value *Op0 = II.getArgOperand(0);
  Value *X, *Y;

  // ctpop(bitreverse(x)) -> ctpop(x)
  // ctpop(bswap(x)) -> ctpop(x)
  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
    return IC.replaceOperand(II, 0, X);

  // ctpop(rot(x)) -> ctpop(x)
  if ((match(Op0, m_FShl(m_Value(X), m_Value(Y), m_Value())) ||
       match(Op0, m_FShr(m_Value(X), m_Value(Y), m_Value()))) &&
      X == Y)
    return IC.replaceOperand(II, 0, X);

  // ctpop(x | -x) -> bitwidth - cttz(x, false)
  if (Op0->hasOneUse() &&
      match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
    auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
    return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
  }

  // ctpop(~x & (x - 1)) -> cttz(x, false)
  if (match(Op0,
            m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    return CallInst::Create(F, {X, IC.Builder.getFalse()});
  }

  // Zext doesn't change the number of set bits, so narrow:
  // ctpop (zext X) --> zext (ctpop X)
  if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
    Value *NarrowPop = IC.Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, X);
    return CastInst::Create(Instruction::ZExt, NarrowPop, Ty);
  }

  // If the operand is a select with constant arm(s), try to hoist ctpop.
  if (auto *Sel = dyn_cast<SelectInst>(Op0))
    if (Instruction *R = IC.FoldOpIntoSelect(II, Sel))
      return R;

  KnownBits Known(BitWidth);
  IC.computeKnownBits(Op0, Known, 0, &II);

  // If all bits are zero except for exactly one fixed bit, then the result
  // must be 0 or 1, and we can get that answer by shifting to LSB:
  // ctpop (X & 32) --> (X & 32) >> 5
  if ((~Known.Zero).isPowerOf2())
    return BinaryOperator::CreateLShr(
        Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));

  // FIXME: Try to simplify vectors of integers.
  auto *IT = dyn_cast<IntegerType>(Ty);
  if (!IT)
    return nullptr;

  // Add range metadata since known bits can't completely reflect what we know.
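  // Illustrative sketch (not from this file): if known bits show that at
  // least 1 and at most 3 bits of an i32 operand can be set, the call gets
  //   !range !{i32 1, i32 4}
  // i.e. [MinCount, MaxCount + 1).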
  unsigned MinCount = Known.countMinPopulation();
  unsigned MaxCount = Known.countMaxPopulation();
  if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
        ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

/// Convert a table lookup to shufflevector if the mask is constant.
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
/// which case we could lower the shufflevector with rev64 instructions
/// as it's actually a byte reverse.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<FixedVectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  int Indexes[8];

  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if ((unsigned)Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *V1 = II.getArgOperand(0);
  auto *V2 = Constant::getNullValue(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, makeArrayRef(Indexes));
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  assert(E.arg_size() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

// Remove trivially empty start/end intrinsic ranges, i.e. a start
// immediately followed by an end (ignoring debuginfo or other
// start/end intrinsics in between). As this handles only the most trivial
// cases, tracking the nesting level is not needed:
//
//   call @llvm.foo.start(i1 0)
//   call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
//   call @llvm.foo.end(i1 0)
//   call @llvm.foo.end(i1 0) ; &I
static bool
removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
                          std::function<bool(const IntrinsicInst &)> IsStart) {
  // We start from the end intrinsic and scan backwards, so that InstCombine
  // has already processed (and potentially removed) all the instructions
  // before the end intrinsic.
  BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
      if (I->isDebugOrPseudoInst() ||
          I->getIntrinsicID() == EndI.getIntrinsicID())
        continue;
      if (IsStart(*I)) {
        if (haveSameOperands(EndI, *I, EndI.arg_size())) {
          IC.eraseInstFromFunction(*I);
          IC.eraseInstFromFunction(EndI);
          return true;
        }
        // Skip start intrinsics that don't pair with this end intrinsic.
        continue;
      }
    }
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
  removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
    return I.getIntrinsicID() == Intrinsic::vastart ||
           I.getIntrinsicID() == Intrinsic::vacopy;
  });
  return nullptr;
}

static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
  assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);
    return &Call;
  }
  return nullptr;
}

/// Creates a result tuple for an overflow intrinsic \p II with a given
/// \p Result and a constant \p Overflow value.
static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
                                        Constant *Overflow) {
  Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
  StructType *ST = cast<StructType>(II->getType());
  Constant *Struct = ConstantStruct::get(ST, V);
  return InsertValueInst::Create(Struct, Result, 0);
}

Instruction *
InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
  WithOverflowInst *WO = cast<WithOverflowInst>(II);
  Value *OperationResult = nullptr;
  Constant *OverflowResult = nullptr;
  if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
                            WO->getRHS(), *WO, OperationResult, OverflowResult))
    return createOverflowTuple(WO, OperationResult, OverflowResult);
  return nullptr;
}

static Optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
                                   const DataLayout &DL, AssumptionCache *AC,
                                   DominatorTree *DT) {
  KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT);
  if (Known.isNonNegative())
    return false;
  if (Known.isNegative())
    return true;

  return isImpliedByDomCondition(
      ICmpInst::ICMP_SLT, Op, Constant::getNullValue(Op->getType()), CxtI, DL);
}

/// Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0. This
/// can trigger other combines.
static Instruction *moveAddAfterMinMax(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
          MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
         "Expected a min or max intrinsic");

  // TODO: Match vectors with undef elements, but undef may not propagate.
  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C0)))) ||
      !match(Op1, m_APInt(C1)))
    return nullptr;

  // Check for necessary no-wrap and overflow constraints.
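  // Illustrative sketch (not from this file): smax(X +nsw 16, 32) becomes
  // (smax X, 16) +nsw 16. Without the matching nsw/nuw flag, X + 16 could
  // wrap past the bound and the min/max could select the other operand, so
  // the fold is only done when the add cannot wrap.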
  bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
  auto *Add = cast<BinaryOperator>(Op0);
  if ((IsSigned && !Add->hasNoSignedWrap()) ||
      (!IsSigned && !Add->hasNoUnsignedWrap()))
    return nullptr;

  // If the constant difference overflows, then instsimplify should reduce the
  // min/max to the add or C1.
  bool Overflow;
  APInt CDiff =
      IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
  assert(!Overflow && "Expected simplify of min/max");

  // min/max (add X, C0), C1 --> add (min/max X, C1 - C0), C0
  // Note: the "mismatched" no-overflow setting does not propagate.
  Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
  Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
  return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                  : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
}

/// If we have a clamp pattern like max (min X, 42), 41 -- where the output
/// can only be one of two possible constant values -- turn that into a select
/// of constants.
static Instruction *foldClampRangeOfTwo(IntrinsicInst *II,
                                        InstCombiner::BuilderTy &Builder) {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(I1, m_APInt(C1)) || !I0->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
    if (match(I0, m_SMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_SGT;
    break;
  case Intrinsic::smin:
    if (match(I0, m_SMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_SLT;
    break;
  case Intrinsic::umax:
    if (match(I0, m_UMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_UGT;
    break;
  case Intrinsic::umin:
    if (match(I0, m_UMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_ULT;
    break;
  default:
    llvm_unreachable("Expected min/max intrinsic");
  }
  if (Pred == CmpInst::BAD_ICMP_PREDICATE)
    return nullptr;

  // max (min X, 42), 41 --> X > 41 ? 42 : 41
  // min (max X, 42), 43 --> X < 43 ? 42 : 43
  Value *Cmp = Builder.CreateICmp(Pred, X, I1);
  return SelectInst::Create(Cmp, ConstantInt::get(II->getType(), *C0), I1);
}

/// Reduce a sequence of min/max intrinsics with a common operand.
static Instruction *factorizeMinMaxTree(IntrinsicInst *II) {
  // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
      RHS->getIntrinsicID() != MinMaxID ||
      (!LHS->hasOneUse() && !RHS->hasOneUse()))
    return nullptr;

  Value *A = LHS->getArgOperand(0);
  Value *B = LHS->getArgOperand(1);
  Value *C = RHS->getArgOperand(0);
  Value *D = RHS->getArgOperand(1);

  // Look for a common operand.
  Value *MinMaxOp = nullptr;
  Value *ThirdOp = nullptr;
  if (LHS->hasOneUse()) {
    // If the LHS is only used in this chain and the RHS is used outside of it,
    // reuse the RHS min/max because that will eliminate the LHS.
    if (D == A || C == A) {
      // min(min(a, b), min(c, a)) --> min(min(c, a), b)
      // min(min(a, b), min(a, d)) --> min(min(a, d), b)
      MinMaxOp = RHS;
      ThirdOp = B;
    } else if (D == B || C == B) {
      // min(min(a, b), min(c, b)) --> min(min(c, b), a)
      // min(min(a, b), min(b, d)) --> min(min(b, d), a)
      MinMaxOp = RHS;
      ThirdOp = A;
    }
  } else {
    assert(RHS->hasOneUse() && "Expected one-use operand");
    // Reuse the LHS. This will eliminate the RHS.
    if (D == A || D == B) {
      // min(min(a, b), min(c, a)) --> min(min(a, b), c)
      // min(min(a, b), min(c, b)) --> min(min(a, b), c)
      MinMaxOp = LHS;
      ThirdOp = C;
    } else if (C == A || C == B) {
      // min(min(a, b), min(b, d)) --> min(min(a, b), d)
      // min(min(a, b), min(a, d)) --> min(min(a, b), d)
      MinMaxOp = LHS;
      ThirdOp = D;
    }
  }

  if (!MinMaxOp || !ThirdOp)
    return nullptr;

  Module *Mod = II->getModule();
  Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType());
  return CallInst::Create(MinMax, { MinMaxOp, ThirdOp });
}

/// CallInst simplification. This mostly only handles folding of intrinsic
/// instructions. For normal calls, it allows visitCallBase to do the heavy
/// lifting.
Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
  // Don't try to simplify calls without uses. It will not do anything useful,
  // but will result in the following folds being skipped.
  if (!CI.use_empty())
    if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
      return replaceInstUsesWith(CI, V);

  if (isFreeCall(&CI, &TLI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallBase(CI);

  // For atomic unordered mem intrinsics, if the length is not positive or is
  // not a multiple of the element size, then the behavior is undefined.
  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
    if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
      if (NumBytes->getSExtValue() < 0 ||
          (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
        CreateNonTerminatorUnreachable(AMI);
        assert(AMI->getType()->isVoidTy() &&
               "non void atomic unordered mem intrinsic");
        return eraseInstFromFunction(*AMI);
      }

  // Intrinsics cannot occur in an invoke or a callbr, so handle them here
  // instead of in visitCallBase.
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return eraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (auto *M = dyn_cast<MemIntrinsic>(MI))
      if (M->isVolatile())
        return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
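    // Illustrative sketch (not from this file): a memmove whose source is a
    // constant global such as
    //   call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst,
    //       i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i64 0, i64 0),
    //       i64 5, i1 false)
    // cannot overlap its destination (writing through %dst would modify
    // constant memory), so the call can be rewritten to the memcpy intrinsic.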
    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getModule();
          Intrinsic::ID MemCpyID =
              isa<AtomicMemMoveInst>(MMI)
                  ? Intrinsic::memcpy_element_unordered_atomic
                  : Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return eraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemTransfer(MTI))
        return I;
    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  // For fixed width vector result intrinsics, use the generic demanded vector
  // support.
  if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
    auto VWidth = IIFVTy->getNumElements();
    APInt UndefElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
      if (V != II)
        return replaceInstUsesWith(*II, V);
      return II;
    }
  }

  if (II->isCommutative()) {
    if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
      return NewCall;
  }

  Intrinsic::ID IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::objectsize:
    if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
      return replaceInstUsesWith(CI, V);
    return nullptr;
  case Intrinsic::abs: {
    Value *IIOperand = II->getArgOperand(0);
    bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();

    // abs(-x) -> abs(x)
    // TODO: Copy nsw if it was present on the neg?
    Value *X;
    if (match(IIOperand, m_Neg(m_Value(X))))
      return replaceOperand(*II, 0, X);
    if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X)))))
      return replaceOperand(*II, 0, X);
    if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X))))
      return replaceOperand(*II, 0, X);

    if (Optional<bool> Sign = getKnownSign(IIOperand, II, DL, &AC, &DT)) {
      // abs(x) -> x if x >= 0
      if (!*Sign)
        return replaceInstUsesWith(*II, IIOperand);

      // abs(x) -> -x if x < 0
      if (IntMinIsPoison)
        return BinaryOperator::CreateNSWNeg(IIOperand);
      return BinaryOperator::CreateNeg(IIOperand);
    }

    // abs (sext X) --> zext (abs X*)
    // Clear the IsIntMin (nsw) bit on the abs to allow narrowing.
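    // Illustrative sketch (not from this file): for i8 %x,
    //   abs(sext i8 %x to i32)  -->  zext (abs i8 %x, IntMinIsPoison=false) to i32
    // Even for %x == -128 the narrow abs returns the bit pattern 0x80, which
    // zero-extends to 128 and matches the wide result; that is why the
    // IntMinIsPoison bit must be cleared on the narrow call.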
    if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) {
      Value *NarrowAbs =
          Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());
      return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType());
    }

    // Match a complicated way to check if a number is odd/even:
    // abs (srem X, 2) --> and X, 1
    const APInt *C;
    if (match(IIOperand, m_SRem(m_Value(X), m_APInt(C))) && *C == 2)
      return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));

    break;
  }
  case Intrinsic::umin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    // umin(x, 1) == zext(x != 0)
    if (match(I1, m_One())) {
      Value *Zero = Constant::getNullValue(I0->getType());
      Value *Cmp = Builder.CreateICmpNE(I0, Zero);
      return CastInst::Create(Instruction::ZExt, Cmp, II->getType());
    }
    LLVM_FALLTHROUGH;
  }
  case Intrinsic::umax: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) &&
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
    }
    Constant *C;
    if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_Constant(C)) &&
        I0->hasOneUse()) {
      Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
      if (ConstantExpr::getZExt(NarrowC, II->getType()) == C) {
        Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
        return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
      }
    }
    // If both operands of unsigned min/max are sign-extended, it is still ok
    // to narrow the operation.
    LLVM_FALLTHROUGH;
  }
  case Intrinsic::smax:
  case Intrinsic::smin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) &&
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
    }

    Constant *C;
    if (match(I0, m_SExt(m_Value(X))) && match(I1, m_Constant(C)) &&
        I0->hasOneUse()) {
      Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
      if (ConstantExpr::getSExt(NarrowC, II->getType()) == C) {
        Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
        return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
      }
    }

    if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
      // smax (neg nsw X), (neg nsw Y) --> neg nsw (smin X, Y)
      // smin (neg nsw X), (neg nsw Y) --> neg nsw (smax X, Y)
      // TODO: Canonicalize neg after min/max if I1 is constant.
      if (match(I0, m_NSWNeg(m_Value(X))) && match(I1, m_NSWNeg(m_Value(Y))) &&
          (I0->hasOneUse() || I1->hasOneUse())) {
        Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
        Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
        return BinaryOperator::CreateNSWNeg(InvMaxMin);
      }
    }

    // If we can eliminate ~A and Y is free to invert:
    // max ~A, Y --> ~(min A, ~Y)
    //
    // Examples:
    // max ~A, ~Y --> ~(min A, Y)
    // max ~A, C --> ~(min A, ~C)
    // max ~A, (max ~Y, ~Z) --> ~min( A, (min Y, Z))
    auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
      Value *A;
      if (match(X, m_OneUse(m_Not(m_Value(A)))) &&
          !isFreeToInvert(A, A->hasOneUse()) &&
          isFreeToInvert(Y, Y->hasOneUse())) {
        Value *NotY = Builder.CreateNot(Y);
        Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
        Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
        return BinaryOperator::CreateNot(InvMaxMin);
      }
      return nullptr;
    };

    if (Instruction *I = moveNotAfterMinMax(I0, I1))
      return I;
    if (Instruction *I = moveNotAfterMinMax(I1, I0))
      return I;

    if (Instruction *I = moveAddAfterMinMax(II, Builder))
      return I;

    // smax(X, -X) --> abs(X)
    // smin(X, -X) --> -abs(X)
    // umax(X, -X) --> -abs(X)
    // umin(X, -X) --> abs(X)
    if (isKnownNegation(I0, I1)) {
      // We can choose either operand as the input to abs(), but if we can
      // eliminate the only use of a value, that's better for subsequent
      // transforms/analysis.
      if (I0->hasOneUse() && !I1->hasOneUse())
        std::swap(I0, I1);

      // This is some variant of abs(). See if we can propagate 'nsw' to the abs
      // operation and potentially its negation.
      bool IntMinIsPoison = isKnownNegation(I0, I1, /* NeedNSW */ true);
      Value *Abs = Builder.CreateBinaryIntrinsic(
          Intrinsic::abs, I0,
          ConstantInt::getBool(II->getContext(), IntMinIsPoison));

      // We don't have a "nabs" intrinsic, so negate if needed based on the
      // max/min operation.
      if (IID == Intrinsic::smin || IID == Intrinsic::umax)
        Abs = Builder.CreateNeg(Abs, "nabs", /* NUW */ false, IntMinIsPoison);
      return replaceInstUsesWith(CI, Abs);
    }

    if (Instruction *Sel = foldClampRangeOfTwo(II, Builder))
      return Sel;

    if (Instruction *SAdd = matchSAddSubSat(*II))
      return SAdd;

    if (match(I1, m_ImmConstant()))
      if (auto *Sel = dyn_cast<SelectInst>(I0))
        if (Instruction *R = FoldOpIntoSelect(*II, Sel))
          return R;

    if (Instruction *NewMinMax = factorizeMinMaxTree(II))
      return NewMinMax;

    break;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    KnownBits Known = computeKnownBits(IIOperand, 0, II);
    uint64_t LZ = alignDown(Known.countMinLeadingZeros(), 8);
    uint64_t TZ = alignDown(Known.countMinTrailingZeros(), 8);

    // bswap(x) -> shift(x) if x has exactly one "active byte"
    if (Known.getBitWidth() - LZ - TZ == 8) {
      assert(LZ != TZ && "active byte cannot be in the middle");
      if (LZ > TZ) // -> shl(x) if the "active byte" is in the low part of x
        return BinaryOperator::CreateNUWShl(
            IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));
      // -> lshr(x) if the "active byte" is in the high part of x
      return BinaryOperator::CreateExactLShr(
          IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));
    }

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getScalarSizeInBits() -
                   IIOperand->getType()->getScalarSizeInBits();
      Value *CV = ConstantInt::get(X->getType(), C);
      Value *V = Builder.CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }
  case Intrinsic::masked_load:
    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
      return replaceInstUsesWith(CI, SimplifiedMaskedOp);
    break;
  case Intrinsic::masked_store:
    return simplifyMaskedStore(*II);
  case Intrinsic::masked_gather:
    return simplifyMaskedGather(*II);
  case Intrinsic::masked_scatter:
    return simplifyMaskedScatter(*II);
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
    if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
      return replaceInstUsesWith(*II, SkippedBarrier);
    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // 0 and 1 are handled in instsimplify
      // powi(x, -1) -> 1/x
      if (Power->isMinusOne())
        return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0),
                                             II->getArgOperand(0), II);
      // powi(x, 2) -> x*x
      if (Power->equalsInt(2))
        return BinaryOperator::CreateFMulFMF(II->getArgOperand(0),
                                             II->getArgOperand(0), II);

      if (!Power->getValue()[0]) {
        Value *X;
        // If power is even:
        // powi(-x, p) -> powi(x, p)
        // powi(fabs(x), p) -> powi(x, p)
        // powi(copysign(x, y), p) -> powi(x, p)
        if (match(II->getArgOperand(0), m_FNeg(m_Value(X))) ||
            match(II->getArgOperand(0), m_FAbs(m_Value(X))) ||
            match(II->getArgOperand(0),
                  m_Intrinsic<Intrinsic::copysign>(m_Value(X), m_Value())))
          return replaceOperand(*II, 0, X);
      }
    }
    break;

  case Intrinsic::cttz:
  case Intrinsic::ctlz:
    if (auto *I = foldCttzCtlz(*II, *this))
      return I;
    break;

  case Intrinsic::ctpop:
    if (auto *I = foldCtpop(*II, *this))
      return I;
    break;

  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
    Type *Ty = II->getType();
    unsigned BitWidth = Ty->getScalarSizeInBits();
    Constant *ShAmtC;
    if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC)) &&
        !ShAmtC->containsConstantExpression()) {
      // Canonicalize a shift amount constant operand to modulo the bit-width.
      Constant *WidthC = ConstantInt::get(Ty, BitWidth);
      Constant *ModuloC = ConstantExpr::getURem(ShAmtC, WidthC);
      if (ModuloC != ShAmtC)
        return replaceOperand(*II, 2, ModuloC);

      assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
                 ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) &&
             "Shift amount expected to be modulo bitwidth");

      // Canonicalize funnel shift right by constant to funnel shift left. This
      // is not entirely arbitrary. For historical reasons, the backend may
      // recognize rotate left patterns but miss rotate right patterns.
      if (IID == Intrinsic::fshr) {
        // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
        Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
        Module *Mod = II->getModule();
        Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
        return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
      }
      assert(IID == Intrinsic::fshl &&
             "All funnel shifts by simple constants should go left");

      // fshl(X, 0, C) --> shl X, C
      // fshl(X, undef, C) --> shl X, C
      if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
        return BinaryOperator::CreateShl(Op0, ShAmtC);

      // fshl(0, X, C) --> lshr X, (BW-C)
      // fshl(undef, X, C) --> lshr X, (BW-C)
      if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
        return BinaryOperator::CreateLShr(Op1,
                                          ConstantExpr::getSub(WidthC, ShAmtC));

      // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
      if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
        Module *Mod = II->getModule();
        Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
        return CallInst::Create(Bswap, { Op0 });
      }
    }

    // Left or right might be masked.
    if (SimplifyDemandedInstructionBits(*II))
      return &CI;

    // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
    // so only the low bits of the shift amount are demanded if the bitwidth is
    // a power-of-2.
    if (!isPowerOf2_32(BitWidth))
      break;
    APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
    KnownBits Op2Known(BitWidth);
    if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
      return &CI;
    break;
  }
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;

    // Given 2 constant operands whose sum does not overflow:
    // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
    // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
    Value *X;
    const APInt *C0, *C1;
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    bool IsSigned = IID == Intrinsic::sadd_with_overflow;
    bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
                             : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
    if (HasNWAdd && match(Arg1, m_APInt(C1))) {
      bool Overflow;
      APInt NewC =
          IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
      if (!Overflow)
        return replaceInstUsesWith(
            *II, Builder.CreateBinaryIntrinsic(
                     IID, X, ConstantInt::get(Arg1->getType(), NewC)));
    }
    break;
  }

  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::usub_with_overflow:
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;
    break;

  case Intrinsic::ssub_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;

    Constant *C;
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    // Given a constant C that is not the minimum signed value
    // for an integer of a given bit width:
    //
    // ssubo X, C -> saddo X, -C
    if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
      Value *NegVal = ConstantExpr::getNeg(C);
      // Build a saddo call that is equivalent to the discovered
      // ssubo call.
      return replaceInstUsesWith(
          *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
                                             Arg0, NegVal));
    }

    break;
  }

  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat: {
    SaturatingInst *SI = cast<SaturatingInst>(II);
    Type *Ty = SI->getType();
    Value *Arg0 = SI->getLHS();
    Value *Arg1 = SI->getRHS();

    // Make use of known overflow information.
    OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
                                        Arg0, Arg1, SI);
    switch (OR) {
    case OverflowResult::MayOverflow:
      break;
    case OverflowResult::NeverOverflows:
      if (SI->isSigned())
        return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
      else
        return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
    case OverflowResult::AlwaysOverflowsLow: {
      unsigned BitWidth = Ty->getScalarSizeInBits();
      APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
      return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
    }
    case OverflowResult::AlwaysOverflowsHigh: {
      unsigned BitWidth = Ty->getScalarSizeInBits();
      APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
      return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
    }
    }

    // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
    Constant *C;
    if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
        C->isNotMinSignedValue()) {
      Value *NegVal = ConstantExpr::getNeg(C);
      return replaceInstUsesWith(
          *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_sat, Arg0,
                                             NegVal));
    }

    // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
    // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
    // if Val and Val2 have the same sign
    if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
      Value *X;
      const APInt *Val, *Val2;
      APInt NewVal;
      bool IsUnsigned =
          IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
      if (Other->getIntrinsicID() == IID &&
          match(Arg1, m_APInt(Val)) &&
          match(Other->getArgOperand(0), m_Value(X)) &&
          match(Other->getArgOperand(1), m_APInt(Val2))) {
        if (IsUnsigned)
          NewVal = Val->uadd_sat(*Val2);
if (Val->isNonNegative() == Val2->isNonNegative()) { 1494 bool Overflow; 1495 NewVal = Val->sadd_ov(*Val2, Overflow); 1496 if (Overflow) { 1497 // Both adds together may add more than SignedMaxValue 1498 // without saturating the final result. 1499 break; 1500 } 1501 } else { 1502 // Cannot fold saturated addition with different signs. 1503 break; 1504 } 1505 1506 return replaceInstUsesWith( 1507 *II, Builder.CreateBinaryIntrinsic( 1508 IID, X, ConstantInt::get(II->getType(), NewVal))); 1509 } 1510 } 1511 break; 1512 } 1513 1514 case Intrinsic::minnum: 1515 case Intrinsic::maxnum: 1516 case Intrinsic::minimum: 1517 case Intrinsic::maximum: { 1518 Value *Arg0 = II->getArgOperand(0); 1519 Value *Arg1 = II->getArgOperand(1); 1520 Value *X, *Y; 1521 if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) && 1522 (Arg0->hasOneUse() || Arg1->hasOneUse())) { 1523 // If both operands are negated, invert the call and negate the result: 1524 // min(-X, -Y) --> -(max(X, Y)) 1525 // max(-X, -Y) --> -(min(X, Y)) 1526 Intrinsic::ID NewIID; 1527 switch (IID) { 1528 case Intrinsic::maxnum: 1529 NewIID = Intrinsic::minnum; 1530 break; 1531 case Intrinsic::minnum: 1532 NewIID = Intrinsic::maxnum; 1533 break; 1534 case Intrinsic::maximum: 1535 NewIID = Intrinsic::minimum; 1536 break; 1537 case Intrinsic::minimum: 1538 NewIID = Intrinsic::maximum; 1539 break; 1540 default: 1541 llvm_unreachable("unexpected intrinsic ID"); 1542 } 1543 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II); 1544 Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall); 1545 FNeg->copyIRFlags(II); 1546 return FNeg; 1547 } 1548 1549 // m(m(X, C2), C1) -> m(X, C) 1550 const APFloat *C1, *C2; 1551 if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) { 1552 if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) && 1553 ((match(M->getArgOperand(0), m_Value(X)) && 1554 match(M->getArgOperand(1), m_APFloat(C2))) || 1555 (match(M->getArgOperand(1), m_Value(X)) && 1556 match(M->getArgOperand(0), m_APFloat(C2))))) { 1557 APFloat Res(0.0); 1558 switch (IID) { 1559 case Intrinsic::maxnum: 1560 Res = maxnum(*C1, *C2); 1561 break; 1562 case Intrinsic::minnum: 1563 Res = minnum(*C1, *C2); 1564 break; 1565 case Intrinsic::maximum: 1566 Res = maximum(*C1, *C2); 1567 break; 1568 case Intrinsic::minimum: 1569 Res = minimum(*C1, *C2); 1570 break; 1571 default: 1572 llvm_unreachable("unexpected intrinsic ID"); 1573 } 1574 Instruction *NewCall = Builder.CreateBinaryIntrinsic( 1575 IID, X, ConstantFP::get(Arg0->getType(), Res), II); 1576 // TODO: Conservatively intersecting FMF. If Res == C2, the transform 1577 // was a simplification (so Arg0 and its original flags could 1578 // propagate?) 1579 NewCall->andIRFlags(M); 1580 return replaceInstUsesWith(*II, NewCall); 1581 } 1582 } 1583 1584 // m((fpext X), (fpext Y)) -> fpext (m(X, Y)) 1585 if (match(Arg0, m_OneUse(m_FPExt(m_Value(X)))) && 1586 match(Arg1, m_OneUse(m_FPExt(m_Value(Y)))) && 1587 X->getType() == Y->getType()) { 1588 Value *NewCall = 1589 Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName()); 1590 return new FPExtInst(NewCall, II->getType()); 1591 } 1592 1593 // max X, -X --> fabs X 1594 // min X, -X --> -(fabs X) 1595 // TODO: Remove one-use limitation? That is obviously better for max. 1596 // It would be an extra instruction for min (fnabs), but that is 1597 // still likely better for analysis and codegen. 
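    // For illustration only (hypothetical f32 values, not taken from the source):
    //   %n = fneg float %x
    //   %r = call float @llvm.maxnum.f32(float %x, float %n)
    // is folded by the code below to:
    //   %r = call float @llvm.fabs.f32(float %x)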
1598 if ((match(Arg0, m_OneUse(m_FNeg(m_Value(X)))) && Arg1 == X) || 1599 (match(Arg1, m_OneUse(m_FNeg(m_Value(X)))) && Arg0 == X)) { 1600 Value *R = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II); 1601 if (IID == Intrinsic::minimum || IID == Intrinsic::minnum) 1602 R = Builder.CreateFNegFMF(R, II); 1603 return replaceInstUsesWith(*II, R); 1604 } 1605 1606 break; 1607 } 1608 case Intrinsic::fmuladd: { 1609 // Canonicalize fast fmuladd to the separate fmul + fadd. 1610 if (II->isFast()) { 1611 BuilderTy::FastMathFlagGuard Guard(Builder); 1612 Builder.setFastMathFlags(II->getFastMathFlags()); 1613 Value *Mul = Builder.CreateFMul(II->getArgOperand(0), 1614 II->getArgOperand(1)); 1615 Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2)); 1616 Add->takeName(II); 1617 return replaceInstUsesWith(*II, Add); 1618 } 1619 1620 // Try to simplify the underlying FMul. 1621 if (Value *V = SimplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1), 1622 II->getFastMathFlags(), 1623 SQ.getWithInstruction(II))) { 1624 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2)); 1625 FAdd->copyFastMathFlags(II); 1626 return FAdd; 1627 } 1628 1629 LLVM_FALLTHROUGH; 1630 } 1631 case Intrinsic::fma: { 1632 // fma fneg(x), fneg(y), z -> fma x, y, z 1633 Value *Src0 = II->getArgOperand(0); 1634 Value *Src1 = II->getArgOperand(1); 1635 Value *X, *Y; 1636 if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) { 1637 replaceOperand(*II, 0, X); 1638 replaceOperand(*II, 1, Y); 1639 return II; 1640 } 1641 1642 // fma fabs(x), fabs(x), z -> fma x, x, z 1643 if (match(Src0, m_FAbs(m_Value(X))) && 1644 match(Src1, m_FAbs(m_Specific(X)))) { 1645 replaceOperand(*II, 0, X); 1646 replaceOperand(*II, 1, X); 1647 return II; 1648 } 1649 1650 // Try to simplify the underlying FMul. We can only apply simplifications 1651 // that do not require rounding. 1652 if (Value *V = SimplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1), 1653 II->getFastMathFlags(), 1654 SQ.getWithInstruction(II))) { 1655 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2)); 1656 FAdd->copyFastMathFlags(II); 1657 return FAdd; 1658 } 1659 1660 // fma x, y, 0 -> fmul x, y 1661 // This is always valid for -0.0, but requires nsz for +0.0 as 1662 // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own. 1663 if (match(II->getArgOperand(2), m_NegZeroFP()) || 1664 (match(II->getArgOperand(2), m_PosZeroFP()) && 1665 II->getFastMathFlags().noSignedZeros())) 1666 return BinaryOperator::CreateFMulFMF(Src0, Src1, II); 1667 1668 break; 1669 } 1670 case Intrinsic::copysign: { 1671 Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1); 1672 if (SignBitMustBeZero(Sign, &TLI)) { 1673 // If we know that the sign argument is positive, reduce to FABS: 1674 // copysign Mag, +Sign --> fabs Mag 1675 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II); 1676 return replaceInstUsesWith(*II, Fabs); 1677 } 1678 // TODO: There should be a ValueTracking sibling like SignBitMustBeOne. 
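    // For illustration (hypothetical operands): a constant sign argument such as
    // 2.0 has a known-zero sign bit, so
    //   %r = call float @llvm.copysign.f32(float %mag, float 2.0)
    // is reduced by the code above to:
    //   %r = call float @llvm.fabs.f32(float %mag)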
1679 const APFloat *C; 1680 if (match(Sign, m_APFloat(C)) && C->isNegative()) { 1681 // If we know that the sign argument is negative, reduce to FNABS: 1682 // copysign Mag, -Sign --> fneg (fabs Mag) 1683 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II); 1684 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II)); 1685 } 1686 1687 // Propagate sign argument through nested calls: 1688 // copysign Mag, (copysign ?, X) --> copysign Mag, X 1689 Value *X; 1690 if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X)))) 1691 return replaceOperand(*II, 1, X); 1692 1693 // Peek through changes of magnitude's sign-bit. This call rewrites those: 1694 // copysign (fabs X), Sign --> copysign X, Sign 1695 // copysign (fneg X), Sign --> copysign X, Sign 1696 if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X)))) 1697 return replaceOperand(*II, 0, X); 1698 1699 break; 1700 } 1701 case Intrinsic::fabs: { 1702 Value *Cond, *TVal, *FVal; 1703 if (match(II->getArgOperand(0), 1704 m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) { 1705 // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF 1706 if (isa<Constant>(TVal) && isa<Constant>(FVal)) { 1707 CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal}); 1708 CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal}); 1709 return SelectInst::Create(Cond, AbsT, AbsF); 1710 } 1711 // fabs (select Cond, -FVal, FVal) --> fabs FVal 1712 if (match(TVal, m_FNeg(m_Specific(FVal)))) 1713 return replaceOperand(*II, 0, FVal); 1714 // fabs (select Cond, TVal, -TVal) --> fabs TVal 1715 if (match(FVal, m_FNeg(m_Specific(TVal)))) 1716 return replaceOperand(*II, 0, TVal); 1717 } 1718 1719 LLVM_FALLTHROUGH; 1720 } 1721 case Intrinsic::ceil: 1722 case Intrinsic::floor: 1723 case Intrinsic::round: 1724 case Intrinsic::roundeven: 1725 case Intrinsic::nearbyint: 1726 case Intrinsic::rint: 1727 case Intrinsic::trunc: { 1728 Value *ExtSrc; 1729 if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) { 1730 // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x) 1731 Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II); 1732 return new FPExtInst(NarrowII, II->getType()); 1733 } 1734 break; 1735 } 1736 case Intrinsic::cos: 1737 case Intrinsic::amdgcn_cos: { 1738 Value *X; 1739 Value *Src = II->getArgOperand(0); 1740 if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) { 1741 // cos(-x) -> cos(x) 1742 // cos(fabs(x)) -> cos(x) 1743 return replaceOperand(*II, 0, X); 1744 } 1745 break; 1746 } 1747 case Intrinsic::sin: { 1748 Value *X; 1749 if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) { 1750 // sin(-x) --> -sin(x) 1751 Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II); 1752 Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin); 1753 FNeg->copyFastMathFlags(II); 1754 return FNeg; 1755 } 1756 break; 1757 } 1758 1759 case Intrinsic::arm_neon_vtbl1: 1760 case Intrinsic::aarch64_neon_tbl1: 1761 if (Value *V = simplifyNeonTbl1(*II, Builder)) 1762 return replaceInstUsesWith(*II, V); 1763 break; 1764 1765 case Intrinsic::arm_neon_vmulls: 1766 case Intrinsic::arm_neon_vmullu: 1767 case Intrinsic::aarch64_neon_smull: 1768 case Intrinsic::aarch64_neon_umull: { 1769 Value *Arg0 = II->getArgOperand(0); 1770 Value *Arg1 = II->getArgOperand(1); 1771 1772 // Handle mul by zero first: 1773 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) { 1774 return replaceInstUsesWith(CI, 
ConstantAggregateZero::get(II->getType())); 1775 } 1776 1777 // Check for constant LHS & RHS - in this case we just simplify. 1778 bool Zext = (IID == Intrinsic::arm_neon_vmullu || 1779 IID == Intrinsic::aarch64_neon_umull); 1780 VectorType *NewVT = cast<VectorType>(II->getType()); 1781 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) { 1782 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) { 1783 CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext); 1784 CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext); 1785 1786 return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1)); 1787 } 1788 1789 // Couldn't simplify - canonicalize constant to the RHS. 1790 std::swap(Arg0, Arg1); 1791 } 1792 1793 // Handle mul by one: 1794 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) 1795 if (ConstantInt *Splat = 1796 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) 1797 if (Splat->isOne()) 1798 return CastInst::CreateIntegerCast(Arg0, II->getType(), 1799 /*isSigned=*/!Zext); 1800 1801 break; 1802 } 1803 case Intrinsic::arm_neon_aesd: 1804 case Intrinsic::arm_neon_aese: 1805 case Intrinsic::aarch64_crypto_aesd: 1806 case Intrinsic::aarch64_crypto_aese: { 1807 Value *DataArg = II->getArgOperand(0); 1808 Value *KeyArg = II->getArgOperand(1); 1809 1810 // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR 1811 Value *Data, *Key; 1812 if (match(KeyArg, m_ZeroInt()) && 1813 match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) { 1814 replaceOperand(*II, 0, Data); 1815 replaceOperand(*II, 1, Key); 1816 return II; 1817 } 1818 break; 1819 } 1820 case Intrinsic::hexagon_V6_vandvrt: 1821 case Intrinsic::hexagon_V6_vandvrt_128B: { 1822 // Simplify Q -> V -> Q conversion. 1823 if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) { 1824 Intrinsic::ID ID0 = Op0->getIntrinsicID(); 1825 if (ID0 != Intrinsic::hexagon_V6_vandqrt && 1826 ID0 != Intrinsic::hexagon_V6_vandqrt_128B) 1827 break; 1828 Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1); 1829 uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue(); 1830 uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue(); 1831 // Check if every byte has common bits in Bytes and Mask. 1832 uint64_t C = Bytes1 & Mask1; 1833 if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000)) 1834 return replaceInstUsesWith(*II, Op0->getArgOperand(0)); 1835 } 1836 break; 1837 } 1838 case Intrinsic::stackrestore: { 1839 enum class ClassifyResult { 1840 None, 1841 Alloca, 1842 StackRestore, 1843 CallWithSideEffects, 1844 }; 1845 auto Classify = [](const Instruction *I) { 1846 if (isa<AllocaInst>(I)) 1847 return ClassifyResult::Alloca; 1848 1849 if (auto *CI = dyn_cast<CallInst>(I)) { 1850 if (auto *II = dyn_cast<IntrinsicInst>(CI)) { 1851 if (II->getIntrinsicID() == Intrinsic::stackrestore) 1852 return ClassifyResult::StackRestore; 1853 1854 if (II->mayHaveSideEffects()) 1855 return ClassifyResult::CallWithSideEffects; 1856 } else { 1857 // Consider all non-intrinsic calls to be side effects 1858 return ClassifyResult::CallWithSideEffects; 1859 } 1860 } 1861 1862 return ClassifyResult::None; 1863 }; 1864 1865 // If the stacksave and the stackrestore are in the same BB, and there is 1866 // no intervening call, alloca, or stackrestore of a different stacksave, 1867 // remove the restore. This can happen when variable allocas are DCE'd. 
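    // For illustration (hypothetical typed-pointer IR):
    //   %sp = call i8* @llvm.stacksave()
    //   ; ...only instructions classified as None in between...
    //   call void @llvm.stackrestore(i8* %sp)   ; removable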
1868 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) { 1869 if (SS->getIntrinsicID() == Intrinsic::stacksave && 1870 SS->getParent() == II->getParent()) { 1871 BasicBlock::iterator BI(SS); 1872 bool CannotRemove = false; 1873 for (++BI; &*BI != II; ++BI) { 1874 switch (Classify(&*BI)) { 1875 case ClassifyResult::None: 1876 // So far so good, look at next instructions. 1877 break; 1878 1879 case ClassifyResult::StackRestore: 1880 // If we found an intervening stackrestore for a different 1881 // stacksave, we can't remove the stackrestore. Otherwise, continue. 1882 if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS) 1883 CannotRemove = true; 1884 break; 1885 1886 case ClassifyResult::Alloca: 1887 case ClassifyResult::CallWithSideEffects: 1888 // If we found an alloca, a non-intrinsic call, or an intrinsic 1889 // call with side effects, we can't remove the stackrestore. 1890 CannotRemove = true; 1891 break; 1892 } 1893 if (CannotRemove) 1894 break; 1895 } 1896 1897 if (!CannotRemove) 1898 return eraseInstFromFunction(CI); 1899 } 1900 } 1901 1902 // Scan down this block to see if there is another stack restore in the 1903 // same block without an intervening call/alloca. 1904 BasicBlock::iterator BI(II); 1905 Instruction *TI = II->getParent()->getTerminator(); 1906 bool CannotRemove = false; 1907 for (++BI; &*BI != TI; ++BI) { 1908 switch (Classify(&*BI)) { 1909 case ClassifyResult::None: 1910 // So far so good, look at next instructions. 1911 break; 1912 1913 case ClassifyResult::StackRestore: 1914 // If there is a stackrestore below this one, remove this one. 1915 return eraseInstFromFunction(CI); 1916 1917 case ClassifyResult::Alloca: 1918 case ClassifyResult::CallWithSideEffects: 1919 // If we found an alloca, a non-intrinsic call, or an intrinsic call 1920 // with side effects (such as llvm.stacksave and llvm.read_register), 1921 // we can't remove the stack restore. 1922 CannotRemove = true; 1923 break; 1924 } 1925 if (CannotRemove) 1926 break; 1927 } 1928 1929 // If the stack restore is in a return, resume, or unwind block and if there 1930 // are no allocas or calls between the restore and the return, nuke the 1931 // restore. 1932 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI))) 1933 return eraseInstFromFunction(CI); 1934 break; 1935 } 1936 case Intrinsic::lifetime_end: 1937 // Asan needs to poison memory to detect invalid access which is possible 1938 // even for empty lifetime range. 1939 if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) || 1940 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) || 1941 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress)) 1942 break; 1943 1944 if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) { 1945 return I.getIntrinsicID() == Intrinsic::lifetime_start; 1946 })) 1947 return nullptr; 1948 break; 1949 case Intrinsic::assume: { 1950 Value *IIOperand = II->getArgOperand(0); 1951 SmallVector<OperandBundleDef, 4> OpBundles; 1952 II->getOperandBundlesAsDefs(OpBundles); 1953 1954 /// This will remove the boolean Condition from the assume given as 1955 /// argument and remove the assume if it becomes useless. 1956 /// always returns nullptr for use as a return values. 
1957 auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * { 1958 assert(isa<AssumeInst>(Assume)); 1959 if (isAssumeWithEmptyBundle(*cast<AssumeInst>(II))) 1960 return eraseInstFromFunction(CI); 1961 replaceUse(II->getOperandUse(0), ConstantInt::getTrue(II->getContext())); 1962 return nullptr; 1963 }; 1964 // Remove an assume if it is followed by an identical assume. 1965 // TODO: Do we need this? Unless there are conflicting assumptions, the 1966 // computeKnownBits(IIOperand) below here eliminates redundant assumes. 1967 Instruction *Next = II->getNextNonDebugInstruction(); 1968 if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand)))) 1969 return RemoveConditionFromAssume(Next); 1970 1971 // Canonicalize assume(a && b) -> assume(a); assume(b); 1972 // Note: New assumption intrinsics created here are registered by 1973 // the InstCombineIRInserter object. 1974 FunctionType *AssumeIntrinsicTy = II->getFunctionType(); 1975 Value *AssumeIntrinsic = II->getCalledOperand(); 1976 Value *A, *B; 1977 if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) { 1978 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles, 1979 II->getName()); 1980 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName()); 1981 return eraseInstFromFunction(*II); 1982 } 1983 // assume(!(a || b)) -> assume(!a); assume(!b); 1984 if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) { 1985 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, 1986 Builder.CreateNot(A), OpBundles, II->getName()); 1987 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, 1988 Builder.CreateNot(B), II->getName()); 1989 return eraseInstFromFunction(*II); 1990 } 1991 1992 // assume( (load addr) != null ) -> add 'nonnull' metadata to load 1993 // (if assume is valid at the load) 1994 CmpInst::Predicate Pred; 1995 Instruction *LHS; 1996 if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) && 1997 Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load && 1998 LHS->getType()->isPointerTy() && 1999 isValidAssumeForContext(II, LHS, &DT)) { 2000 MDNode *MD = MDNode::get(II->getContext(), None); 2001 LHS->setMetadata(LLVMContext::MD_nonnull, MD); 2002 return RemoveConditionFromAssume(II); 2003 2004 // TODO: apply nonnull return attributes to calls and invokes 2005 // TODO: apply range metadata for range check patterns? 
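    // For illustration (hypothetical IR): given
    //   %p = load i32*, i32** %pp
    //   %c = icmp ne i32* %p, null
    //   call void @llvm.assume(i1 %c)
    // the load is tagged with !nonnull metadata and the condition is dropped.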
2006 } 2007 2008 // Convert nonnull assume like: 2009 // %A = icmp ne i32* %PTR, null 2010 // call void @llvm.assume(i1 %A) 2011 // into 2012 // call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ] 2013 if (EnableKnowledgeRetention && 2014 match(IIOperand, m_Cmp(Pred, m_Value(A), m_Zero())) && 2015 Pred == CmpInst::ICMP_NE && A->getType()->isPointerTy()) { 2016 if (auto *Replacement = buildAssumeFromKnowledge( 2017 {RetainedKnowledge{Attribute::NonNull, 0, A}}, Next, &AC, &DT)) { 2018 2019 Replacement->insertBefore(Next); 2020 AC.registerAssumption(Replacement); 2021 return RemoveConditionFromAssume(II); 2022 } 2023 } 2024 2025 // Convert alignment assume like: 2026 // %B = ptrtoint i32* %A to i64 2027 // %C = and i64 %B, Constant 2028 // %D = icmp eq i64 %C, 0 2029 // call void @llvm.assume(i1 %D) 2030 // into 2031 // call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 Constant + 1)] 2032 uint64_t AlignMask; 2033 if (EnableKnowledgeRetention && 2034 match(IIOperand, 2035 m_Cmp(Pred, m_And(m_Value(A), m_ConstantInt(AlignMask)), 2036 m_Zero())) && 2037 Pred == CmpInst::ICMP_EQ) { 2038 if (isPowerOf2_64(AlignMask + 1)) { 2039 uint64_t Offset = 0; 2040 match(A, m_Add(m_Value(A), m_ConstantInt(Offset))); 2041 if (match(A, m_PtrToInt(m_Value(A)))) { 2042 /// Note: this doesn't preserve the offset information but merges 2043 /// offset and alignment. 2044 /// TODO: we can generate a GEP instead of merging the alignment with 2045 /// the offset. 2046 RetainedKnowledge RK{Attribute::Alignment, 2047 (unsigned)MinAlign(Offset, AlignMask + 1), A}; 2048 if (auto *Replacement = 2049 buildAssumeFromKnowledge(RK, Next, &AC, &DT)) { 2050 2051 Replacement->insertAfter(II); 2052 AC.registerAssumption(Replacement); 2053 } 2054 return RemoveConditionFromAssume(II); 2055 } 2056 } 2057 } 2058 2059 /// Canonicalize Knowledge in operand bundles. 2060 if (EnableKnowledgeRetention && II->hasOperandBundles()) { 2061 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) { 2062 auto &BOI = II->bundle_op_info_begin()[Idx]; 2063 RetainedKnowledge RK = 2064 llvm::getKnowledgeFromBundle(cast<AssumeInst>(*II), BOI); 2065 if (BOI.End - BOI.Begin > 2) 2066 continue; // Prevent reducing knowledge in an align with offset since 2067 // extracting a RetainedKnowledge form them looses offset 2068 // information 2069 RetainedKnowledge CanonRK = 2070 llvm::simplifyRetainedKnowledge(cast<AssumeInst>(II), RK, 2071 &getAssumptionCache(), 2072 &getDominatorTree()); 2073 if (CanonRK == RK) 2074 continue; 2075 if (!CanonRK) { 2076 if (BOI.End - BOI.Begin > 0) { 2077 Worklist.pushValue(II->op_begin()[BOI.Begin]); 2078 Value::dropDroppableUse(II->op_begin()[BOI.Begin]); 2079 } 2080 continue; 2081 } 2082 assert(RK.AttrKind == CanonRK.AttrKind); 2083 if (BOI.End - BOI.Begin > 0) 2084 II->op_begin()[BOI.Begin].set(CanonRK.WasOn); 2085 if (BOI.End - BOI.Begin > 1) 2086 II->op_begin()[BOI.Begin + 1].set(ConstantInt::get( 2087 Type::getInt64Ty(II->getContext()), CanonRK.ArgValue)); 2088 if (RK.WasOn) 2089 Worklist.pushValue(RK.WasOn); 2090 return II; 2091 } 2092 } 2093 2094 // If there is a dominating assume with the same condition as this one, 2095 // then this one is redundant, and should be removed. 2096 KnownBits Known(1); 2097 computeKnownBits(IIOperand, Known, 0, II); 2098 if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) 2099 return eraseInstFromFunction(*II); 2100 2101 // Update the cache of affected values for this assumption (we might be 2102 // here because we just simplified the condition). 
2103 AC.updateAffectedValues(cast<AssumeInst>(II)); 2104 break; 2105 } 2106 case Intrinsic::experimental_guard: { 2107 // Is this guard followed by another guard? We scan forward over a small 2108 // fixed window of instructions to handle common cases with conditions 2109 // computed between guards. 2110 Instruction *NextInst = II->getNextNonDebugInstruction(); 2111 for (unsigned i = 0; i < GuardWideningWindow; i++) { 2112 // Note: Using context-free form to avoid compile time blow up 2113 if (!isSafeToSpeculativelyExecute(NextInst)) 2114 break; 2115 NextInst = NextInst->getNextNonDebugInstruction(); 2116 } 2117 Value *NextCond = nullptr; 2118 if (match(NextInst, 2119 m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) { 2120 Value *CurrCond = II->getArgOperand(0); 2121 2122 // Remove a guard that it is immediately preceded by an identical guard. 2123 // Otherwise canonicalize guard(a); guard(b) -> guard(a & b). 2124 if (CurrCond != NextCond) { 2125 Instruction *MoveI = II->getNextNonDebugInstruction(); 2126 while (MoveI != NextInst) { 2127 auto *Temp = MoveI; 2128 MoveI = MoveI->getNextNonDebugInstruction(); 2129 Temp->moveBefore(II); 2130 } 2131 replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond)); 2132 } 2133 eraseInstFromFunction(*NextInst); 2134 return II; 2135 } 2136 break; 2137 } 2138 case Intrinsic::experimental_vector_insert: { 2139 Value *Vec = II->getArgOperand(0); 2140 Value *SubVec = II->getArgOperand(1); 2141 Value *Idx = II->getArgOperand(2); 2142 auto *DstTy = dyn_cast<FixedVectorType>(II->getType()); 2143 auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType()); 2144 auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType()); 2145 2146 // Only canonicalize if the destination vector, Vec, and SubVec are all 2147 // fixed vectors. 2148 if (DstTy && VecTy && SubVecTy) { 2149 unsigned DstNumElts = DstTy->getNumElements(); 2150 unsigned VecNumElts = VecTy->getNumElements(); 2151 unsigned SubVecNumElts = SubVecTy->getNumElements(); 2152 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue(); 2153 2154 // An insert that entirely overwrites Vec with SubVec is a nop. 2155 if (VecNumElts == SubVecNumElts) 2156 return replaceInstUsesWith(CI, SubVec); 2157 2158 // Widen SubVec into a vector of the same width as Vec, since 2159 // shufflevector requires the two input vectors to be the same width. 2160 // Elements beyond the bounds of SubVec within the widened vector are 2161 // undefined. 2162 SmallVector<int, 8> WidenMask; 2163 unsigned i; 2164 for (i = 0; i != SubVecNumElts; ++i) 2165 WidenMask.push_back(i); 2166 for (; i != VecNumElts; ++i) 2167 WidenMask.push_back(UndefMaskElem); 2168 2169 Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask); 2170 2171 SmallVector<int, 8> Mask; 2172 for (unsigned i = 0; i != IdxN; ++i) 2173 Mask.push_back(i); 2174 for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i) 2175 Mask.push_back(i); 2176 for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i) 2177 Mask.push_back(i); 2178 2179 Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask); 2180 return replaceInstUsesWith(CI, Shuffle); 2181 } 2182 break; 2183 } 2184 case Intrinsic::experimental_vector_extract: { 2185 Value *Vec = II->getArgOperand(0); 2186 Value *Idx = II->getArgOperand(1); 2187 2188 auto *DstTy = dyn_cast<FixedVectorType>(II->getType()); 2189 auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType()); 2190 2191 // Only canonicalize if the the destination vector and Vec are fixed 2192 // vectors. 
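    // For illustration (hypothetical fixed-width types): extracting <2 x i32>
    // at index 2 from <4 x i32> %vec becomes roughly
    //   shufflevector <4 x i32> %vec, <4 x i32> poison, <2 x i32> <i32 2, i32 3>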
2193 if (DstTy && VecTy) { 2194 unsigned DstNumElts = DstTy->getNumElements(); 2195 unsigned VecNumElts = VecTy->getNumElements(); 2196 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue(); 2197 2198 // Extracting the entirety of Vec is a nop. 2199 if (VecNumElts == DstNumElts) { 2200 replaceInstUsesWith(CI, Vec); 2201 return eraseInstFromFunction(CI); 2202 } 2203 2204 SmallVector<int, 8> Mask; 2205 for (unsigned i = 0; i != DstNumElts; ++i) 2206 Mask.push_back(IdxN + i); 2207 2208 Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask); 2209 return replaceInstUsesWith(CI, Shuffle); 2210 } 2211 break; 2212 } 2213 case Intrinsic::experimental_vector_reverse: { 2214 Value *BO0, *BO1, *X, *Y; 2215 Value *Vec = II->getArgOperand(0); 2216 if (match(Vec, m_OneUse(m_BinOp(m_Value(BO0), m_Value(BO1))))) { 2217 auto *OldBinOp = cast<BinaryOperator>(Vec); 2218 if (match(BO0, m_Intrinsic<Intrinsic::experimental_vector_reverse>( 2219 m_Value(X)))) { 2220 // rev(binop rev(X), rev(Y)) --> binop X, Y 2221 if (match(BO1, m_Intrinsic<Intrinsic::experimental_vector_reverse>( 2222 m_Value(Y)))) 2223 return replaceInstUsesWith(CI, 2224 BinaryOperator::CreateWithCopiedFlags( 2225 OldBinOp->getOpcode(), X, Y, OldBinOp, 2226 OldBinOp->getName(), II)); 2227 // rev(binop rev(X), BO1Splat) --> binop X, BO1Splat 2228 if (isSplatValue(BO1)) 2229 return replaceInstUsesWith(CI, 2230 BinaryOperator::CreateWithCopiedFlags( 2231 OldBinOp->getOpcode(), X, BO1, 2232 OldBinOp, OldBinOp->getName(), II)); 2233 } 2234 // rev(binop BO0Splat, rev(Y)) --> binop BO0Splat, Y 2235 if (match(BO1, m_Intrinsic<Intrinsic::experimental_vector_reverse>( 2236 m_Value(Y))) && 2237 isSplatValue(BO0)) 2238 return replaceInstUsesWith(CI, BinaryOperator::CreateWithCopiedFlags( 2239 OldBinOp->getOpcode(), BO0, Y, 2240 OldBinOp, OldBinOp->getName(), II)); 2241 } 2242 // rev(unop rev(X)) --> unop X 2243 if (match(Vec, m_OneUse(m_UnOp( 2244 m_Intrinsic<Intrinsic::experimental_vector_reverse>( 2245 m_Value(X)))))) { 2246 auto *OldUnOp = cast<UnaryOperator>(Vec); 2247 auto *NewUnOp = UnaryOperator::CreateWithCopiedFlags( 2248 OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(), II); 2249 return replaceInstUsesWith(CI, NewUnOp); 2250 } 2251 break; 2252 } 2253 case Intrinsic::vector_reduce_or: 2254 case Intrinsic::vector_reduce_and: { 2255 // Canonicalize logical or/and reductions: 2256 // Or reduction for i1 is represented as: 2257 // %val = bitcast <ReduxWidth x i1> to iReduxWidth 2258 // %res = cmp ne iReduxWidth %val, 0 2259 // And reduction for i1 is represented as: 2260 // %val = bitcast <ReduxWidth x i1> to iReduxWidth 2261 // %res = cmp eq iReduxWidth %val, 11111 2262 Value *Arg = II->getArgOperand(0); 2263 Value *Vect; 2264 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 2265 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 2266 if (FTy->getElementType() == Builder.getInt1Ty()) { 2267 Value *Res = Builder.CreateBitCast( 2268 Vect, Builder.getIntNTy(FTy->getNumElements())); 2269 if (IID == Intrinsic::vector_reduce_and) { 2270 Res = Builder.CreateICmpEQ( 2271 Res, ConstantInt::getAllOnesValue(Res->getType())); 2272 } else { 2273 assert(IID == Intrinsic::vector_reduce_or && 2274 "Expected or reduction."); 2275 Res = Builder.CreateIsNotNull(Res); 2276 } 2277 if (Arg != Vect) 2278 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 2279 II->getType()); 2280 return replaceInstUsesWith(CI, Res); 2281 } 2282 } 2283 LLVM_FALLTHROUGH; 2284 } 2285 case Intrinsic::vector_reduce_add: { 2286 if (IID == 
Intrinsic::vector_reduce_add) { 2287 // Convert vector_reduce_add(ZExt(<n x i1>)) to 2288 // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)). 2289 // Convert vector_reduce_add(SExt(<n x i1>)) to 2290 // -ZExtOrTrunc(ctpop(bitcast <n x i1> to in)). 2291 // Convert vector_reduce_add(<n x i1>) to 2292 // Trunc(ctpop(bitcast <n x i1> to in)). 2293 Value *Arg = II->getArgOperand(0); 2294 Value *Vect; 2295 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 2296 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 2297 if (FTy->getElementType() == Builder.getInt1Ty()) { 2298 Value *V = Builder.CreateBitCast( 2299 Vect, Builder.getIntNTy(FTy->getNumElements())); 2300 Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V); 2301 if (Res->getType() != II->getType()) 2302 Res = Builder.CreateZExtOrTrunc(Res, II->getType()); 2303 if (Arg != Vect && 2304 cast<Instruction>(Arg)->getOpcode() == Instruction::SExt) 2305 Res = Builder.CreateNeg(Res); 2306 return replaceInstUsesWith(CI, Res); 2307 } 2308 } 2309 } 2310 LLVM_FALLTHROUGH; 2311 } 2312 case Intrinsic::vector_reduce_xor: { 2313 if (IID == Intrinsic::vector_reduce_xor) { 2314 // Exclusive disjunction reduction over the vector with 2315 // (potentially-extended) i1 element type is actually a 2316 // (potentially-extended) arithmetic `add` reduction over the original 2317 // non-extended value: 2318 // vector_reduce_xor(?ext(<n x i1>)) 2319 // --> 2320 // ?ext(vector_reduce_add(<n x i1>)) 2321 Value *Arg = II->getArgOperand(0); 2322 Value *Vect; 2323 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 2324 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 2325 if (FTy->getElementType() == Builder.getInt1Ty()) { 2326 Value *Res = Builder.CreateAddReduce(Vect); 2327 if (Arg != Vect) 2328 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 2329 II->getType()); 2330 return replaceInstUsesWith(CI, Res); 2331 } 2332 } 2333 } 2334 LLVM_FALLTHROUGH; 2335 } 2336 case Intrinsic::vector_reduce_mul: { 2337 if (IID == Intrinsic::vector_reduce_mul) { 2338 // Multiplicative reduction over the vector with (potentially-extended) 2339 // i1 element type is actually a (potentially zero-extended) 2340 // logical `and` reduction over the original non-extended value: 2341 // vector_reduce_mul(?ext(<n x i1>)) 2342 // --> 2343 // zext(vector_reduce_and(<n x i1>)) 2344 Value *Arg = II->getArgOperand(0); 2345 Value *Vect; 2346 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 2347 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 2348 if (FTy->getElementType() == Builder.getInt1Ty()) { 2349 Value *Res = Builder.CreateAndReduce(Vect); 2350 if (Res->getType() != II->getType()) 2351 Res = Builder.CreateZExt(Res, II->getType()); 2352 return replaceInstUsesWith(CI, Res); 2353 } 2354 } 2355 } 2356 LLVM_FALLTHROUGH; 2357 } 2358 case Intrinsic::vector_reduce_umin: 2359 case Intrinsic::vector_reduce_umax: { 2360 if (IID == Intrinsic::vector_reduce_umin || 2361 IID == Intrinsic::vector_reduce_umax) { 2362 // UMin/UMax reduction over the vector with (potentially-extended) 2363 // i1 element type is actually a (potentially-extended) 2364 // logical `and`/`or` reduction over the original non-extended value: 2365 // vector_reduce_u{min,max}(?ext(<n x i1>)) 2366 // --> 2367 // ?ext(vector_reduce_{and,or}(<n x i1>)) 2368 Value *Arg = II->getArgOperand(0); 2369 Value *Vect; 2370 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 2371 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 2372 if (FTy->getElementType() == 
Builder.getInt1Ty()) { 2373 Value *Res = IID == Intrinsic::vector_reduce_umin 2374 ? Builder.CreateAndReduce(Vect) 2375 : Builder.CreateOrReduce(Vect); 2376 if (Arg != Vect) 2377 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res, 2378 II->getType()); 2379 return replaceInstUsesWith(CI, Res); 2380 } 2381 } 2382 } 2383 LLVM_FALLTHROUGH; 2384 } 2385 case Intrinsic::vector_reduce_smin: 2386 case Intrinsic::vector_reduce_smax: { 2387 if (IID == Intrinsic::vector_reduce_smin || 2388 IID == Intrinsic::vector_reduce_smax) { 2389 // SMin/SMax reduction over the vector with (potentially-extended) 2390 // i1 element type is actually a (potentially-extended) 2391 // logical `and`/`or` reduction over the original non-extended value: 2392 // vector_reduce_s{min,max}(<n x i1>) 2393 // --> 2394 // vector_reduce_{or,and}(<n x i1>) 2395 // and 2396 // vector_reduce_s{min,max}(sext(<n x i1>)) 2397 // --> 2398 // sext(vector_reduce_{or,and}(<n x i1>)) 2399 // and 2400 // vector_reduce_s{min,max}(zext(<n x i1>)) 2401 // --> 2402 // zext(vector_reduce_{and,or}(<n x i1>)) 2403 Value *Arg = II->getArgOperand(0); 2404 Value *Vect; 2405 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) { 2406 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) 2407 if (FTy->getElementType() == Builder.getInt1Ty()) { 2408 Instruction::CastOps ExtOpc = Instruction::CastOps::CastOpsEnd; 2409 if (Arg != Vect) 2410 ExtOpc = cast<CastInst>(Arg)->getOpcode(); 2411 Value *Res = ((IID == Intrinsic::vector_reduce_smin) == 2412 (ExtOpc == Instruction::CastOps::ZExt)) 2413 ? Builder.CreateAndReduce(Vect) 2414 : Builder.CreateOrReduce(Vect); 2415 if (Arg != Vect) 2416 Res = Builder.CreateCast(ExtOpc, Res, II->getType()); 2417 return replaceInstUsesWith(CI, Res); 2418 } 2419 } 2420 } 2421 LLVM_FALLTHROUGH; 2422 } 2423 case Intrinsic::vector_reduce_fmax: 2424 case Intrinsic::vector_reduce_fmin: 2425 case Intrinsic::vector_reduce_fadd: 2426 case Intrinsic::vector_reduce_fmul: { 2427 bool CanBeReassociated = (IID != Intrinsic::vector_reduce_fadd && 2428 IID != Intrinsic::vector_reduce_fmul) || 2429 II->hasAllowReassoc(); 2430 const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd || 2431 IID == Intrinsic::vector_reduce_fmul) 2432 ? 1 2433 : 0; 2434 Value *Arg = II->getArgOperand(ArgIdx); 2435 Value *V; 2436 ArrayRef<int> Mask; 2437 if (!isa<FixedVectorType>(Arg->getType()) || !CanBeReassociated || 2438 !match(Arg, m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))) || 2439 !cast<ShuffleVectorInst>(Arg)->isSingleSource()) 2440 break; 2441 int Sz = Mask.size(); 2442 SmallBitVector UsedIndices(Sz); 2443 for (int Idx : Mask) { 2444 if (Idx == UndefMaskElem || UsedIndices.test(Idx)) 2445 break; 2446 UsedIndices.set(Idx); 2447 } 2448 // Can remove shuffle iff just shuffled elements, no repeats, undefs, or 2449 // other changes. 2450 if (UsedIndices.all()) { 2451 replaceUse(II->getOperandUse(ArgIdx), V); 2452 return nullptr; 2453 } 2454 break; 2455 } 2456 default: { 2457 // Handle target specific intrinsics 2458 Optional<Instruction *> V = targetInstCombineIntrinsic(*II); 2459 if (V.hasValue()) 2460 return V.getValue(); 2461 break; 2462 } 2463 } 2464 // Some intrinsics (like experimental_gc_statepoint) can be used in invoke 2465 // context, so it is handled in visitCallBase and we should trigger it. 
2466 return visitCallBase(*II); 2467 } 2468 2469 // Fence instruction simplification 2470 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) { 2471 auto *NFI = dyn_cast<FenceInst>(FI.getNextNonDebugInstruction()); 2472 // This check is solely here to handle arbitrary target-dependent syncscopes. 2473 // TODO: Can remove if does not matter in practice. 2474 if (NFI && FI.isIdenticalTo(NFI)) 2475 return eraseInstFromFunction(FI); 2476 2477 // Returns true if FI1 is identical or stronger fence than FI2. 2478 auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) { 2479 auto FI1SyncScope = FI1->getSyncScopeID(); 2480 // Consider same scope, where scope is global or single-thread. 2481 if (FI1SyncScope != FI2->getSyncScopeID() || 2482 (FI1SyncScope != SyncScope::System && 2483 FI1SyncScope != SyncScope::SingleThread)) 2484 return false; 2485 2486 return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering()); 2487 }; 2488 if (NFI && isIdenticalOrStrongerFence(NFI, &FI)) 2489 return eraseInstFromFunction(FI); 2490 2491 if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNonDebugInstruction())) 2492 if (isIdenticalOrStrongerFence(PFI, &FI)) 2493 return eraseInstFromFunction(FI); 2494 return nullptr; 2495 } 2496 2497 // InvokeInst simplification 2498 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) { 2499 return visitCallBase(II); 2500 } 2501 2502 // CallBrInst simplification 2503 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) { 2504 return visitCallBase(CBI); 2505 } 2506 2507 /// If this cast does not affect the value passed through the varargs area, we 2508 /// can eliminate the use of the cast. 2509 static bool isSafeToEliminateVarargsCast(const CallBase &Call, 2510 const DataLayout &DL, 2511 const CastInst *const CI, 2512 const int ix) { 2513 if (!CI->isLosslessCast()) 2514 return false; 2515 2516 // If this is a GC intrinsic, avoid munging types. We need types for 2517 // statepoint reconstruction in SelectionDAG. 2518 // TODO: This is probably something which should be expanded to all 2519 // intrinsics since the entire point of intrinsics is that 2520 // they are understandable by the optimizer. 2521 if (isa<GCStatepointInst>(Call) || isa<GCRelocateInst>(Call) || 2522 isa<GCResultInst>(Call)) 2523 return false; 2524 2525 // Opaque pointers are compatible with any byval types. 2526 PointerType *SrcTy = cast<PointerType>(CI->getOperand(0)->getType()); 2527 if (SrcTy->isOpaque()) 2528 return true; 2529 2530 // The size of ByVal or InAlloca arguments is derived from the type, so we 2531 // can't change to a type with a different size. If the size were 2532 // passed explicitly we could avoid this check. 2533 if (!Call.isPassPointeeByValueArgument(ix)) 2534 return true; 2535 2536 // The transform currently only handles type replacement for byval, not other 2537 // type-carrying attributes. 2538 if (!Call.isByValArgument(ix)) 2539 return false; 2540 2541 Type *SrcElemTy = SrcTy->getNonOpaquePointerElementType(); 2542 Type *DstElemTy = Call.getParamByValType(ix); 2543 if (!SrcElemTy->isSized() || !DstElemTy->isSized()) 2544 return false; 2545 if (DL.getTypeAllocSize(SrcElemTy) != DL.getTypeAllocSize(DstElemTy)) 2546 return false; 2547 return true; 2548 } 2549 2550 Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) { 2551 if (!CI->getCalledFunction()) return nullptr; 2552 2553 // Skip optimizing notail and musttail calls so 2554 // LibCallSimplifier::optimizeCall doesn't have to preserve those invariants. 
2555 // LibCallSimplifier::optimizeCall should try to preseve tail calls though. 2556 if (CI->isMustTailCall() || CI->isNoTailCall()) 2557 return nullptr; 2558 2559 auto InstCombineRAUW = [this](Instruction *From, Value *With) { 2560 replaceInstUsesWith(*From, With); 2561 }; 2562 auto InstCombineErase = [this](Instruction *I) { 2563 eraseInstFromFunction(*I); 2564 }; 2565 LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW, 2566 InstCombineErase); 2567 if (Value *With = Simplifier.optimizeCall(CI, Builder)) { 2568 ++NumSimplified; 2569 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With); 2570 } 2571 2572 return nullptr; 2573 } 2574 2575 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) { 2576 // Strip off at most one level of pointer casts, looking for an alloca. This 2577 // is good enough in practice and simpler than handling any number of casts. 2578 Value *Underlying = TrampMem->stripPointerCasts(); 2579 if (Underlying != TrampMem && 2580 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem)) 2581 return nullptr; 2582 if (!isa<AllocaInst>(Underlying)) 2583 return nullptr; 2584 2585 IntrinsicInst *InitTrampoline = nullptr; 2586 for (User *U : TrampMem->users()) { 2587 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 2588 if (!II) 2589 return nullptr; 2590 if (II->getIntrinsicID() == Intrinsic::init_trampoline) { 2591 if (InitTrampoline) 2592 // More than one init_trampoline writes to this value. Give up. 2593 return nullptr; 2594 InitTrampoline = II; 2595 continue; 2596 } 2597 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline) 2598 // Allow any number of calls to adjust.trampoline. 2599 continue; 2600 return nullptr; 2601 } 2602 2603 // No call to init.trampoline found. 2604 if (!InitTrampoline) 2605 return nullptr; 2606 2607 // Check that the alloca is being used in the expected way. 2608 if (InitTrampoline->getOperand(0) != TrampMem) 2609 return nullptr; 2610 2611 return InitTrampoline; 2612 } 2613 2614 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp, 2615 Value *TrampMem) { 2616 // Visit all the previous instructions in the basic block, and try to find a 2617 // init.trampoline which has a direct path to the adjust.trampoline. 2618 for (BasicBlock::iterator I = AdjustTramp->getIterator(), 2619 E = AdjustTramp->getParent()->begin(); 2620 I != E;) { 2621 Instruction *Inst = &*--I; 2622 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2623 if (II->getIntrinsicID() == Intrinsic::init_trampoline && 2624 II->getOperand(0) == TrampMem) 2625 return II; 2626 if (Inst->mayWriteToMemory()) 2627 return nullptr; 2628 } 2629 return nullptr; 2630 } 2631 2632 // Given a call to llvm.adjust.trampoline, find and return the corresponding 2633 // call to llvm.init.trampoline if the call to the trampoline can be optimized 2634 // to a direct call to a function. Otherwise return NULL. 
2635 static IntrinsicInst *findInitTrampoline(Value *Callee) { 2636 Callee = Callee->stripPointerCasts(); 2637 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee); 2638 if (!AdjustTramp || 2639 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline) 2640 return nullptr; 2641 2642 Value *TrampMem = AdjustTramp->getOperand(0); 2643 2644 if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem)) 2645 return IT; 2646 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem)) 2647 return IT; 2648 return nullptr; 2649 } 2650 2651 void InstCombinerImpl::annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) { 2652 // Note: We only handle cases which can't be driven from generic attributes 2653 // here. So, for example, nonnull and noalias (which are common properties 2654 // of some allocation functions) are expected to be handled via annotation 2655 // of the respective allocator declaration with generic attributes. 2656 2657 uint64_t Size; 2658 ObjectSizeOpts Opts; 2659 if (getObjectSize(&Call, Size, DL, TLI, Opts) && Size > 0) { 2660 // TODO: We really should just emit deref_or_null here and then 2661 // let the generic inference code combine that with nonnull. 2662 if (Call.hasRetAttr(Attribute::NonNull)) 2663 Call.addRetAttr(Attribute::getWithDereferenceableBytes( 2664 Call.getContext(), Size)); 2665 else 2666 Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes( 2667 Call.getContext(), Size)); 2668 } 2669 2670 // Add alignment attribute if alignment is a power of two constant. 2671 Value *Alignment = getAllocAlignment(&Call, TLI); 2672 if (!Alignment) 2673 return; 2674 2675 ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment); 2676 if (AlignOpC && AlignOpC->getValue().ult(llvm::Value::MaximumAlignment)) { 2677 uint64_t AlignmentVal = AlignOpC->getZExtValue(); 2678 if (llvm::isPowerOf2_64(AlignmentVal)) { 2679 Call.removeRetAttr(Attribute::Alignment); 2680 Call.addRetAttr(Attribute::getWithAlignment(Call.getContext(), 2681 Align(AlignmentVal))); 2682 } 2683 } 2684 } 2685 2686 /// Improvements for call, callbr and invoke instructions. 2687 Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) { 2688 if (isAllocationFn(&Call, &TLI)) 2689 annotateAnyAllocSite(Call, &TLI); 2690 2691 bool Changed = false; 2692 2693 // Mark any parameters that are known to be non-null with the nonnull 2694 // attribute. This is helpful for inlining calls to functions with null 2695 // checks on their arguments. 2696 SmallVector<unsigned, 4> ArgNos; 2697 unsigned ArgNo = 0; 2698 2699 for (Value *V : Call.args()) { 2700 if (V->getType()->isPointerTy() && 2701 !Call.paramHasAttr(ArgNo, Attribute::NonNull) && 2702 isKnownNonZero(V, DL, 0, &AC, &Call, &DT)) 2703 ArgNos.push_back(ArgNo); 2704 ArgNo++; 2705 } 2706 2707 assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly."); 2708 2709 if (!ArgNos.empty()) { 2710 AttributeList AS = Call.getAttributes(); 2711 LLVMContext &Ctx = Call.getContext(); 2712 AS = AS.addParamAttribute(Ctx, ArgNos, 2713 Attribute::get(Ctx, Attribute::NonNull)); 2714 Call.setAttributes(AS); 2715 Changed = true; 2716 } 2717 2718 // If the callee is a pointer to a function, attempt to move any casts to the 2719 // arguments of the call/callbr/invoke. 
2720 Value *Callee = Call.getCalledOperand();
2721 if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
2722 return nullptr;
2723
2724 if (Function *CalleeF = dyn_cast<Function>(Callee)) {
2725 // Remove the convergent attr on calls when the callee is not convergent.
2726 if (Call.isConvergent() && !CalleeF->isConvergent() &&
2727 !CalleeF->isIntrinsic()) {
2728 LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
2729 << "\n");
2730 Call.setNotConvergent();
2731 return &Call;
2732 }
2733
2734 // If the call and callee calling conventions don't match, and neither one
2735 // of the calling conventions is compatible with C calling convention
2736 // this call must be unreachable, as the call is undefined.
2737 if ((CalleeF->getCallingConv() != Call.getCallingConv() &&
2738 !(CalleeF->getCallingConv() == llvm::CallingConv::C &&
2739 TargetLibraryInfoImpl::isCallingConvCCompatible(&Call)) &&
2740 !(Call.getCallingConv() == llvm::CallingConv::C &&
2741 TargetLibraryInfoImpl::isCallingConvCCompatible(CalleeF))) &&
2742 // Only do this for calls to a function with a body. A prototype may
2743 // not actually end up matching the implementation's calling conv for a
2744 // variety of reasons (e.g. it may be written in assembly).
2745 !CalleeF->isDeclaration()) {
2746 Instruction *OldCall = &Call;
2747 CreateNonTerminatorUnreachable(OldCall);
2748 // If OldCall does not return void then replaceInstUsesWith poison.
2749 // This allows ValueHandlers and custom metadata to adjust itself.
2750 if (!OldCall->getType()->isVoidTy())
2751 replaceInstUsesWith(*OldCall, PoisonValue::get(OldCall->getType()));
2752 if (isa<CallInst>(OldCall))
2753 return eraseInstFromFunction(*OldCall);
2754
2755 // We cannot remove an invoke or a callbr, because it would change the
2756 // CFG, just change the callee to a null pointer.
2757 cast<CallBase>(OldCall)->setCalledFunction(
2758 CalleeF->getFunctionType(),
2759 Constant::getNullValue(CalleeF->getType()));
2760 return nullptr;
2761 }
2762 }
2763
2764 // Calling a null function pointer is undefined if a null address isn't
2765 // dereferenceable.
2766 if ((isa<ConstantPointerNull>(Callee) &&
2767 !NullPointerIsDefined(Call.getFunction())) ||
2768 isa<UndefValue>(Callee)) {
2769 // If Call does not return void then replaceInstUsesWith poison.
2770 // This allows ValueHandlers and custom metadata to adjust itself.
2771 if (!Call.getType()->isVoidTy())
2772 replaceInstUsesWith(Call, PoisonValue::get(Call.getType()));
2773
2774 if (Call.isTerminator()) {
2775 // Can't remove an invoke or callbr because we cannot change the CFG.
2776 return nullptr;
2777 }
2778
2779 // This instruction is not reachable, just remove it.
2780 CreateNonTerminatorUnreachable(&Call);
2781 return eraseInstFromFunction(Call);
2782 }
2783
2784 if (IntrinsicInst *II = findInitTrampoline(Callee))
2785 return transformCallThroughTrampoline(Call, *II);
2786
2787 // TODO: Drop this transform once opaque pointer transition is done.
2788 FunctionType *FTy = Call.getFunctionType();
2789 if (FTy->isVarArg()) {
2790 int ix = FTy->getNumParams();
2791 // See if we can optimize any arguments passed through the varargs area of
2792 // the call.
2793 for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
2794 I != E; ++I, ++ix) {
2795 CastInst *CI = dyn_cast<CastInst>(*I);
2796 if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
2797 replaceUse(*I, CI->getOperand(0));
2798
2799 // Update the byval type to match the pointer type.
2800 // Not necessary for opaque pointers. 2801 PointerType *NewTy = cast<PointerType>(CI->getOperand(0)->getType()); 2802 if (!NewTy->isOpaque() && Call.isByValArgument(ix)) { 2803 Call.removeParamAttr(ix, Attribute::ByVal); 2804 Call.addParamAttr(ix, Attribute::getWithByValType( 2805 Call.getContext(), 2806 NewTy->getNonOpaquePointerElementType())); 2807 } 2808 Changed = true; 2809 } 2810 } 2811 } 2812 2813 if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) { 2814 InlineAsm *IA = cast<InlineAsm>(Callee); 2815 if (!IA->canThrow()) { 2816 // Normal inline asm calls cannot throw - mark them 2817 // 'nounwind'. 2818 Call.setDoesNotThrow(); 2819 Changed = true; 2820 } 2821 } 2822 2823 // Try to optimize the call if possible, we require DataLayout for most of 2824 // this. None of these calls are seen as possibly dead so go ahead and 2825 // delete the instruction now. 2826 if (CallInst *CI = dyn_cast<CallInst>(&Call)) { 2827 Instruction *I = tryOptimizeCall(CI); 2828 // If we changed something return the result, etc. Otherwise let 2829 // the fallthrough check. 2830 if (I) return eraseInstFromFunction(*I); 2831 } 2832 2833 if (!Call.use_empty() && !Call.isMustTailCall()) 2834 if (Value *ReturnedArg = Call.getReturnedArgOperand()) { 2835 Type *CallTy = Call.getType(); 2836 Type *RetArgTy = ReturnedArg->getType(); 2837 if (RetArgTy->canLosslesslyBitCastTo(CallTy)) 2838 return replaceInstUsesWith( 2839 Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy)); 2840 } 2841 2842 if (isAllocationFn(&Call, &TLI) && 2843 isAllocRemovable(&cast<CallBase>(Call), &TLI)) 2844 return visitAllocSite(Call); 2845 2846 // Handle intrinsics which can be used in both call and invoke context. 2847 switch (Call.getIntrinsicID()) { 2848 case Intrinsic::experimental_gc_statepoint: { 2849 GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call); 2850 SmallPtrSet<Value *, 32> LiveGcValues; 2851 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) { 2852 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc); 2853 2854 // Remove the relocation if unused. 2855 if (GCR.use_empty()) { 2856 eraseInstFromFunction(GCR); 2857 continue; 2858 } 2859 2860 Value *DerivedPtr = GCR.getDerivedPtr(); 2861 Value *BasePtr = GCR.getBasePtr(); 2862 2863 // Undef is undef, even after relocation. 2864 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) { 2865 replaceInstUsesWith(GCR, UndefValue::get(GCR.getType())); 2866 eraseInstFromFunction(GCR); 2867 continue; 2868 } 2869 2870 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) { 2871 // The relocation of null will be null for most any collector. 2872 // TODO: provide a hook for this in GCStrategy. There might be some 2873 // weird collector this property does not hold for. 2874 if (isa<ConstantPointerNull>(DerivedPtr)) { 2875 // Use null-pointer of gc_relocate's type to replace it. 2876 replaceInstUsesWith(GCR, ConstantPointerNull::get(PT)); 2877 eraseInstFromFunction(GCR); 2878 continue; 2879 } 2880 2881 // isKnownNonNull -> nonnull attribute 2882 if (!GCR.hasRetAttr(Attribute::NonNull) && 2883 isKnownNonZero(DerivedPtr, DL, 0, &AC, &Call, &DT)) { 2884 GCR.addRetAttr(Attribute::NonNull); 2885 // We discovered new fact, re-check users. 2886 Worklist.pushUsersToWorkList(GCR); 2887 } 2888 } 2889 2890 // If we have two copies of the same pointer in the statepoint argument 2891 // list, canonicalize to one. This may let us common gc.relocates. 
2892 if (GCR.getBasePtr() == GCR.getDerivedPtr() && 2893 GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) { 2894 auto *OpIntTy = GCR.getOperand(2)->getType(); 2895 GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex())); 2896 } 2897 2898 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p)) 2899 // Canonicalize on the type from the uses to the defs 2900 2901 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...) 2902 LiveGcValues.insert(BasePtr); 2903 LiveGcValues.insert(DerivedPtr); 2904 } 2905 Optional<OperandBundleUse> Bundle = 2906 GCSP.getOperandBundle(LLVMContext::OB_gc_live); 2907 unsigned NumOfGCLives = LiveGcValues.size(); 2908 if (!Bundle.hasValue() || NumOfGCLives == Bundle->Inputs.size()) 2909 break; 2910 // We can reduce the size of gc live bundle. 2911 DenseMap<Value *, unsigned> Val2Idx; 2912 std::vector<Value *> NewLiveGc; 2913 for (unsigned I = 0, E = Bundle->Inputs.size(); I < E; ++I) { 2914 Value *V = Bundle->Inputs[I]; 2915 if (Val2Idx.count(V)) 2916 continue; 2917 if (LiveGcValues.count(V)) { 2918 Val2Idx[V] = NewLiveGc.size(); 2919 NewLiveGc.push_back(V); 2920 } else 2921 Val2Idx[V] = NumOfGCLives; 2922 } 2923 // Update all gc.relocates 2924 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) { 2925 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc); 2926 Value *BasePtr = GCR.getBasePtr(); 2927 assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives && 2928 "Missed live gc for base pointer"); 2929 auto *OpIntTy1 = GCR.getOperand(1)->getType(); 2930 GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr])); 2931 Value *DerivedPtr = GCR.getDerivedPtr(); 2932 assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives && 2933 "Missed live gc for derived pointer"); 2934 auto *OpIntTy2 = GCR.getOperand(2)->getType(); 2935 GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr])); 2936 } 2937 // Create new statepoint instruction. 2938 OperandBundleDef NewBundle("gc-live", NewLiveGc); 2939 return CallBase::Create(&Call, NewBundle); 2940 } 2941 default: { break; } 2942 } 2943 2944 return Changed ? &Call : nullptr; 2945 } 2946 2947 /// If the callee is a constexpr cast of a function, attempt to move the cast to 2948 /// the arguments of the call/callbr/invoke. 2949 bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) { 2950 auto *Callee = 2951 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts()); 2952 if (!Callee) 2953 return false; 2954 2955 // If this is a call to a thunk function, don't remove the cast. Thunks are 2956 // used to transparently forward all incoming parameters and outgoing return 2957 // values, so it's important to leave the cast in place. 2958 if (Callee->hasFnAttribute("thunk")) 2959 return false; 2960 2961 // If this is a musttail call, the callee's prototype must match the caller's 2962 // prototype with the exception of pointee types. The code below doesn't 2963 // implement that, so we can't do this transform. 2964 // TODO: Do the transform if it only requires adding pointer casts. 2965 if (Call.isMustTailCall()) 2966 return false; 2967 2968 Instruction *Caller = &Call; 2969 const AttributeList &CallerPAL = Call.getAttributes(); 2970 2971 // Okay, this is a cast from a function to a different type. Unless doing so 2972 // would cause a type conversion of one of our arguments, change this call to 2973 // be a direct call with arguments casted to the appropriate types. 
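  // For illustration (hypothetical functions): a call through a constant
  // expression cast such as
  //   call void bitcast (void (i8*)* @g to void (i32*)*)(i32* %p)
  // can become a direct call to @g with the pointer argument bitcast instead.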
2974 FunctionType *FT = Callee->getFunctionType(); 2975 Type *OldRetTy = Caller->getType(); 2976 Type *NewRetTy = FT->getReturnType(); 2977 2978 // Check to see if we are changing the return type... 2979 if (OldRetTy != NewRetTy) { 2980 2981 if (NewRetTy->isStructTy()) 2982 return false; // TODO: Handle multiple return values. 2983 2984 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) { 2985 if (Callee->isDeclaration()) 2986 return false; // Cannot transform this return value. 2987 2988 if (!Caller->use_empty() && 2989 // void -> non-void is handled specially 2990 !NewRetTy->isVoidTy()) 2991 return false; // Cannot transform this return value. 2992 } 2993 2994 if (!CallerPAL.isEmpty() && !Caller->use_empty()) { 2995 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs()); 2996 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy))) 2997 return false; // Attribute not compatible with transformed value. 2998 } 2999 3000 // If the callbase is an invoke/callbr instruction, and the return value is 3001 // used by a PHI node in a successor, we cannot change the return type of 3002 // the call because there is no place to put the cast instruction (without 3003 // breaking the critical edge). Bail out in this case. 3004 if (!Caller->use_empty()) { 3005 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) 3006 for (User *U : II->users()) 3007 if (PHINode *PN = dyn_cast<PHINode>(U)) 3008 if (PN->getParent() == II->getNormalDest() || 3009 PN->getParent() == II->getUnwindDest()) 3010 return false; 3011 // FIXME: Be conservative for callbr to avoid a quadratic search. 3012 if (isa<CallBrInst>(Caller)) 3013 return false; 3014 } 3015 } 3016 3017 unsigned NumActualArgs = Call.arg_size(); 3018 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs); 3019 3020 // Prevent us turning: 3021 // declare void @takes_i32_inalloca(i32* inalloca) 3022 // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0) 3023 // 3024 // into: 3025 // call void @takes_i32_inalloca(i32* null) 3026 // 3027 // Similarly, avoid folding away bitcasts of byval calls. 3028 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) || 3029 Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated) || 3030 Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal)) 3031 return false; 3032 3033 auto AI = Call.arg_begin(); 3034 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) { 3035 Type *ParamTy = FT->getParamType(i); 3036 Type *ActTy = (*AI)->getType(); 3037 3038 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL)) 3039 return false; // Cannot transform this parameter value. 3040 3041 if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i)) 3042 .overlaps(AttributeFuncs::typeIncompatible(ParamTy))) 3043 return false; // Attribute not compatible with transformed value. 3044 3045 if (Call.isInAllocaArgument(i)) 3046 return false; // Cannot transform to and from inalloca. 3047 3048 if (CallerPAL.hasParamAttr(i, Attribute::SwiftError)) 3049 return false; 3050 3051 // If the parameter is passed as a byval argument, then we have to have a 3052 // sized type and the sized type has to have the same size as the old type. 
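    // (A byval argument is passed by copying the pointee, so a differently
    // sized pointee would change how many bytes the call copies.)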
    if (ParamTy != ActTy && CallerPAL.hasParamAttr(i, Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (!ParamPTy || !ParamPTy->getPointerElementType()->isSized())
        return false;

      Type *CurElTy = Call.getParamByValType(i);
      if (DL.getTypeAllocSize(CurElTy) !=
          DL.getTypeAllocSize(ParamPTy->getPointerElementType()))
        return false;
    }
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call.  We don't want to introduce a varargs call where one doesn't
    // already exist.
    if (FT->isVarArg() != Call.getFunctionType()->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters is the same or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() && Call.getFunctionType()->isVarArg() &&
        FT->getNumParams() != Call.getFunctionType()->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty()) {
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    unsigned SRetIdx;
    if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
        SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams())
      return false;
  }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  SmallVector<Value *, 8> Args;
  SmallVector<AttributeSet, 8> ArgAttrs;
  Args.reserve(NumActualArgs);
  ArgAttrs.reserve(NumActualArgs);

  // Get any return attributes.
  AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));

  LLVMContext &Ctx = Call.getContext();
  AI = Call.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    Value *NewArg = *AI;
    if ((*AI)->getType() != ParamTy)
      NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
    Args.push_back(NewArg);

    // Add any parameter attributes.
    if (CallerPAL.hasParamAttr(i, Attribute::ByVal)) {
      AttrBuilder AB(FT->getContext(), CallerPAL.getParamAttrs(i));
      AB.addByValAttr(NewArg->getType()->getPointerElementType());
      ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
    } else
      ArgAttrs.push_back(CallerPAL.getParamAttrs(i));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
    ArgAttrs.push_back(AttributeSet());
  }

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
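    // For example (illustrative only): an extra i8 or i16 argument that lands
    // in the varargs area is widened via getPromotedType before being
    // appended to Args below; arguments already of a promoted type are passed
    // through unchanged.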
    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        Value *NewArg = *AI;
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
              CastInst::getCastOpcode(*AI, false, PTy, false);
          NewArg = Builder.CreateCast(opcode, *AI, PTy);
        }
        Args.push_back(NewArg);

        // Add any parameter attributes.
        ArgAttrs.push_back(CallerPAL.getParamAttrs(i));
      }
    }
  }

  AttributeSet FnAttrs = CallerPAL.getFnAttrs();

  if (NewRetTy->isVoidTy())
    Caller->setName(""); // Void type should not have a name.

  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
         "missing argument attributes");
  AttributeList NewCallerPAL = AttributeList::get(
      Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);

  SmallVector<OperandBundleDef, 1> OpBundles;
  Call.getOperandBundlesAsDefs(OpBundles);

  CallBase *NewCall;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpBundles);
  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
    NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
                                   CBI->getIndirectDests(), Args, OpBundles);
  } else {
    NewCall = Builder.CreateCall(Callee, Args, OpBundles);
    cast<CallInst>(NewCall)->setTailCallKind(
        cast<CallInst>(Caller)->getTailCallKind());
  }
  NewCall->takeName(Caller);
  NewCall->setCallingConv(Call.getCallingConv());
  NewCall->setAttributes(NewCallerPAL);

  // Preserve prof metadata if any.
  NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});

  // Insert a cast of the return type as necessary.
  Instruction *NC = NewCall;
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke/callbr instruction, we should insert it after the
      // first non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
        BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.pushUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    replaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  eraseInstFromFunction(*Caller);
  return true;
}

/// Turn a call to a function created by init_trampoline / adjust_trampoline
/// intrinsic pair into a direct call to the underlying function.
Instruction *
InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
                                                 IntrinsicInst &Tramp) {
  Value *Callee = Call.getCalledOperand();
  Type *CalleeTy = Callee->getType();
  FunctionType *FTy = Call.getFunctionType();
  AttributeList Attrs = Call.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
  FunctionType *NestFTy = NestF->getFunctionType();

  AttributeList NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestArgNo = 0;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
                                      E = NestFTy->param_end();
         I != E; ++NestArgNo, ++I) {
      AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo);
      if (AS.hasAttribute(Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = AS;
        break;
      }
    }

    if (NestTy) {
      std::vector<Value *> NewArgs;
      std::vector<AttributeSet> NewArgAttrs;
      NewArgs.reserve(Call.arg_size() + 1);
      NewArgAttrs.reserve(Call.arg_size());

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      {
        unsigned ArgNo = 0;
        auto I = Call.arg_begin(), E = Call.arg_end();
        do {
          if (ArgNo == NestArgNo) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp.getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewArgAttrs.push_back(NestAttr);
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));

          ++ArgNo;
          ++I;
        } while (true);
      }

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type *> NewTypes;
      NewTypes.reserve(FTy->getNumParams() + 1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned ArgNo = 0;
        FunctionType::param_iterator I = FTy->param_begin(),
                                     E = FTy->param_end();

        do {
          if (ArgNo == NestArgNo)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++ArgNo;
          ++I;
        } while (true);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
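      //
      // Illustrative sketch (hypothetical types): if FTy is void (i32, float)
      // and the nest parameter of NestF sits at index 0 with type i8*, then
      // NewTypes now holds { i8*, i32, float } and the NewFTy built below is
      // void (i8*, i32, float).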
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
          NestF->getType() == PointerType::getUnqual(NewFTy)
              ? NestF
              : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      AttributeList NewPAL =
          AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(),
                             Attrs.getRetAttrs(), NewArgAttrs);

      SmallVector<OperandBundleDef, 1> OpBundles;
      Call.getOperandBundlesAsDefs(OpBundles);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
        NewCaller = InvokeInst::Create(NewFTy, NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs, OpBundles);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
        NewCaller =
            CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
                               CBI->getIndirectDests(), NewArgs, OpBundles);
        cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
        cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
        cast<CallInst>(NewCaller)->setTailCallKind(
            cast<CallInst>(Call).getTailCallKind());
        cast<CallInst>(NewCaller)->setCallingConv(
            cast<CallInst>(Call).getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      NewCaller->setDebugLoc(Call.getDebugLoc());

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
  Call.setCalledFunction(FTy, NewCallee);
  return &Call;
}