//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine
/// pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>

#define DEBUG_TYPE "instcombine"

using namespace llvm::PatternMatch;

namespace llvm {

class APInt;
class AssumptionCache;
class BlockFrequencyInfo;
class DataLayout;
class DominatorTree;
class GEPOperator;
class GlobalVariable;
class LoopInfo;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class User;

/// Assign a complexity or rank value to LLVM Values. This is used to reduce
/// the amount of pattern matching needed for compares and commutative
/// instructions. For example, if we have:
///   icmp ugt X, Constant
/// or
///   xor (add X, Constant), cast Z
///
/// We do not have to consider the commuted variants of these patterns because
/// canonicalization based on complexity guarantees the above ordering.
///
/// This routine maps IR values to various complexity ranks:
///   0 -> undef
///   1 -> Constants
///   2 -> Other non-instructions
///   3 -> Arguments
///   4 -> Cast and (f)neg/not instructions
///   5 -> Other instructions
static inline unsigned getComplexity(Value *V) {
  if (isa<Instruction>(V)) {
    if (isa<CastInst>(V) || match(V, m_Neg(m_Value())) ||
        match(V, m_Not(m_Value())) || match(V, m_FNeg(m_Value())))
      return 4;
    return 5;
  }
  if (isa<Argument>(V))
    return 3;
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
}

/// Predicate canonicalization reduces the number of patterns that need to be
/// matched by other transforms. For example, we may swap the operands of a
/// conditional branch or select to create a compare with a canonical
/// (inverted) predicate which is then more likely to be matched with other
/// values.
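/// For example (illustrative): 'icmp sgt X, Y' already uses a canonical
/// predicate, while 'icmp sle X, Y' does not; the latter is typically
/// canonicalized by inverting the predicate and swapping the true/false arms
/// or branch destinations of its users.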
static inline bool isCanonicalPredicate(CmpInst::Predicate Pred) {
  switch (Pred) {
  case CmpInst::ICMP_NE:
  case CmpInst::ICMP_ULE:
  case CmpInst::ICMP_SLE:
  case CmpInst::ICMP_UGE:
  case CmpInst::ICMP_SGE:
  // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_OGE:
    return false;
  default:
    return true;
  }
}

/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
/// the result of the comparison is true when the input value is signed.
inline bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                           bool &TrueIfSigned) {
  switch (Pred) {
  case ICmpInst::ICMP_SLT: // True if LHS s< 0
    TrueIfSigned = true;
    return RHS.isNullValue();
  case ICmpInst::ICMP_SLE: // True if LHS s<= -1
    TrueIfSigned = true;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_SGT: // True if LHS s> -1
    TrueIfSigned = false;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_SGE: // True if LHS s>= 0
    TrueIfSigned = false;
    return RHS.isNullValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == sign-bit-mask - 1
    TrueIfSigned = true;
    return RHS.isMaxSignedValue();
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS.isMinSignedValue();
  case ICmpInst::ICMP_ULT:
    // True if LHS u< RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = false;
    return RHS.isMinSignedValue();
  case ICmpInst::ICMP_ULE:
    // True if LHS u<= RHS and RHS == sign-bit-mask - 1
    TrueIfSigned = false;
    return RHS.isMaxSignedValue();
  default:
    return false;
  }
}

llvm::Optional<std::pair<CmpInst::Predicate, Constant *>>
getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred, Constant *C);

/// Return the source operand of a potentially bitcasted value while
/// optionally checking if it has one use. If there is no bitcast or the one
/// use check is not met, return the input value itself.
static inline Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
  if (auto *BitCast = dyn_cast<BitCastInst>(V))
    if (!OneUseOnly || BitCast->hasOneUse())
      return BitCast->getOperand(0);

  // V is not a bitcast or V has more than one use and OneUseOnly is true.
  return V;
}

/// Add one to a Constant
static inline Constant *AddOne(Constant *C) {
  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
}

/// Subtract one from a Constant
static inline Constant *SubOne(Constant *C) {
  return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
}

/// Return true if the specified value is free to invert (apply ~ to).
/// This happens in cases where the ~ can be eliminated. If WillInvertAllUses
/// is true, work under the assumption that the caller intends to remove all
/// uses of V and only keep uses of ~V.
///
/// See also: canFreelyInvertAllUsersOf()
static inline bool isFreeToInvert(Value *V, bool WillInvertAllUses) {
  // ~(~(X)) -> X.
  if (match(V, m_Not(m_Value())))
    return true;

  // Constants can be considered to be not'ed values.
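  // Illustrative example: ~(i8 5) is simply the constant i8 -6, so no extra
  // instruction is needed to materialize the inversion.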
  if (match(V, m_AnyIntegralConstant()))
    return true;

  // Compares can be inverted if all of their uses are being modified to use
  // the ~V.
  if (isa<CmpInst>(V))
    return WillInvertAllUses;

  // If `V` is of the form `A + Constant` then `-1 - V` can be folded into
  // `(-1 - Constant) - A` if we are willing to invert all of the uses.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
    if (BO->getOpcode() == Instruction::Add ||
        BO->getOpcode() == Instruction::Sub)
      if (isa<Constant>(BO->getOperand(0)) || isa<Constant>(BO->getOperand(1)))
        return WillInvertAllUses;

  // Selects with invertible operands are freely invertible
  if (match(V, m_Select(m_Value(), m_Not(m_Value()), m_Not(m_Value()))))
    return WillInvertAllUses;

  return false;
}

/// Given i1 V, can every user of V be freely adapted if V is changed to !V ?
///
/// See also: isFreeToInvert()
static inline bool canFreelyInvertAllUsersOf(Value *V, Value *IgnoredUser) {
  // Look at every user of V.
  for (User *U : V->users()) {
    if (U == IgnoredUser)
      continue; // Don't consider this user.

    auto *I = cast<Instruction>(U);
    switch (I->getOpcode()) {
    case Instruction::Select:
    case Instruction::Br:
      break; // Free to invert by swapping true/false values/destinations.
    case Instruction::Xor: // Can invert 'xor' if it's a 'not', by ignoring it.
      if (!match(I, m_Not(m_Value())))
        return false; // Not a 'not'.
      break;
    default:
      return false; // Don't know, likely not freely invertible.
    }
    // So far all users were free to invert...
  }
  return true; // Can freely invert all users!
}

/// Some binary operators require special handling to avoid poison and
/// undefined behavior. If a constant vector has undef elements, replace those
/// undefs with identity constants if possible because those are always safe
/// to execute. If no identity constant exists, replace undef with some other
/// safe constant.
static inline Constant *getSafeVectorConstantForBinop(
    BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant) {
  assert(In->getType()->isVectorTy() && "Not expecting scalars here");

  Type *EltTy = In->getType()->getVectorElementType();
  auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
  if (!SafeC) {
    // TODO: Should this be available as a constant utility function? It is
    // similar to getBinOpAbsorber().
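    // Illustrative example: 'shl' has no identity element for a constant LHS,
    // so for In = <i32 3, i32 undef> with IsRHSConstant == false the undef
    // lane is replaced with the safe value 0 below, giving <i32 3, i32 0>.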
    if (IsRHSConstant) {
      switch (Opcode) {
      case Instruction::SRem: // X % 1 = 0
      case Instruction::URem: // X %u 1 = 0
        SafeC = ConstantInt::get(EltTy, 1);
        break;
      case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
        SafeC = ConstantFP::get(EltTy, 1.0);
        break;
      default:
        llvm_unreachable("Only rem opcodes have no identity constant for RHS");
      }
    } else {
      switch (Opcode) {
      case Instruction::Shl:  // 0 << X = 0
      case Instruction::LShr: // 0 >>u X = 0
      case Instruction::AShr: // 0 >> X = 0
      case Instruction::SDiv: // 0 / X = 0
      case Instruction::UDiv: // 0 /u X = 0
      case Instruction::SRem: // 0 % X = 0
      case Instruction::URem: // 0 %u X = 0
      case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
      case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
      case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
      case Instruction::FRem: // 0.0 % X = 0
        SafeC = Constant::getNullValue(EltTy);
        break;
      default:
        llvm_unreachable("Expected to find identity constant for opcode");
      }
    }
  }
  assert(SafeC && "Must have safe constant for binop");
  unsigned NumElts = In->getType()->getVectorNumElements();
  SmallVector<Constant *, 16> Out(NumElts);
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = In->getAggregateElement(i);
    Out[i] = isa<UndefValue>(C) ? SafeC : C;
  }
  return ConstantVector::get(Out);
}

/// The core instruction combiner logic.
///
/// This class provides both the logic to recursively visit instructions and
/// combine them.
class LLVM_LIBRARY_VISIBILITY InstCombiner
    : public InstVisitor<InstCombiner, Instruction *> {
  // FIXME: These members shouldn't be public.
public:
  /// A worklist of the instructions that need to be simplified.
  InstCombineWorklist &Worklist;

  /// An IRBuilder that automatically inserts new instructions into the
  /// worklist.
  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy &Builder;

private:
  // Mode in which we are running the combiner.
  const bool MinimizeSize;

  /// Enable combines that trigger rarely but are costly in compiletime.
  const bool ExpensiveCombines;

  AliasAnalysis *AA;

  // Required analyses.
  AssumptionCache &AC;
  TargetLibraryInfo &TLI;
  DominatorTree &DT;
  const DataLayout &DL;
  const SimplifyQuery SQ;
  OptimizationRemarkEmitter &ORE;
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Optional analyses. When non-null, these can both be used to do better
  // combining and will be updated to reflect any changes.
  LoopInfo *LI;

  bool MadeIRChange = false;

public:
  InstCombiner(InstCombineWorklist &Worklist, BuilderTy &Builder,
               bool MinimizeSize, bool ExpensiveCombines, AliasAnalysis *AA,
               AssumptionCache &AC, TargetLibraryInfo &TLI, DominatorTree &DT,
               OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
               ProfileSummaryInfo *PSI, const DataLayout &DL, LoopInfo *LI)
      : Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
        ExpensiveCombines(ExpensiveCombines), AA(AA), AC(AC), TLI(TLI), DT(DT),
        DL(DL), SQ(DL, &TLI, &DT, &AC), ORE(ORE), BFI(BFI), PSI(PSI), LI(LI) {}

  /// Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  AssumptionCache &getAssumptionCache() const { return AC; }

  const DataLayout &getDataLayout() const { return DL; }

  DominatorTree &getDominatorTree() const { return DT; }

  LoopInfo *getLoopInfo() const { return LI; }

  TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }

  // Visitation implementation - Implement instruction combining for different
  // instruction types. The semantics are as follows:
  //   Return Value:
  //     null        - No change was made
  //     I           - Change was made, I is still valid, I may be dead though
  //     otherwise   - Change was made, replace I with returned instruction
  //
  Instruction *visitFNeg(UnaryOperator &I);
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty,
                                   bool isNUW);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
  Instruction *commonRemTransforms(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonDivTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Instruction *visitAnd(BinaryOperator &I);
  Instruction *visitOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
      BinaryOperator *Sh0, const SimplifyQuery &SQ,
      bool AnalyzeForSignBitExtraction = false);
  Instruction *canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
      BinaryOperator &I);
  Instruction *foldVariableSignZeroExtensionOfVariableHighBitExtract(
      BinaryOperator &OldAShr);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *visitFCmpInst(FCmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *FoldItoFPtoI(Instruction &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);
  Instruction *visitCallBrInst(CallBrInst &CBI);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitAtomicRMWInst(AtomicRMWInst &SI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitFenceInst(FenceInst &FI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);
  Instruction *visitVAStartInst(VAStartInst &I);
  Instruction *visitVACopyInst(VACopyInst &I);
  Instruction *visitFreeze(FreezeInst &I);

  /// Specify what to return for unhandled instructions.
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  /// True when DB dominates all uses of DI except UI.
  /// UI must be in the same block as DI.
  /// The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

  /// Try to replace instruction \p I with value \p V which are pointers
  /// in different address space.
  /// \return true if successful.
  bool replacePointer(Instruction &I, Value *V);

  LoadInst *combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                 const Twine &Suffix = "");

private:
  bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool shouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;
  Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                            SmallVectorImpl<Value *> &NewIndices);

  /// Classify whether a cast is worth optimizing.
  ///
  /// This is a helper to decide whether the simplification of
  /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
  ///
  /// \param CI The cast we are interested in.
  ///
  /// \return true if this cast actually results in any code being generated
  /// and if it cannot already be eliminated by some other transformation.
  bool shouldOptimizeCast(CastInst *CI);

  /// Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and the result of the overflow check in
  /// OverflowResult, and returns true. If no simplification is possible,
  /// returns false.
  bool OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned,
                             Value *LHS, Value *RHS, Instruction &CtxI,
                             Value *&OperationResult,
                             Constant *&OverflowResult);

  Instruction *visitCallBase(CallBase &Call);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallBase &Call);
  Instruction *transformCallThroughTrampoline(CallBase &Call,
                                              IntrinsicInst &Tramp);

  Value *simplifyMaskedLoad(IntrinsicInst &II);
  Instruction *simplifyMaskedStore(IntrinsicInst &II);
  Instruction *simplifyMaskedGather(IntrinsicInst &II);
  Instruction *simplifyMaskedScatter(IntrinsicInst &II);

  /// Transform (zext icmp) to bitwise / integer operations in order to
  /// eliminate it.
  ///
  /// \param ICI The icmp of the (zext icmp) pair we are interested in.
  /// \param CI The zext of the (zext icmp) pair we are interested in.
  /// \param DoTransform Pass false to just test whether the given (zext icmp)
  /// would be transformed. Pass true to actually perform the transformation.
  ///
  /// \return null if the transformation cannot be performed. If the
  /// transformation can be performed the new instruction that replaces the
  /// (zext icmp) pair will be returned (if \p DoTransform is false the
  /// unmodified \p ICI will be returned in this case).
  Instruction *transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
                                 bool DoTransform = true);

  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);

  bool willNotOverflowSignedAdd(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedAdd(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowSub(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedSub(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowMul(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedMul(LHS, RHS, CxtI);
  }

  bool willNotOverflow(BinaryOperator::BinaryOps Opcode, const Value *LHS,
                       const Value *RHS, const Instruction &CxtI,
                       bool IsSigned) const {
    switch (Opcode) {
    case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
    case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
    case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
    default: llvm_unreachable("Unexpected opcode for overflow query");
    }
  }

  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
  Instruction *narrowBinOp(TruncInst &Trunc);
  Instruction *narrowMaskedBinOp(BinaryOperator &And);
  Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
  Instruction *narrowRotate(TruncInst &Trunc);
  Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
  Instruction *matchSAddSubSat(SelectInst &MinMax1);

  /// Determine if a pair of casts can be replaced by a single cast.
  ///
  /// \param CI1 The first of a pair of casts.
  /// \param CI2 The second of a pair of casts.
  ///
  /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
  /// Instruction::CastOps value for a cast that can replace the pair, casting
  /// CI1->getSrcTy() to CI2->getDstTy().
  ///
  /// \see CastInst::isEliminableCastPair
  Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
                                            const CastInst *CI2);

  Value *foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &CxtI);
  Value *foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &CxtI);
  Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &I);

  /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
  /// NOTE: Unlike most of instcombine, this returns a Value which should
  /// already be inserted into the function.
  Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd);

  Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                       bool JoinedByAnd, Instruction &CxtI);
  Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D);
  Value *getSelectCondition(Value *A, Value *B);

  Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);

public:
  /// Inserts an instruction \p New before instruction \p Old.
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
    Worklist.Add(New);
    return New;
  }

  /// Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }

  /// A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate that
    // no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = UndefValue::get(I.getType());

    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                      << "    with " << *V << '\n');

    I.replaceAllUsesWith(V);
    return &I;
  }

  /// Creates a result tuple for an overflow intrinsic \p II with a given
  /// \p Result and a constant \p Overflow value.
  Instruction *CreateOverflowTuple(IntrinsicInst *II, Value *Result,
                                   Constant *Overflow) {
    Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
    StructType *ST = cast<StructType>(II->getType());
    Constant *Struct = ConstantStruct::get(ST, V);
    return InsertValueInst::Create(Struct, Result, 0);
  }

  /// Create and insert the idiom we use to indicate a block is unreachable
  /// without having to rewrite the CFG from within InstCombine.
  void CreateNonTerminatorUnreachable(Instruction *InsertAt) {
    auto &Ctx = InsertAt->getContext();
    new StoreInst(ConstantInt::getTrue(Ctx),
                  UndefValue::get(Type::getInt1PtrTy(Ctx)),
                  InsertAt);
  }

  /// Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  Instruction *eraseInstFromFunction(Instruction &I) {
    LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
    assert(I.use_empty() && "Cannot erase instruction that is used!");
    salvageDebugInfoOrMarkUndef(I);

    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
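    // (Illustrative note: the small operand-count limit below merely bounds
    // how much gets pushed back onto the worklist for unusually wide
    // instructions such as large PHIs or calls.)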
    if (I.getNumOperands() < 8) {
      for (Use &Operand : I.operands())
        if (auto *Inst = dyn_cast<Instruction>(Operand))
          Worklist.Add(Inst);
    }
    Worklist.Remove(&I);
    I.eraseFromParent();
    MadeIRChange = true;
    return nullptr; // Don't do anything with FI
  }

  void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                        const Instruction *CxtI) const {
    llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
  }

  KnownBits computeKnownBits(const Value *V, unsigned Depth,
                             const Instruction *CxtI) const {
    return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
  }

  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
                              unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) {
    return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
  }

  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
                         const Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
  }

  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedSub(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp,
                                 bool IsSigned, Value *LHS, Value *RHS,
                                 Instruction *CxtI) const;

  /// Maximum size of array considered when transforming.
  uint64_t MaxArraySizeForCombine = 0;

private:
  /// Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (eg "(A*B)+(A*C)"
  /// -> "A*(B+C)") or by expanding out if this results in simplifications
  /// (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the
  /// simplified value, or null if it didn't simplify.
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);

  /// Tries to simplify add operations using the definition of remainder.
  ///
  /// The definition of remainder is X % C = X - (X / C) * C. The add
  /// expression X % C0 + ((X / C0) % C1) * C0 can be simplified to
  /// X % (C0 * C1).
  Value *SimplifyAddWithRemainder(BinaryOperator &I);

  // Binary Op helper for select operations where the expression can be
  // efficiently reorganized.
  Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
                                        Value *RHS);

  /// This tries to simplify binary operations by factorizing out common terms
  /// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
  Value *tryFactorization(BinaryOperator &, Instruction::BinaryOps, Value *,
                          Value *, Value *, Value *);

  /// Match a select chain which produces one of three values based on whether
  /// the LHS is less than, equal to, or greater than RHS respectively.
  /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
  /// Equal and Greater values are saved in the matching process and returned
  /// to the caller.
  bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
                               ConstantInt *&Less, ConstantInt *&Equal,
                               ConstantInt *&Greater);

  /// Attempts to replace V with a simpler value based on the demanded
  /// bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownBits &Known,
                                 unsigned Depth, Instruction *CxtI);
  bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                            const APInt &DemandedMask, KnownBits &Known,
                            unsigned Depth = 0);

  /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
  /// bits. It also tries to handle simplifications that can be done based on
  /// DemandedMask, but without modifying the Instruction.
  Value *SimplifyMultipleUseDemandedBits(Instruction *I,
                                         const APInt &DemandedMask,
                                         KnownBits &Known, unsigned Depth,
                                         Instruction *CxtI);

  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
  /// bit for "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
  Value *simplifyShrShlDemandedBits(
      Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
      const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);

  /// Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  Value *simplifyAMDGCNMemoryIntrinsicDemanded(IntrinsicInst *II,
                                               APInt DemandedElts,
                                               int DmaskIdx = -1);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &UndefElts, unsigned Depth = 0,
                                    bool AllowMultipleUsers = false);

  /// Canonicalize the position of binops relative to shufflevector.
  Instruction *foldVectorBinop(BinaryOperator &Inst);

  /// Given a binary operator, cast instruction, or select which has a PHI node
  /// as operand #0, see if we can fold the instruction into the PHI (which is
  /// only possible if all operands to the PHI are constants).
  Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);

  /// Given an instruction with a select as one operand and a constant as the
  /// other operand, try to fold the binary operator into the select arguments.
  /// This also works for Cast instructions, which obviously do not have a
  /// second operand.
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);

  /// This is a convenience wrapper function for the above two functions.
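  /// For example (illustrative): 'add (select %c, i32 2, i32 7), 5' can become
  /// 'select %c, i32 7, i32 12', and a binop whose other operand is a phi of
  /// constants can likewise be folded into the phi.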
  Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);

  Instruction *foldAddWithConstant(BinaryOperator &Add);

  /// Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgZextsIntoPHI(PHINode &PN);

  /// If an integer typed PHI has only one use which is an IntToPtr operation,
  /// replace the PHI with an existing pointer typed PHI if it exists.
  /// Otherwise insert a new pointer typed PHI and replace the original one.
  Instruction *FoldIntegerTypedPHI(PHINode &PN);

  /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
  /// folded operation.
  void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);

  Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *foldAllocaCmp(ICmpInst &ICI, const AllocaInst *Alloca,
                             const Value *Other);
  Instruction *foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *foldICmpAddOpConst(Value *X, const APInt &C,
                                  ICmpInst::Predicate Pred);
  Instruction *foldICmpWithCastOp(ICmpInst &ICI);

  Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
  Instruction *foldICmpWithDominatingICmp(ICmpInst &Cmp);
  Instruction *foldICmpWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
  Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
  Instruction *foldICmpEquality(ICmpInst &Cmp);
  Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
  Instruction *foldSignBitTest(ICmpInst &I);
  Instruction *foldICmpWithZero(ICmpInst &Cmp);

  Value *foldUnsignedMultiplicationOverflowCheck(ICmpInst &Cmp);

  Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
                                      ConstantInt *C);
  Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
                                     const APInt &C);
  Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
                                   const APInt &C);
  Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
                                   const APInt &C);
  Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                  const APInt &C);
  Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
                                   const APInt &C);
  Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
                                   const APInt &C);
  Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
                                   const APInt &C);
  Instruction *foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
                                   const APInt &C);
  Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
                                   const APInt &C);
  Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
                                   const APInt &C);
  Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
                                     const APInt &C1);
  Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                const APInt &C1, const APInt &C2);
  Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);
  Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);

  Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                 BinaryOperator *BO,
                                                 const APInt &C);
  Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                             const APInt &C);
  Instruction *foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                               const APInt &C);

  // Helpers of visitSelectInst().
  Instruction *foldSelectExtConst(SelectInst &Sel);
  Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
  Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);

  Instruction *OptAndOp(BinaryOperator *Op, ConstantInt *OpRHS,
                        ConstantInt *AndRHS, BinaryOperator &TheAnd);

  Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                         bool isSigned, bool Inside);
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  bool mergeStoreIntoSuccessor(StoreInst &SI);

  /// Given an 'or' instruction, check to see if it is part of a bswap idiom.
  /// If so, return the equivalent bswap intrinsic.
  Instruction *matchBSwap(BinaryOperator &Or);

  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};

} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H