//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine
/// pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm::PatternMatch;

// By default, be aggressive: traverse with no depth limit when attempting to
// sink negation.
static constexpr unsigned NegatorDefaultMaxDepth = ~0U;

// Guesstimate that we will usually end up visiting/producing only a fairly
// small number of new instructions.
static constexpr unsigned NegatorMaxNodesSSO = 16;

namespace llvm {

class AAResults;
class APInt;
class AssumptionCache;
class BlockFrequencyInfo;
class DataLayout;
class DominatorTree;
class GEPOperator;
class GlobalVariable;
class LoopInfo;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class User;

class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
    : public InstCombiner,
      public InstVisitor<InstCombinerImpl, Instruction *> {
public:
  InstCombinerImpl(InstructionWorklist &Worklist, BuilderTy &Builder,
                   bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
                   TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
                   DominatorTree &DT, OptimizationRemarkEmitter &ORE,
                   BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
                   const DataLayout &DL, LoopInfo *LI)
      : InstCombiner(Worklist, Builder, MinimizeSize, AA, AC, TLI, TTI, DT,
                     ORE, BFI, PSI, DL, LI) {}

  virtual ~InstCombinerImpl() = default;

  /// Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  // Visitation implementation - Implement instruction combining for different
  // instruction types. The semantics are as follows:
  // Return Value:
  //    null        - No change was made.
  //    I           - Change was made, I is still valid, though it may be dead.
  //    otherwise   - Change was made, replace I with the returned instruction.
  //
  Instruction *visitFNeg(UnaryOperator &I);
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty,
                                   bool isNUW);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Instruction *visitAnd(BinaryOperator &I);
  Instruction *visitOr(BinaryOperator &I);
  bool sinkNotIntoOtherHandOfAndOrOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
      BinaryOperator *Sh0, const SimplifyQuery &SQ,
      bool AnalyzeForSignBitExtraction = false);
  Instruction *canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
      BinaryOperator &I);
  Instruction *foldVariableSignZeroExtensionOfVariableHighBitExtract(
      BinaryOperator &OldAShr);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *visitFCmpInst(FCmpInst &I);
  CmpInst *canonicalizeICmpPredicate(CmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *foldItoFPtoI(CastInst &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);
  Instruction *visitCallBrInst(CallBrInst &CBI);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src);
  Instruction *visitGEPOfBitcast(BitCastInst *BCI, GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI, Value *FreedOp);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitAtomicRMWInst(AtomicRMWInst &SI);
  Instruction *visitUnconditionalBranchInst(BranchInst &BI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitFenceInst(FenceInst &FI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *
  foldAggregateConstructionIntoAggregateReuse(InsertValueInst &OrigIVI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);
  Instruction *visitVAEndInst(VAEndInst &I);
  Value *pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI);
  bool freezeOtherUses(FreezeInst &FI);
  Instruction *foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN);
  Instruction *visitFreeze(FreezeInst &I);

  /// Specify what to return for unhandled instructions.
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  /// True when DB dominates all uses of DI except UI.
  /// UI must be in the same block as DI.
  /// The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

  LoadInst *combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                 const Twine &Suffix = "");

private:
  bool annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI);
  bool isDesirableIntType(unsigned BitWidth) const;
  bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool shouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;

  /// Classify whether a cast is worth optimizing.
  ///
  /// This is a helper to decide whether the simplification of
  /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
  ///
  /// \param CI The cast we are interested in.
  ///
  /// \return true if this cast actually results in any code being generated
  /// and if it cannot already be eliminated by some other transformation.
  bool shouldOptimizeCast(CastInst *CI);

  /// Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and the result of the overflow check in
  /// OverflowResult, and returns true. If no simplification is possible,
  /// returns false.
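  ///
  /// Illustrative example: for a call to `@llvm.uadd.with.overflow.i8(i8 %x,
  /// i8 0)`, the addition can never wrap, so OperationResult would be %x and
  /// OverflowResult the constant `false`.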
  bool OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned,
                             Value *LHS, Value *RHS,
                             Instruction &CtxI, Value *&OperationResult,
                             Constant *&OverflowResult);

  Instruction *visitCallBase(CallBase &Call);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallBase &Call);
  Instruction *transformCallThroughTrampoline(CallBase &Call,
                                              IntrinsicInst &Tramp);

  Value *simplifyMaskedLoad(IntrinsicInst &II);
  Instruction *simplifyMaskedStore(IntrinsicInst &II);
  Instruction *simplifyMaskedGather(IntrinsicInst &II);
  Instruction *simplifyMaskedScatter(IntrinsicInst &II);

  /// Transform (zext icmp) to bitwise / integer operations in order to
  /// eliminate it.
  ///
  /// \param ICI The icmp of the (zext icmp) pair we are interested in.
  /// \param CI The zext of the (zext icmp) pair we are interested in.
  ///
  /// \return null if the transformation cannot be performed. If the
  /// transformation can be performed the new instruction that replaces the
  /// (zext icmp) pair will be returned.
  Instruction *transformZExtICmp(ICmpInst *ICI, ZExtInst &CI);

  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);

  bool willNotOverflowSignedAdd(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedAdd(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowSub(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedSub(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowMul(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedMul(LHS, RHS, CxtI);
  }
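
  // Illustrative note: if known-bits analysis can show that both operands of
  // an 8-bit unsigned multiply are less than 16, the product is at most 225
  // and fits in 8 bits, so willNotOverflowUnsignedMul can return true.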

  bool willNotOverflow(BinaryOperator::BinaryOps Opcode, const Value *LHS,
                       const Value *RHS, const Instruction &CxtI,
                       bool IsSigned) const {
    switch (Opcode) {
    case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
    case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
    case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
    default: llvm_unreachable("Unexpected opcode for overflow query");
    }
  }

  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Instruction *foldBitcastExtElt(ExtractElementInst &ExtElt);
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
  Instruction *foldBinopOfSextBoolToSelect(BinaryOperator &I);
  Instruction *narrowBinOp(TruncInst &Trunc);
  Instruction *narrowMaskedBinOp(BinaryOperator &And);
  Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
  Instruction *narrowFunnelShift(TruncInst &Trunc);
  Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
  Instruction *matchSAddSubSat(IntrinsicInst &MinMax1);
  Instruction *foldNot(BinaryOperator &I);

  void freelyInvertAllUsersOf(Value *V);

  /// Determine if a pair of casts can be replaced by a single cast.
  ///
  /// \param CI1 The first of a pair of casts.
  /// \param CI2 The second of a pair of casts.
  ///
  /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
  /// Instruction::CastOps value for a cast that can replace the pair, casting
  /// CI1->getSrcTy() to CI2->getDstTy().
  ///
  /// \see CastInst::isEliminableCastPair
  Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
                                            const CastInst *CI2);
  Value *simplifyIntToPtrRoundTripCast(Value *Val);

  Value *foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &I,
                          bool IsAnd, bool IsLogical = false);
  Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Xor);

  Value *foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd);

  Value *foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1, ICmpInst *ICmp2,
                                     bool IsAnd);

  /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
  /// NOTE: Unlike most of instcombine, this returns a Value which should
  /// already be inserted into the function.
  Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd,
                          bool IsLogicalSelect = false);

  Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                       Instruction *CxtI, bool IsAnd,
                                       bool IsLogical = false);
  Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D);
  Value *getSelectCondition(Value *A, Value *B);

  Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
  Instruction *foldFPSignBitOps(BinaryOperator &I);

  // Optimize one of these forms:
  //   and i1 Op, SI / select i1 Op, i1 SI, i1 false (if IsAnd = true)
  //   or i1 Op, SI / select i1 Op, i1 true, i1 SI (if IsAnd = false)
  // into a simpler select instruction using isImpliedCondition.
  Instruction *foldAndOrOfSelectUsingImpliedCond(Value *Op, SelectInst &SI,
                                                 bool IsAnd);

public:
  /// Inserts an instruction \p New before instruction \p Old.
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
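  ///
  /// A typical (illustrative) use from a visit method:
  ///   return InsertNewInstBefore(BinaryOperator::CreateAnd(X, Y), I);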
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
    Worklist.add(New);
    return New;
  }

  /// Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }

  /// A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate that
    // no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = PoisonValue::get(I.getType());

    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                      << "    with " << *V << '\n');

    I.replaceAllUsesWith(V);
    MadeIRChange = true;
    return &I;
  }

  /// Replace operand of instruction and add old operand to the worklist.
  Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
    Worklist.addValue(I.getOperand(OpNum));
    I.setOperand(OpNum, V);
    return &I;
  }

  /// Replace use and add the previously used value to the worklist.
  void replaceUse(Use &U, Value *NewValue) {
    Worklist.addValue(U);
    U = NewValue;
  }

  /// Create and insert the idiom we use to indicate a block is unreachable
  /// without having to rewrite the CFG from within InstCombine.
  void CreateNonTerminatorUnreachable(Instruction *InsertAt) {
    auto &Ctx = InsertAt->getContext();
    new StoreInst(ConstantInt::getTrue(Ctx),
                  PoisonValue::get(Type::getInt1PtrTy(Ctx)),
                  InsertAt);
  }

  /// Combiner-aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  Instruction *eraseInstFromFunction(Instruction &I) override {
    LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
    assert(I.use_empty() && "Cannot erase instruction that is used!");
    salvageDebugInfo(I);

    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
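    // (Illustrative: erasing `%s = add i8 %x, %y` may drop the last use of
    // %x or %y, so their defining instructions are re-queued for combining.)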
    for (Use &Operand : I.operands())
      if (auto *Inst = dyn_cast<Instruction>(Operand))
        Worklist.add(Inst);

    Worklist.remove(&I);
    I.eraseFromParent();
    MadeIRChange = true;
    return nullptr; // Don't do anything with I.
  }

  void computeKnownBits(const Value *V, KnownBits &Known,
                        unsigned Depth, const Instruction *CxtI) const {
    llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
  }

  KnownBits computeKnownBits(const Value *V, unsigned Depth,
                             const Instruction *CxtI) const {
    return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
  }

  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
                              unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) {
    return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
  }

  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
                         const Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
  }

  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedSub(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp,
                                 bool IsSigned, Value *LHS, Value *RHS,
                                 Instruction *CxtI) const;

  /// Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (eg "(A*B)+(A*C)"
  /// -> "A*(B+C)") or by expanding out if this results in simplifications
  /// (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the
  /// simplified value, or null if it didn't simplify.
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);

  /// Tries to simplify add operations using the definition of remainder.
  ///
  /// The definition of remainder is X % C = X - (X / C) * C. The add
  /// expression X % C0 + ((X / C0) % C1) * C0 can be simplified to
  /// X % (C0 * C1).
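  ///
  /// Illustrative instance with C0 = 3, C1 = 5, X = 17 (unsigned):
  /// 17 % 3 + ((17 / 3) % 5) * 3 = 2 + (5 % 5) * 3 = 2, which equals 17 % 15.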
  Value *SimplifyAddWithRemainder(BinaryOperator &I);

  // Binary Op helper for select operations where the expression can be
  // efficiently reorganized.
  Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
                                        Value *RHS);

  /// This tries to simplify binary operations by factorizing out common terms
  /// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
  Value *tryFactorization(BinaryOperator &, Instruction::BinaryOps, Value *,
                          Value *, Value *, Value *);

  /// Match a select chain which produces one of three values based on whether
  /// the LHS is less than, equal to, or greater than RHS respectively.
  /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
  /// Equal and Greater values are saved in the matching process and returned
  /// to the caller.
  bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
                               ConstantInt *&Less, ConstantInt *&Equal,
                               ConstantInt *&Greater);

  /// Attempts to replace V with a simpler value based on the demanded
  /// bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                 KnownBits &Known, unsigned Depth,
                                 Instruction *CxtI);
  bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                            const APInt &DemandedMask, KnownBits &Known,
                            unsigned Depth = 0) override;

  /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
  /// bits. It also tries to handle simplifications that can be done based on
  /// DemandedMask, but without modifying the Instruction.
  Value *SimplifyMultipleUseDemandedBits(Instruction *I,
                                         const APInt &DemandedMask,
                                         KnownBits &Known,
                                         unsigned Depth, Instruction *CxtI);

  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify the
  /// demanded bits for the "r1 = shr x, c1; r2 = shl r1, c2" instruction
  /// sequence.
  Value *simplifyShrShlDemandedBits(
      Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
      const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);

  /// Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &UndefElts, unsigned Depth = 0,
                                    bool AllowMultipleUsers = false) override;

  /// Canonicalize the position of binops relative to shufflevector.
  Instruction *foldVectorBinop(BinaryOperator &Inst);
  Instruction *foldVectorSelect(SelectInst &Sel);
  Instruction *foldSelectShuffle(ShuffleVectorInst &Shuf);

  /// Given a binary operator, cast instruction, or select which has a PHI node
  /// as operand #0, see if we can fold the instruction into the PHI (which is
  /// only possible if all operands to the PHI are constants).
  Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);

  /// For a binary operator with 2 phi operands, try to hoist the binary
  /// operation before the phi. This can result in fewer instructions in
  /// patterns where at least one set of phi operands simplifies.
  /// Example:
  ///   BB3: binop (phi [X, BB1], [C1, BB2]), (phi [Y, BB1], [C2, BB2])
  /// -->
  ///   BB1: BO = binop X, Y
  ///   BB3: phi [BO, BB1], [(binop C1, C2), BB2]
  Instruction *foldBinopWithPhiOperands(BinaryOperator &BO);

  /// Given an instruction with a select as one operand and a constant as the
  /// other operand, try to fold the binary operator into the select arguments.
  /// This also works for Cast instructions, which obviously do not have a
  /// second operand.
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
                                bool FoldWithMultiUse = false);

  /// This is a convenience wrapper function for the above two functions.
  Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);

  Instruction *foldAddWithConstant(BinaryOperator &Add);

  /// Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *foldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *foldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *foldPHIArgZextsIntoPHI(PHINode &PN);
  Instruction *foldPHIArgIntToPtrToPHI(PHINode &PN);

  /// If an integer typed PHI has only one use which is an IntToPtr operation,
  /// replace the PHI with an existing pointer typed PHI if it exists.
  /// Otherwise insert a new pointer typed PHI and replace the original one.
  Instruction *foldIntegerTypedPHI(PHINode &PN);

  /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
  /// folded operation.
  void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);

  Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *foldSelectICmp(ICmpInst::Predicate Pred, SelectInst *SI,
                              Value *RHS, const ICmpInst &I);
  Instruction *foldAllocaCmp(ICmpInst &ICI, const AllocaInst *Alloca);
  Instruction *foldCmpLoadFromIndexedGlobal(LoadInst *LI,
                                            GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *foldICmpAddOpConst(Value *X, const APInt &C,
                                  ICmpInst::Predicate Pred);
  Instruction *foldICmpWithCastOp(ICmpInst &ICmp);
  Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp);

  Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
  Instruction *foldICmpWithDominatingICmp(ICmpInst &Cmp);
  Instruction *foldICmpWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantAllowUndef(ICmpInst &Cmp,
                                                  const APInt &C);
  Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
  Instruction *foldICmpEquality(ICmpInst &Cmp);
  Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
  Instruction *foldSignBitTest(ICmpInst &I);
  Instruction *foldICmpWithZero(ICmpInst &Cmp);

  Value *foldMultiplicationOverflowCheck(ICmpInst &Cmp);

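  // The foldICmp*Constant helpers below each handle one `icmp Pred (Op ...), C`
  // shape where the RHS is a constant; e.g. foldICmpShlConstant matches a
  // compare whose LHS is a left shift.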
  Instruction *foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO,
                                         const APInt &C);
  Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
                                      ConstantInt *C);
  Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
                                     const APInt &C);
  Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
                                   const APInt &C);
  Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
                                   const APInt &C);
  Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                  const APInt &C);
  Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
                                   const APInt &C);
  Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
                                   const APInt &C);
  Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
                                   const APInt &C);
  Instruction *foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *SRem,
                                    const APInt &C);
  Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
                                   const APInt &C);
  Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
                                   const APInt &C);
  Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
                                   const APInt &C);
  Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
                                     const APInt &C1);
  Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                const APInt &C1, const APInt &C2);
  Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt,
                                     const APInt &C1, const APInt &C2);
  Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt,
                                     const APInt &C1, const APInt &C2);

  Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                 BinaryOperator *BO,
                                                 const APInt &C);
  Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                             const APInt &C);
  Instruction *foldICmpEqIntrinsicWithConstant(ICmpInst &ICI,
                                               IntrinsicInst *II,
                                               const APInt &C);
  Instruction *foldICmpBitCast(ICmpInst &Cmp);

  // Helpers of visitSelectInst().
  Instruction *foldSelectExtConst(SelectInst &Sel);
  Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI,
                              Instruction *FI);
  Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Instruction *foldSelectValueEquivalence(SelectInst &SI, ICmpInst &ICI);

  Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                         bool isSigned, bool Inside);
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  bool mergeStoreIntoSuccessor(StoreInst &SI);

  /// Given an initial instruction, check to see if it is the root of a
  /// bswap/bitreverse idiom. If so, return the equivalent bswap/bitreverse
  /// intrinsic.
  Instruction *matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps,
                                      bool MatchBitReversals);

  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};

class Negator final {
  /// Top-to-bottom, def-to-use negated instruction tree we produced.
  SmallVector<Instruction *, NegatorMaxNodesSSO> NewInstructions;

  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy Builder;

  const DataLayout &DL;
  AssumptionCache &AC;
  const DominatorTree &DT;

  const bool IsTrulyNegation;

  SmallDenseMap<Value *, Value *> NegationsCache;

  Negator(LLVMContext &C, const DataLayout &DL, AssumptionCache &AC,
          const DominatorTree &DT, bool IsTrulyNegation);

#if LLVM_ENABLE_STATS
  unsigned NumValuesVisitedInThisNegator = 0;
  ~Negator();
#endif

  using Result = std::pair<ArrayRef<Instruction *> /*NewInstructions*/,
                           Value * /*NegatedRoot*/>;

  std::array<Value *, 2> getSortedOperandsOfBinOp(Instruction *I);

  LLVM_NODISCARD Value *visitImpl(Value *V, unsigned Depth);

  LLVM_NODISCARD Value *negate(Value *V, unsigned Depth);

  /// Recurse depth-first and attempt to sink the negation.
  /// FIXME: use worklist?
  LLVM_NODISCARD Optional<Result> run(Value *Root);

  Negator(const Negator &) = delete;
  Negator(Negator &&) = delete;
  Negator &operator=(const Negator &) = delete;
  Negator &operator=(Negator &&) = delete;

public:
  /// Attempt to negate \p Root. Returns nullptr if negation can't be
  /// performed, otherwise returns the negated value.
  LLVM_NODISCARD static Value *Negate(bool LHSIsZero, Value *Root,
                                      InstCombinerImpl &IC);
};

} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H