//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine
/// pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

#define DEBUG_TYPE "instcombine"

using namespace llvm::PatternMatch;

// As a default, let's assume that we want to be aggressive and attempt to
// traverse with no limits in an attempt to sink negation.
static constexpr unsigned NegatorDefaultMaxDepth = ~0U;

// Let's guesstimate that most often we will end up visiting/producing a
// fairly small number of new instructions.
static constexpr unsigned NegatorMaxNodesSSO = 16;

namespace llvm {

class AAResults;
class APInt;
class AssumptionCache;
class BlockFrequencyInfo;
class DataLayout;
class DominatorTree;
class GEPOperator;
class GlobalVariable;
class LoopInfo;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class User;

class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
    : public InstCombiner,
      public InstVisitor<InstCombinerImpl, Instruction *> {
public:
  InstCombinerImpl(InstCombineWorklist &Worklist, BuilderTy &Builder,
                   bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
                   TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
                   DominatorTree &DT, OptimizationRemarkEmitter &ORE,
                   BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
                   const DataLayout &DL, LoopInfo *LI)
      : InstCombiner(Worklist, Builder, MinimizeSize, AA, AC, TLI, TTI, DT,
                     ORE, BFI, PSI, DL, LI) {}

  virtual ~InstCombinerImpl() {}

  /// Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  // Visitation implementation - Implement instruction combining for different
  // instruction types. The semantics are as follows:
  // Return Value:
  //   null      - No change was made.
  //   I         - Change was made; I is still valid, though it may be dead.
  //   otherwise - Change was made; replace I with the returned instruction.
  //
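  // For illustration (a sketch only, not a fold implemented in this file):
  // a visitor that rewrote "sub X, 0" to X could RAUW in place and return I:
  //   if (match(&I, m_Sub(m_Value(X), m_Zero())))
  //     return replaceInstUsesWith(I, X); // the "I" case: changed, now dead
  //   return nullptr;                     // the "null" case: no change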
  Instruction *visitFNeg(UnaryOperator &I);
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty,
                                   bool isNUW);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Instruction *visitAnd(BinaryOperator &I);
  Instruction *visitOr(BinaryOperator &I);
  bool sinkNotIntoOtherHandOfAndOrOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
      BinaryOperator *Sh0, const SimplifyQuery &SQ,
      bool AnalyzeForSignBitExtraction = false);
  Instruction *canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
      BinaryOperator &I);
  Instruction *foldVariableSignZeroExtensionOfVariableHighBitExtract(
      BinaryOperator &OldAShr);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *visitFCmpInst(FCmpInst &I);
  CmpInst *canonicalizeICmpPredicate(CmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *foldItoFPtoI(CastInst &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);
  Instruction *visitCallBrInst(CallBrInst &CBI);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitAtomicRMWInst(AtomicRMWInst &SI);
  Instruction *visitUnconditionalBranchInst(BranchInst &BI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitFenceInst(FenceInst &FI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *
  foldAggregateConstructionIntoAggregateReuse(InsertValueInst &OrigIVI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);
  Instruction *visitVAEndInst(VAEndInst &I);
  Value *pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI);
  bool freezeDominatedUses(FreezeInst &FI);
  Instruction *visitFreeze(FreezeInst &I);

  /// Specify what to return for unhandled instructions.
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  /// True when DB dominates all uses of DI except UI.
  /// UI must be in the same block as DI.
  /// The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

  LoadInst *combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                 const Twine &Suffix = "");

private:
  void annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI);
  bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool shouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;
  Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                            SmallVectorImpl<Value *> &NewIndices);

  /// Classify whether a cast is worth optimizing.
  ///
  /// This is a helper to decide whether the simplification of
  /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
  ///
  /// \param CI The cast we are interested in.
  ///
  /// \return true if this cast actually results in any code being generated
  /// and if it cannot already be eliminated by some other transformation.
  bool shouldOptimizeCast(CastInst *CI);

  /// Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI
  /// has to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and the result of the overflow check in
  /// OverflowResult, and returns true. If no simplification is possible,
  /// returns false.
  bool OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned,
                             Value *LHS, Value *RHS, Instruction &CtxI,
                             Value *&OperationResult,
                             Constant *&OverflowResult);
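  // Illustrative IR for an overflow check this can handle (a sketch):
  //   %s  = add i32 %x, %y
  //   %ov = icmp ult i32 %s, %x    ; unsigned-add overflow idiom
  // When the check can be decided, OperationResult receives the (possibly
  // simplified) arithmetic result and OverflowResult the i1 constant.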

  Instruction *visitCallBase(CallBase &Call);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallBase &Call);
  Instruction *transformCallThroughTrampoline(CallBase &Call,
                                              IntrinsicInst &Tramp);

  Value *simplifyMaskedLoad(IntrinsicInst &II);
  Instruction *simplifyMaskedStore(IntrinsicInst &II);
  Instruction *simplifyMaskedGather(IntrinsicInst &II);
  Instruction *simplifyMaskedScatter(IntrinsicInst &II);

  /// Transform (zext icmp) to bitwise / integer operations in order to
  /// eliminate it.
  ///
  /// \param ICI The icmp of the (zext icmp) pair we are interested in.
  /// \param CI The zext of the (zext icmp) pair we are interested in.
  /// \param DoTransform Pass false to just test whether the given (zext icmp)
  /// would be transformed. Pass true to actually perform the transformation.
  ///
  /// \return null if the transformation cannot be performed. If the
  /// transformation can be performed the new instruction that replaces the
  /// (zext icmp) pair will be returned (if \p DoTransform is false the
  /// unmodified \p ICI will be returned in this case).
  Instruction *transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
                                 bool DoTransform = true);

  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);
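  // One representative (zext icmp) rewrite (a sketch of a known pattern):
  //   %c = icmp slt i32 %x, 0
  //   %z = zext i1 %c to i32
  // becomes a direct sign-bit extraction:
  //   %z = lshr i32 %x, 31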

  bool willNotOverflowSignedAdd(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedAdd(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowSub(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedSub(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowMul(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedMul(LHS, RHS, CxtI);
  }

  bool willNotOverflow(BinaryOperator::BinaryOps Opcode, const Value *LHS,
                       const Value *RHS, const Instruction &CxtI,
                       bool IsSigned) const {
    switch (Opcode) {
    case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
    case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
    case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
    default: llvm_unreachable("Unexpected opcode for overflow query");
    }
  }

  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
  Instruction *narrowBinOp(TruncInst &Trunc);
  Instruction *narrowMaskedBinOp(BinaryOperator &And);
  Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
  Instruction *narrowFunnelShift(TruncInst &Trunc);
  Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
  Instruction *matchSAddSubSat(SelectInst &MinMax1);

  void freelyInvertAllUsersOf(Value *V);

  /// Determine if a pair of casts can be replaced by a single cast.
  ///
  /// \param CI1 The first of a pair of casts.
  /// \param CI2 The second of a pair of casts.
  ///
  /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
  /// Instruction::CastOps value for a cast that can replace the pair, casting
  /// CI1->getSrcTy() to CI2->getDstTy().
  ///
  /// \see CastInst::isEliminableCastPair
  Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
                                            const CastInst *CI2);
  Value *simplifyIntToPtrRoundTripCast(Value *Val);

  Value *foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &And);
  Value *foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Or);
  Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Xor);

  /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
  /// NOTE: Unlike most of instcombine, this returns a Value which should
  /// already be inserted into the function.
  Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd);

  Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                       Instruction *CxtI, bool IsAnd,
                                       bool IsLogical = false);
  Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D);
  Value *getSelectCondition(Value *A, Value *B);

  Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
  Instruction *foldFPSignBitOps(BinaryOperator &I);

  // Optimize one of these forms:
  //   and i1 Op, SI / select i1 Op, i1 SI, i1 false (if IsAnd = true)
  //   or i1 Op, SI  / select i1 Op, i1 true, i1 SI  (if IsAnd = false)
  // into a simpler select instruction using isImpliedCondition.
  Instruction *foldAndOrOfSelectUsingImpliedCond(Value *Op, SelectInst &SI,
                                                 bool IsAnd);
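  // Illustrative instance of the fold above (a sketch):
  //   %s = select i1 %c, i1 %x, i1 false   ; a logical "and i1 %c, %x"
  //   %r = and i1 %op, %s
  // If isImpliedCondition proves that %op being true implies %c is true,
  // the select collapses and %r becomes "and i1 %op, %x".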

public:
  /// Inserts an instruction \p New before instruction \p Old.
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
    Worklist.add(New);
    return New;
  }

  /// Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }

  /// A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate
    // that no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = UndefValue::get(I.getType());

    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                      << "    with " << *V << '\n');

    I.replaceAllUsesWith(V);
    MadeIRChange = true;
    return &I;
  }

  /// Replace operand of instruction and add old operand to the worklist.
  Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
    Worklist.addValue(I.getOperand(OpNum));
    I.setOperand(OpNum, V);
    return &I;
  }

  /// Replace use and add the previously used value to the worklist.
  void replaceUse(Use &U, Value *NewValue) {
    Worklist.addValue(U);
    U = NewValue;
  }

  /// Create and insert the idiom we use to indicate a block is unreachable
  /// without having to rewrite the CFG from within InstCombine.
  void CreateNonTerminatorUnreachable(Instruction *InsertAt) {
    auto &Ctx = InsertAt->getContext();
    new StoreInst(ConstantInt::getTrue(Ctx),
                  UndefValue::get(Type::getInt1PtrTy(Ctx)), InsertAt);
  }
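  // The idiom created above is a store of 'true' through an undef i1
  // pointer, i.e.:
  //   store i1 true, i1* undef
  // Executing it is immediate UB, so later passes can prune the block
  // without InstCombine having to edit the CFG itself.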

  /// Combiner-aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a
  /// void value, we can't rely on DCE to delete the instruction. Instead,
  /// visit methods should return the value returned by this function.
  Instruction *eraseInstFromFunction(Instruction &I) override {
    LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
    assert(I.use_empty() && "Cannot erase instruction that is used!");
    salvageDebugInfo(I);

    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    for (Use &Operand : I.operands())
      if (auto *Inst = dyn_cast<Instruction>(Operand))
        Worklist.add(Inst);

    Worklist.remove(&I);
    I.eraseFromParent();
    MadeIRChange = true;
    return nullptr; // Don't do anything with I.
  }

  void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                        const Instruction *CxtI) const {
    llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
  }

  KnownBits computeKnownBits(const Value *V, unsigned Depth,
                             const Instruction *CxtI) const {
    return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
  }

  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
                              unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) {
    return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
  }

  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
                         const Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
  }

  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedSub(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp,
                                 bool IsSigned, Value *LHS, Value *RHS,
                                 Instruction *CxtI) const;

  /// Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (e.g.,
  /// "(A*B)+(A*C)" -> "A*(B+C)") or by expanding out if this results in
  /// simplifications (e.g., "A & (B | C) -> (A&B) | (A&C)" if this is a
  /// win). Returns the simplified value, or null if it didn't simplify.
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);
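  // The factorization direction above, written as IR (mirrors the doc
  // comment's "(A*B)+(A*C)" -> "A*(B+C)" example):
  //   %t0 = mul i32 %a, %b
  //   %t1 = mul i32 %a, %c
  //   %r  = add i32 %t0, %t1   ; -> %s = add i32 %b, %c
  //                            ;    %r = mul i32 %a, %s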

  /// Tries to simplify add operations using the definition of remainder.
  ///
  /// The definition of remainder is X % C = X - (X / C) * C. The add
  /// expression X % C0 + ((X / C0) % C1) * C0 can be simplified to
  /// X % (C0 * C1).
  Value *SimplifyAddWithRemainder(BinaryOperator &I);

  // Binary Op helper for select operations where the expression can be
  // efficiently reorganized.
  Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
                                        Value *RHS);

  /// This tries to simplify binary operations by factorizing out common terms
  /// (e.g., "(A*B)+(A*C)" -> "A*(B+C)").
  Value *tryFactorization(BinaryOperator &, Instruction::BinaryOps, Value *,
                          Value *, Value *, Value *);

  /// Match a select chain which produces one of three values based on whether
  /// the LHS is less than, equal to, or greater than RHS respectively.
  /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
  /// Equal and Greater values are saved in the matching process and returned
  /// to the caller.
  bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
                               ConstantInt *&Less, ConstantInt *&Equal,
                               ConstantInt *&Greater);

  /// Attempts to replace V with a simpler value based on the demanded bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                 KnownBits &Known, unsigned Depth,
                                 Instruction *CxtI);
  bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                            const APInt &DemandedMask, KnownBits &Known,
                            unsigned Depth = 0) override;

  /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
  /// bits. It also tries to handle simplifications that can be done based on
  /// DemandedMask, but without modifying the Instruction.
  Value *SimplifyMultipleUseDemandedBits(Instruction *I,
                                         const APInt &DemandedMask,
                                         KnownBits &Known, unsigned Depth,
                                         Instruction *CxtI);

  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
  /// bits for the "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
  Value *simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
                                    Instruction *Shl, const APInt &ShlOp1,
                                    const APInt &DemandedMask,
                                    KnownBits &Known);

  /// Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  virtual Value *
  SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts,
                             unsigned Depth = 0,
                             bool AllowMultipleUsers = false) override;

  /// Canonicalize the position of binops relative to shufflevector.
  Instruction *foldVectorBinop(BinaryOperator &Inst);
  Instruction *foldVectorSelect(SelectInst &Sel);

  /// Given a binary operator, cast instruction, or select which has a PHI
  /// node as operand #0, see if we can fold the instruction into the PHI
  /// (which is only possible if all operands to the PHI are constants).
  Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);

  /// Given an instruction with a select as one operand and a constant as the
  /// other operand, try to fold the binary operator into the select
  /// arguments. This also works for Cast instructions, which obviously do
  /// not have a second operand.
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);

  /// This is a convenience wrapper function for the above two functions.
  Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);
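  // Illustrative FoldOpIntoSelect result (a sketch):
  //   %s = select i1 %c, i32 4, i32 8
  //   %r = add i32 %s, 1
  // becomes
  //   %r = select i1 %c, i32 5, i32 9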

  Instruction *foldAddWithConstant(BinaryOperator &Add);

  /// Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *foldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *foldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *foldPHIArgZextsIntoPHI(PHINode &PN);

  /// If an integer typed PHI has only one use which is an IntToPtr operation,
  /// replace the PHI with an existing pointer typed PHI if it exists.
  /// Otherwise insert a new pointer typed PHI and replace the original one.
  Instruction *foldIntegerTypedPHI(PHINode &PN);

  /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
  /// folded operation.
  void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);

  Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *foldAllocaCmp(ICmpInst &ICI, const AllocaInst *Alloca,
                             const Value *Other);
  Instruction *foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *foldICmpAddOpConst(Value *X, const APInt &C,
                                  ICmpInst::Predicate Pred);
  Instruction *foldICmpWithCastOp(ICmpInst &ICI);

  Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
  Instruction *foldICmpWithDominatingICmp(ICmpInst &Cmp);
  Instruction *foldICmpWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
  Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
  Instruction *foldICmpEquality(ICmpInst &Cmp);
  Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
  Instruction *foldSignBitTest(ICmpInst &I);
  Instruction *foldICmpWithZero(ICmpInst &Cmp);

  Value *foldUnsignedMultiplicationOverflowCheck(ICmpInst &Cmp);

  Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
                                      ConstantInt *C);
  Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
                                     const APInt &C);
  Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
                                   const APInt &C);
  Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
                                   const APInt &C);
  Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                  const APInt &C);
  Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
                                   const APInt &C);
  Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
                                   const APInt &C);
  Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
                                   const APInt &C);
  Instruction *foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *SRem,
                                    const APInt &C);
  Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
                                   const APInt &C);
  Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
                                   const APInt &C);
  Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
                                   const APInt &C);
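  // Representative fold from this icmp-with-constant family (a sketch):
  //   %a = add i32 %x, 5
  //   %c = icmp eq i32 %a, 10
  // becomes
  //   %c = icmp eq i32 %x, 5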
  Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
                                     const APInt &C1);
  Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                const APInt &C1, const APInt &C2);
  Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt,
                                     const APInt &C1, const APInt &C2);
  Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt,
                                     const APInt &C1, const APInt &C2);

  Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                 BinaryOperator *BO,
                                                 const APInt &C);
  Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                             const APInt &C);
  Instruction *foldICmpEqIntrinsicWithConstant(ICmpInst &ICI,
                                               IntrinsicInst *II,
                                               const APInt &C);

  // Helpers of visitSelectInst().
  Instruction *foldSelectExtConst(SelectInst &Sel);
  Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI,
                              Instruction *FI);
  Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Instruction *foldSelectValueEquivalence(SelectInst &SI, ICmpInst &ICI);

  Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                         bool isSigned, bool Inside);
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  bool mergeStoreIntoSuccessor(StoreInst &SI);

  /// Given an initial instruction, check to see if it is the root of a
  /// bswap/bitreverse idiom. If so, return the equivalent bswap/bitreverse
  /// intrinsic.
  Instruction *matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps,
                                      bool MatchBitReversals);

  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};

class Negator final {
  /// Top-to-bottom, def-to-use negated instruction tree we produced.
  SmallVector<Instruction *, NegatorMaxNodesSSO> NewInstructions;

  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy Builder;

  const DataLayout &DL;
  AssumptionCache &AC;
  const DominatorTree &DT;

  const bool IsTrulyNegation;

  SmallDenseMap<Value *, Value *> NegationsCache;

  Negator(LLVMContext &C, const DataLayout &DL, AssumptionCache &AC,
          const DominatorTree &DT, bool IsTrulyNegation);

#if LLVM_ENABLE_STATS
  unsigned NumValuesVisitedInThisNegator = 0;
  ~Negator();
#endif

  using Result = std::pair<ArrayRef<Instruction *> /*NewInstructions*/,
                           Value * /*NegatedRoot*/>;

  std::array<Value *, 2> getSortedOperandsOfBinOp(Instruction *I);

  LLVM_NODISCARD Value *visitImpl(Value *V, unsigned Depth);

  LLVM_NODISCARD Value *negate(Value *V, unsigned Depth);

  /// Recurse depth-first and attempt to sink the negation.
  /// FIXME: use worklist?
  LLVM_NODISCARD Optional<Result> run(Value *Root);
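  // For instance, negating "sub %a, %b" needs no extra instructions: the
  // negation is simply "sub %b, %a" with the operands swapped (a sketch of
  // the simplest case this traversal handles).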

  Negator(const Negator &) = delete;
  Negator(Negator &&) = delete;
  Negator &operator=(const Negator &) = delete;
  Negator &operator=(Negator &&) = delete;

public:
  /// Attempt to negate \p Root. Returns nullptr if negation can't be
  /// performed, otherwise returns the negated value.
  LLVM_NODISCARD static Value *Negate(bool LHSIsZero, Value *Root,
                                      InstCombinerImpl &IC);
};

} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H