//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine
/// pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

#define DEBUG_TYPE "instcombine"

using namespace llvm::PatternMatch;

// As a default, let's assume that we want to be aggressive,
// and attempt to traverse with no limits in an attempt to sink negation.
static constexpr unsigned NegatorDefaultMaxDepth = ~0U;

// Let's guesstimate that most often we will end up visiting/producing
// a fairly small number of new instructions.
static constexpr unsigned NegatorMaxNodesSSO = 16;

namespace llvm {

class AAResults;
class APInt;
class AssumptionCache;
class BlockFrequencyInfo;
class DataLayout;
class DominatorTree;
class GEPOperator;
class GlobalVariable;
class LoopInfo;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class User;

class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
    : public InstCombiner,
      public InstVisitor<InstCombinerImpl, Instruction *> {
public:
  InstCombinerImpl(InstCombineWorklist &Worklist, BuilderTy &Builder,
                   bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
                   TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
                   DominatorTree &DT, OptimizationRemarkEmitter &ORE,
                   BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
                   const DataLayout &DL, LoopInfo *LI)
      : InstCombiner(Worklist, Builder, MinimizeSize, AA, AC, TLI, TTI, DT,
                     ORE, BFI, PSI, DL, LI) {}

  virtual ~InstCombinerImpl() {}

  /// Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  // Visitation implementation - Implement instruction combining for different
  // instruction types.  The semantics are as follows:
  // Return Value:
  //    null        - No change was made
  //    I           - Change was made, I is still valid, I may be dead though
  //    otherwise   - Change was made, replace I with returned instruction
  //
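  // For instance (an illustrative sketch of the convention): a visit method
  // that merely mutates I in place returns &I so the driver knows a change
  // happened, while returning a different, not-yet-inserted instruction asks
  // the driver to insert it and replace I with it.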
  Instruction *visitFNeg(UnaryOperator &I);
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty,
                                   bool isNUW);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Instruction *visitAnd(BinaryOperator &I);
  Instruction *visitOr(BinaryOperator &I);
  bool sinkNotIntoOtherHandOfAndOrOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
      BinaryOperator *Sh0, const SimplifyQuery &SQ,
      bool AnalyzeForSignBitExtraction = false);
  Instruction *canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
      BinaryOperator &I);
  Instruction *foldVariableSignZeroExtensionOfVariableHighBitExtract(
      BinaryOperator &OldAShr);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *visitFCmpInst(FCmpInst &I);
  CmpInst *canonicalizeICmpPredicate(CmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *foldItoFPtoI(CastInst &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);
  Instruction *visitCallBrInst(CallBrInst &CBI);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitAtomicRMWInst(AtomicRMWInst &SI);
  Instruction *visitUnconditionalBranchInst(BranchInst &BI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitFenceInst(FenceInst &FI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *
  foldAggregateConstructionIntoAggregateReuse(InsertValueInst &OrigIVI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);
  Instruction *visitVAEndInst(VAEndInst &I);
  Instruction *visitFreeze(FreezeInst &I);

  /// Specify what to return for unhandled instructions.
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  /// True when DB dominates all uses of DI except UI.
  /// UI must be in the same block as DI.
  /// The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

  LoadInst *combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                 const Twine &Suffix = "");

private:
  bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool shouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;
  Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                            SmallVectorImpl<Value *> &NewIndices);

  /// Classify whether a cast is worth optimizing.
  ///
  /// This is a helper to decide whether the simplification of
  /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
  ///
  /// \param CI The cast we are interested in.
  ///
  /// \return true if this cast actually results in any code being generated
  /// and if it cannot already be eliminated by some other transformation.
  bool shouldOptimizeCast(CastInst *CI);
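  // An illustrative instance of the fold this gates (a sketch):
  //   %a = zext i8 %x to i32
  //   %b = zext i8 %y to i32
  //   %r = and i32 %a, %b
  // -->
  //   %t = and i8 %x, %y
  //   %r = zext i8 %t to i32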
  /// Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and the result of the overflow check in
  /// OverflowResult, and returns true. If no simplification is possible,
  /// returns false.
  bool OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned,
                             Value *LHS, Value *RHS, Instruction &CtxI,
                             Value *&OperationResult,
                             Constant *&OverflowResult);
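  // The two shapes this looks for are, roughly (a sketch):
  //   %s = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  // or arithmetic followed by a compare, e.g. for unsigned add:
  //   %s  = add i32 %a, %b
  //   %ov = icmp ult i32 %s, %a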
  Instruction *visitCallBase(CallBase &Call);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallBase &Call);
  Instruction *transformCallThroughTrampoline(CallBase &Call,
                                              IntrinsicInst &Tramp);

  Value *simplifyMaskedLoad(IntrinsicInst &II);
  Instruction *simplifyMaskedStore(IntrinsicInst &II);
  Instruction *simplifyMaskedGather(IntrinsicInst &II);
  Instruction *simplifyMaskedScatter(IntrinsicInst &II);

  /// Transform (zext icmp) to bitwise / integer operations in order to
  /// eliminate it.
  ///
  /// \param ICI The icmp of the (zext icmp) pair we are interested in.
  /// \param CI The zext of the (zext icmp) pair we are interested in.
  /// \param DoTransform Pass false to just test whether the given (zext icmp)
  /// would be transformed. Pass true to actually perform the transformation.
  ///
  /// \return null if the transformation cannot be performed. If the
  /// transformation can be performed the new instruction that replaces the
  /// (zext icmp) pair will be returned (if \p DoTransform is false the
  /// unmodified \p ICI will be returned in this case).
  Instruction *transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
                                 bool DoTransform = true);
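  // e.g. (an illustrative case) with %t = and i32 %x, 1:
  //   %c = icmp ne i32 %t, 0
  //   %z = zext i1 %c to i32
  // -->
  //   %z = and i32 %x, 1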
  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);

  bool willNotOverflowSignedAdd(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedAdd(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowSub(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedSub(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowMul(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedMul(LHS, RHS, CxtI);
  }

  bool willNotOverflow(BinaryOperator::BinaryOps Opcode, const Value *LHS,
                       const Value *RHS, const Instruction &CxtI,
                       bool IsSigned) const {
    switch (Opcode) {
    case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
    case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
    case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
    default: llvm_unreachable("Unexpected opcode for overflow query");
    }
  }

  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
  Instruction *narrowBinOp(TruncInst &Trunc);
  Instruction *narrowMaskedBinOp(BinaryOperator &And);
  Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
  Instruction *narrowFunnelShift(TruncInst &Trunc);
  Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
  Instruction *matchSAddSubSat(SelectInst &MinMax1);

  void freelyInvertAllUsersOf(Value *V);

  /// Determine if a pair of casts can be replaced by a single cast.
  ///
  /// \param CI1 The first of a pair of casts.
  /// \param CI2 The second of a pair of casts.
  ///
  /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
  /// Instruction::CastOps value for a cast that can replace the pair, casting
  /// CI1->getSrcTy() to CI2->getDstTy().
  ///
  /// \see CastInst::isEliminableCastPair
  Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
                                            const CastInst *CI2);
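  // e.g. (an illustrative case) for the pair
  //   %a = zext i8 %x to i16
  //   %b = zext i16 %a to i32
  // this returns Instruction::ZExt, since a single `zext i8 %x to i32`
  // replaces both casts.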
  Value *foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &And);
  Value *foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Or);
  Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Xor);

  /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
  /// NOTE: Unlike most of instcombine, this returns a Value which should
  /// already be inserted into the function.
  Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd);

  Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                       BinaryOperator &Logic);
  Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D);
  Value *getSelectCondition(Value *A, Value *B);

  Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
  Instruction *foldFPSignBitOps(BinaryOperator &I);

public:
  /// Inserts an instruction \p New before instruction \p Old.
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
    Worklist.add(New);
    return New;
  }

  /// Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }

  /// A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate that
    // no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = UndefValue::get(I.getType());

    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                      << "    with " << *V << '\n');

    I.replaceAllUsesWith(V);
    MadeIRChange = true;
    return &I;
  }

  /// Replace operand of instruction and add old operand to the worklist.
  Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
    Worklist.addValue(I.getOperand(OpNum));
    I.setOperand(OpNum, V);
    return &I;
  }

  /// Replace use and add the previously used value to the worklist.
  void replaceUse(Use &U, Value *NewValue) {
    Worklist.addValue(U);
    U = NewValue;
  }

  /// Creates a result tuple for an overflow intrinsic \p II with a given
  /// \p Result and a constant \p Overflow value.
  Instruction *CreateOverflowTuple(IntrinsicInst *II, Value *Result,
                                   Constant *Overflow) {
    Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
    StructType *ST = cast<StructType>(II->getType());
    Constant *Struct = ConstantStruct::get(ST, V);
    return InsertValueInst::Create(Struct, Result, 0);
  }
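  // For a { i32, i1 } overflow intrinsic this builds, roughly (a sketch with
  // <Result>/<Overflow> as placeholders):
  //   insertvalue { i32, i1 } { i32 undef, i1 <Overflow> }, i32 <Result>, 0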
  /// Create and insert the idiom we use to indicate a block is unreachable
  /// without having to rewrite the CFG from within InstCombine.
  void CreateNonTerminatorUnreachable(Instruction *InsertAt) {
    auto &Ctx = InsertAt->getContext();
    new StoreInst(ConstantInt::getTrue(Ctx),
                  UndefValue::get(Type::getInt1PtrTy(Ctx)),
                  InsertAt);
  }

  /// Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  Instruction *eraseInstFromFunction(Instruction &I) override {
    LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
    assert(I.use_empty() && "Cannot erase instruction that is used!");
    salvageDebugInfo(I);

    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    for (Use &Operand : I.operands())
      if (auto *Inst = dyn_cast<Instruction>(Operand))
        Worklist.add(Inst);

    Worklist.remove(&I);
    I.eraseFromParent();
    MadeIRChange = true;
    return nullptr; // Don't do anything with I.
  }

  void computeKnownBits(const Value *V, KnownBits &Known,
                        unsigned Depth, const Instruction *CxtI) const {
    llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
  }

  KnownBits computeKnownBits(const Value *V, unsigned Depth,
                             const Instruction *CxtI) const {
    return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
  }

  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
                              unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) {
    return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
  }

  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
                         const Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
  }

  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp,
                                 bool IsSigned, Value *LHS, Value *RHS,
                                 Instruction *CxtI) const;

  /// Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (e.g. "(A*B)+(A*C)"
  /// -> "A*(B+C)") or by expanding out if this results in simplifications
  /// (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the
  /// simplified value, or null if it didn't simplify.
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);

  /// Tries to simplify add operations using the definition of remainder.
  ///
  /// The definition of remainder is X % C = X - (X / C) * C. The add
  /// expression X % C0 + ((X / C0) % C1) * C0 can be simplified to
  /// X % (C0 * C1).
  Value *SimplifyAddWithRemainder(BinaryOperator &I);
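  // Worked instance (a sketch, for unsigned X): with C0 = 4 and C1 = 8,
  //   X % 4 + ((X / 4) % 8) * 4  ==  X % 32
  // because the two terms reassemble the low five bits of X.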
  // Binary Op helper for select operations where the expression can be
  // efficiently reorganized.
  Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
                                        Value *RHS);

  /// This tries to simplify binary operations by factorizing out common terms
  /// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
  Value *tryFactorization(BinaryOperator &, Instruction::BinaryOps, Value *,
                          Value *, Value *, Value *);

  /// Match a select chain which produces one of three values based on whether
  /// the LHS is less than, equal to, or greater than RHS respectively.
  /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
  /// Equal and Greater values are saved in the matching process and returned
  /// to the caller.
  bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
                               ConstantInt *&Less, ConstantInt *&Equal,
                               ConstantInt *&Greater);

  /// Attempts to replace V with a simpler value based on the demanded
  /// bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownBits &Known,
                                 unsigned Depth, Instruction *CxtI);
  bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                            const APInt &DemandedMask, KnownBits &Known,
                            unsigned Depth = 0) override;

  /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
  /// bits. It also tries to handle simplifications that can be done based on
  /// DemandedMask, but without modifying the Instruction.
  Value *SimplifyMultipleUseDemandedBits(Instruction *I,
                                         const APInt &DemandedMask,
                                         KnownBits &Known,
                                         unsigned Depth, Instruction *CxtI);

  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify the
  /// demanded bits for the "r1 = shr x, c1; r2 = shl r1, c2" instruction
  /// sequence.
  Value *simplifyShrShlDemandedBits(
      Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
      const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);
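  // e.g. (an illustrative case) when c1 == c2 == C, the pair
  //   %r1 = lshr i32 %x, C
  //   %r2 = shl i32 %r1, C
  // only clears the C low bits, so it is equivalent to x & (-1 << C).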
  /// Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  virtual Value *
  SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts,
                             unsigned Depth = 0,
                             bool AllowMultipleUsers = false) override;

  /// Canonicalize the position of binops relative to shufflevector.
  Instruction *foldVectorBinop(BinaryOperator &Inst);
  Instruction *foldVectorSelect(SelectInst &Sel);

  /// Given a binary operator, cast instruction, or select which has a PHI node
  /// as operand #0, see if we can fold the instruction into the PHI (which is
  /// only possible if all operands to the PHI are constants).
  Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);

  /// Given an instruction with a select as one operand and a constant as the
  /// other operand, try to fold the binary operator into the select arguments.
  /// This also works for Cast instructions, which obviously do not have a
  /// second operand.
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);

  /// This is a convenience wrapper function for the above two functions.
  Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);

  Instruction *foldAddWithConstant(BinaryOperator &Add);

  /// Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *foldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *foldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *foldPHIArgZextsIntoPHI(PHINode &PN);

  /// If an integer typed PHI has only one use which is an IntToPtr operation,
  /// replace the PHI with an existing pointer typed PHI if it exists.
  /// Otherwise insert a new pointer typed PHI and replace the original one.
  Instruction *foldIntegerTypedPHI(PHINode &PN);

  /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
  /// folded operation.
  void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);

  Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *foldAllocaCmp(ICmpInst &ICI, const AllocaInst *Alloca,
                             const Value *Other);
  Instruction *foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *foldICmpAddOpConst(Value *X, const APInt &C,
                                  ICmpInst::Predicate Pred);
  Instruction *foldICmpWithCastOp(ICmpInst &ICI);

  Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
  Instruction *foldICmpWithDominatingICmp(ICmpInst &Cmp);
  Instruction *foldICmpWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
  Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
  Instruction *foldICmpEquality(ICmpInst &Cmp);
  Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
  Instruction *foldSignBitTest(ICmpInst &I);
  Instruction *foldICmpWithZero(ICmpInst &Cmp);

  Value *foldUnsignedMultiplicationOverflowCheck(ICmpInst &Cmp);

  Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
                                      ConstantInt *C);
  Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
                                     const APInt &C);
  Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
                                   const APInt &C);
  Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
                                   const APInt &C);
  Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                  const APInt &C);
  Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
                                   const APInt &C);
  Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
                                   const APInt &C);
  Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
                                   const APInt &C);
  Instruction *foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *SRem,
                                    const APInt &C);
  Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
                                   const APInt &C);
  Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
                                   const APInt &C);
  Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
                                   const APInt &C);
  Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
                                     const APInt &C1);
  Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                const APInt &C1, const APInt &C2);
  Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);
  Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);

  Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                 BinaryOperator *BO,
                                                 const APInt &C);
  Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                             const APInt &C);
  Instruction *foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                               const APInt &C);

  // Helpers of visitSelectInst().
  Instruction *foldSelectExtConst(SelectInst &Sel);
  Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
  Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Instruction *foldSelectValueEquivalence(SelectInst &SI, ICmpInst &ICI);

  Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                         bool isSigned, bool Inside);
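  // The usual unsigned encoding this produces is (a sketch):
  //   Lo <= V && V < Hi   -->   (V - Lo) u< (Hi - Lo)
  // i.e. one subtraction and one compare instead of two compares.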
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  bool mergeStoreIntoSuccessor(StoreInst &SI);

  /// Given an 'or' instruction, check to see if it is part of a
  /// bswap/bitreverse idiom. If so, return the equivalent bswap/bitreverse
  /// intrinsic.
  Instruction *matchBSwapOrBitReverse(BinaryOperator &Or, bool MatchBSwaps,
                                      bool MatchBitReversals);
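  // e.g. the classic i16 byte-swap idiom (an illustrative case):
  //   %hi = shl i16 %x, 8
  //   %lo = lshr i16 %x, 8
  //   %r  = or i16 %hi, %lo
  // -->
  //   %r = call i16 @llvm.bswap.i16(i16 %x)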
  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};

class Negator final {
  /// Top-to-bottom, def-to-use negated instruction tree we produced.
  SmallVector<Instruction *, NegatorMaxNodesSSO> NewInstructions;

  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy Builder;

  const DataLayout &DL;
  AssumptionCache &AC;
  const DominatorTree &DT;

  const bool IsTrulyNegation;

  SmallDenseMap<Value *, Value *> NegationsCache;

  Negator(LLVMContext &C, const DataLayout &DL, AssumptionCache &AC,
          const DominatorTree &DT, bool IsTrulyNegation);

#if LLVM_ENABLE_STATS
  unsigned NumValuesVisitedInThisNegator = 0;
  ~Negator();
#endif

  using Result = std::pair<ArrayRef<Instruction *> /*NewInstructions*/,
                           Value * /*NegatedRoot*/>;

  std::array<Value *, 2> getSortedOperandsOfBinOp(Instruction *I);

  LLVM_NODISCARD Value *visitImpl(Value *V, unsigned Depth);

  LLVM_NODISCARD Value *negate(Value *V, unsigned Depth);

  /// Recurse depth-first and attempt to sink the negation.
  /// FIXME: use worklist?
  LLVM_NODISCARD Optional<Result> run(Value *Root);

  Negator(const Negator &) = delete;
  Negator(Negator &&) = delete;
  Negator &operator=(const Negator &) = delete;
  Negator &operator=(Negator &&) = delete;

public:
  /// Attempt to negate \p Root. Returns nullptr if negation can't be
  /// performed, otherwise returns the negated value.
  LLVM_NODISCARD static Value *Negate(bool LHSIsZero, Value *Root,
                                      InstCombinerImpl &IC);
};

} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H