//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions. This pass does not modify the CFG. This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");
STATISTIC(NumOneIteration, "Number of functions with one iteration");
STATISTIC(NumTwoIterations, "Number of functions with two iterations");
STATISTIC(NumThreeIterations, "Number of functions with three iterations");
STATISTIC(NumFourOrMoreIterations,
          "Number of functions with four or more iterations");

STATISTIC(NumCombined, "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst, "Number of dead inst eliminated");
STATISTIC(NumSunkInst, "Number of instructions sunk");
STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumFactor, "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");

static cl::opt<bool>
    EnableCodeSinking("instcombine-code-sinking",
                      cl::desc("Enable code sinking"), cl::init(true));

static cl::opt<unsigned> MaxSinkNumUsers(
    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

static cl::opt<unsigned>
    MaxArraySize("instcombine-maxarray-size", cl::init(1024),
                 cl::desc("Maximum array size considered when doing a combine"));

// FIXME: Remove this flag when it is no longer necessary to convert
// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
// increases variable availability at the cost of accuracy. Variables that
// cannot be promoted by mem2reg or SROA will be described as living in memory
// for their entire lifetime. However, passes like DSE and instcombine can
// delete stores to the alloca, leading to misleading and inaccurate debug
// information. This flag can be removed when those passes are fixed.
static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
                                               cl::Hidden, cl::init(true));

std::optional<Instruction *>
InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.instCombineIntrinsic(*this, II);
  }
  return std::nullopt;
}

std::optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.simplifyDemandedUseBitsIntrinsic(*this, II, DemandedMask, Known,
                                                KnownBitsComputed);
  }
  return std::nullopt;
}

std::optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
    IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
    APInt &PoisonElts2, APInt &PoisonElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.simplifyDemandedVectorEltsIntrinsic(
        *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
        SimplifyAndSetOp);
  }
  return std::nullopt;
}

bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
  return TTI.isValidAddrSpaceCast(FromAS, ToAS);
}

Value *InstCombinerImpl::EmitGEPOffset(User *GEP) {
  return llvm::emitGEPOffset(&Builder, DL, GEP);
}

/// Legal integers and common types are considered desirable. This is used to
/// avoid creating instructions with types that may not be supported well by
/// the backend.
/// NOTE: This treats i8, i16 and i32 specially because they are common
/// types in frontend languages.
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
  switch (BitWidth) {
  case 8:
  case 16:
  case 32:
    return true;
  default:
    return DL.isLegalInteger(BitWidth);
  }
}

/// Return true if it is desirable to convert an integer computation from a
/// given bit width to a new bit width.
/// We don't want to convert from a legal or desirable type (like i8) to an
/// illegal type or from a smaller to a larger illegal type. A width of '1'
/// is always treated as a desirable type because i1 is a fundamental type in
/// IR, and there are many specialized optimizations for i1 types.
/// Common/desirable widths are equally treated as legal to convert to, in
/// order to open up more combining opportunities.
bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);

  // Convert to desirable widths even if they are not legal types.
  // Only shrink types, to prevent infinite loops.
  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
    return true;

  // If this is a legal or desirable integer from type, and the result would be
  // an illegal type, don't do the transformation.
  if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

/// Return true if it is desirable to convert a computation from 'From' to
/// 'To'. We don't want to convert from a legal to an illegal type or from a
/// smaller to a larger illegal type. i1 is always treated as a legal type
/// because it is a fundamental type in IR, and there are many specialized
/// optimizations for i1 types.
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
  // TODO: This could be extended to allow vectors. Datalayout changes might be
  // needed to properly support that.
  if (!From->isIntegerTy() || !To->isIntegerTy())
    return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
}

// Return true if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add and Sub opcodes. For
// all other opcodes, the function conservatively returns false.
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
    return false;

  const APInt *BVal, *CVal;
  if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
    return false;

  bool Overflow = false;
  if (Opcode == Instruction::Add)
    (void)BVal->sadd_ov(*CVal, Overflow);
  else
    (void)BVal->ssub_ov(*CVal, Overflow);

  return !Overflow;
}

static bool hasNoUnsignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();
}

static bool hasNoSignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();
}

/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. We preserve fast-math flags when applicable.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}

/// Combine constant operands of associative operations either before or after
/// a cast to eliminate one of the associative operations:
/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
                                   InstCombinerImpl &IC) {
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  // TODO: Enhance logic for other casts and remove this check.
  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

  // TODO: Enhance logic for other BinOps and remove this check.
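  // An illustrative instance of the fold for the zext + bitwise-logic case
  // handled below (value names are hypothetical):
  //   %inner = and i8 %x, 7
  //   %ext   = zext i8 %inner to i32
  //   %outer = and i32 %ext, 15
  // -->
  //   %ext   = zext i8 %x to i32
  //   %outer = and i32 %ext, 7    ; 15 & (zext i8 7 to i32) == 7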
  if (!BinOp1->isBitwiseLogicOp())
    return false;

  auto AssocOpcode = BinOp1->getOpcode();
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;

  Constant *C1, *C2;
  if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
      !match(BinOp2->getOperand(1), m_Constant(C2)))
    return false;

  // TODO: This assumes a zext cast.
  // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
  // to the destination type might lose bits.

  // Fold the constants together in the destination type:
  // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
  const DataLayout &DL = IC.getDataLayout();
  Type *DestTy = C1->getType();
  Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
  if (!CastC2)
    return false;
  Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
  if (!FoldedC)
    return false;

  IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
  IC.replaceOperand(*BinOp1, 1, FoldedC);
  BinOp1->dropPoisonGeneratingFlags();
  Cast->dropPoisonGeneratingFlags();
  return true;
}

// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
// inttoptr ( ptrtoint (x) ) --> x
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
  if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
                      DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
    auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
    Type *CastTy = IntToPtr->getDestTy();
    if (PtrToInt &&
        CastTy->getPointerAddressSpace() ==
            PtrToInt->getSrcTy()->getPointerAddressSpace() &&
        DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
            DL.getTypeSizeInBits(PtrToInt->getDestTy()))
      return PtrToInt->getOperand(0);
  }
  return nullptr;
}

/// This performs a few simplifications for operators that are associative or
/// commutative:
///
/// Commutative operators:
///
///  1. Order operands such that they are listed from right (least complex) to
///     left (most complex). This puts constants before unary operators before
///     binary operators.
///
/// Associative operators:
///
///  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
///  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
///
/// Associative and commutative operators:
///
///  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
///  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
///  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
///     if C1 and C2 are constants.
bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex). This puts constants before unary operators before
    // binary operators.
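    // For example, a commutative op such as "add i32 1, %x" is rewritten as
    // "add i32 %x, 1" (canonicalization #1 in the file header), so later folds
    // only need to look for a constant on the RHS.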
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
                                 getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    if (I.isCommutative()) {
      if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
        replaceOperand(I, 0, Pair->first);
        replaceOperand(I, 1, Pair->second);
        Changed = true;
      }
    }

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "A op V".
          replaceOperand(I, 0, A);
          replaceOperand(I, 1, V);
          bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
          bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);

          // Conservatively clear all optional flags since they may not be
          // preserved by the reassociation. Reset nsw/nuw based on the above
          // analysis.
          ClearSubclassDataAfterReassociation(I);

          // Note: this is only valid because SimplifyBinOp doesn't look at
          // the operands to Op0.
          if (IsNUW)
            I.setHasNoUnsignedWrap(true);

          if (IsNSW)
            I.setHasNoSignedWrap(true);

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op C".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      if (simplifyAssocCastAssoc(&I, *this)) {
        Changed = true;
        ++NumReassoc;
        continue;
      }

      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op B".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "B op V".
          replaceOperand(I, 0, B);
          replaceOperand(I, 1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      Value *A, *B;
      Constant *C1, *C2, *CRes;
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
          match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
          (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
        bool IsNUW = hasNoUnsignedWrap(I) &&
                     hasNoUnsignedWrap(*Op0) &&
                     hasNoUnsignedWrap(*Op1);
        BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add)
                                    ? BinaryOperator::CreateNUW(Opcode, A, B)
                                    : BinaryOperator::Create(Opcode, A, B);

        if (isa<FPMathOperator>(NewBO)) {
          FastMathFlags Flags = I.getFastMathFlags() &
                                Op0->getFastMathFlags() &
                                Op1->getFastMathFlags();
          NewBO->setFastMathFlags(Flags);
        }
        InsertNewInstWith(NewBO, I.getIterator());
        NewBO->takeName(Op1);
        replaceOperand(I, 0, NewBO);
        replaceOperand(I, 1, CRes);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        ClearSubclassDataAfterReassociation(I);
        if (IsNUW)
          I.setHasNoUnsignedWrap(true);

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (true);
}

/// Return whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  // X & (Y | Z) <--> (X & Y) | (X & Z)
  // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  // X | (Y & Z) <--> (X | Y) & (X | Z)
  if (LOp == Instruction::Or)
    return ROp == Instruction::And;

  // X * (Y + Z) <--> (X * Y) + (X * Z)
  // X * (Y - Z) <--> (X * Y) - (X * Z)
  if (LOp == Instruction::Mul)
    return ROp == Instruction::Add || ROp == Instruction::Sub;

  return false;
}

/// Return whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return leftDistributesOverRight(ROp, LOp);

  // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
  return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);

  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
}

/// This function returns the identity value for the given opcode, which can be
/// used to factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
  if (isa<Constant>(V))
    return nullptr;

  return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
}

/// This function predicates factorization using distributive laws. By default,
/// it just returns the 'Op' inputs.
/// But for special-cases like 'add(shl(X, 5), ...)', this function will have
/// TopOpcode == Instruction::Add and Op = shl(X, 5). The 'shl' is treated as
/// the more general 'mul X, 32' to allow more factorization opportunities.
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
                          Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
  assert(Op && "Expected a binary operator");
  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);
  if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
    Constant *C;
    if (match(Op, m_Shl(m_Value(), m_Constant(C)))) {
      // X << C --> X * (1 << C)
      RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), C);
      return Instruction::Mul;
    }
    // TODO: We can add other conversions e.g. shr => div etc.
  }
  if (Instruction::isBitwiseLogicOp(TopOpcode)) {
    if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
        match(Op, m_LShr(m_NonNegative(), m_Value()))) {
      // lshr nneg C, X --> ashr nneg C, X
      return Instruction::AShr;
    }
  }
  return Op->getOpcode();
}

/// This tries to simplify binary operations by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
                               InstCombiner::BuilderTy &Builder,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {
  assert(A && B && C && D && "All values must be provided");

  Value *V = nullptr;
  Value *RetVal = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));

      // If "B op D" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
    }
  }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));

      // If "A op C" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
    }
  }

  if (!RetVal)
    return nullptr;

  ++NumFactor;
  RetVal->takeName(&I);

  // Try to add no-overflow flags to the final value.
  if (isa<OverflowingBinaryOperator>(RetVal)) {
    bool HasNSW = false;
    bool HasNUW = false;
    if (isa<OverflowingBinaryOperator>(&I)) {
      HasNSW = I.hasNoSignedWrap();
      HasNUW = I.hasNoUnsignedWrap();
    }
    if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
      HasNSW &= LOBO->hasNoSignedWrap();
      HasNUW &= LOBO->hasNoUnsignedWrap();
    }

    if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
      HasNSW &= ROBO->hasNoSignedWrap();
      HasNUW &= ROBO->hasNoUnsignedWrap();
    }

    if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
      // We can propagate 'nsw' if we know that
      //  %Y = mul nsw i16 %X, C
      //  %Z = add nsw i16 %Y, %X
      // =>
      //  %Z = mul nsw i16 %X, C+1
      //
      // iff C+1 isn't INT_MIN
      const APInt *CInt;
      if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
        cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);

      // nuw can be propagated with any constant or nuw value.
      cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
    }
  }
  return RetVal;
}

// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
//   IFF
//    1) the logic_shifts match
//    2) either both binops are binops and one is `and` or
//       BinOp1 is `and`
//       (logic_shift (inv_logic_shift C1, C), C) == C1 or
//
//    -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
//
// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
//   IFF
//    1) the logic_shifts match
//    2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
//
//    -> (BinOp (logic_shift (BinOp X, Y)), Mask)
//
// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
//   IFF
//    1) Binop1 is bitwise logical operator `and`, `or` or `xor`
//    2) Binop2 is `not`
//
//    -> (arithmetic_shift Binop1((not X), Y), Amt)

Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
  const DataLayout &DL = I.getModule()->getDataLayout();
  auto IsValidBinOpc = [](unsigned Opc) {
    switch (Opc) {
    default:
      return false;
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Add:
      // Skip Sub as we only match constant masks which will canonicalize to
      // use add.
      return true;
    }
  };

  // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
  // constraints.
  auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
                                      unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
           ShOpc == Instruction::Shl;
  };

  auto GetInvShift = [](unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
  };

  auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
                                 unsigned ShOpc, Constant *CMask,
                                 Constant *CShift) {
    // If the BinOp1 is `and` we don't need to check the mask.
    if (BinOpc1 == Instruction::And)
      return true;

    // For all other possible transfers we need complete distributable
    // binop/shift (anything but `add` + `lshr`).
    if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
      return false;

    // If BinOp2 is `and`, any mask works (this only really helps for non-splat
    // vecs, otherwise the mask will be simplified and the following check will
    // handle it).
    if (BinOpc2 == Instruction::And)
      return true;

    // Otherwise, need a mask that meets the below requirement.
    // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
    Constant *MaskInvShift =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
           CMask;
  };

  auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
    Constant *CMask, *CShift;
    Value *X, *Y, *ShiftedX, *Mask, *Shift;
    if (!match(I.getOperand(ShOpnum),
               m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
      return nullptr;
    if (!match(I.getOperand(1 - ShOpnum),
               m_BinOp(m_Value(ShiftedX), m_Value(Mask))))
      return nullptr;

    if (!match(ShiftedX, m_OneUse(m_Shift(m_Value(X), m_Specific(Shift)))))
      return nullptr;

    // Make sure we are matching instruction shifts and not ConstantExpr
    auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
    auto *IX = dyn_cast<Instruction>(ShiftedX);
    if (!IY || !IX)
      return nullptr;

    // LHS and RHS need same shift opcode
    unsigned ShOpc = IY->getOpcode();
    if (ShOpc != IX->getOpcode())
      return nullptr;

    // Make sure binop is real instruction and not ConstantExpr
    auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
    if (!BO2)
      return nullptr;

    unsigned BinOpc = BO2->getOpcode();
    // Make sure we have valid binops.
    if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
      return nullptr;

    if (ShOpc == Instruction::AShr) {
      if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
          BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
        Value *NotX = Builder.CreateNot(X);
        Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
        return BinaryOperator::Create(
            static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
      }

      return nullptr;
    }

    // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
    // distribute to drop the shift regardless of the constants.
    if (BinOpc == I.getOpcode() &&
        IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
      Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
      Value *NewBinOp1 = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
      return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
    }

    // Otherwise we can only distribute by constant shifting the mask, so
    // ensure we have constants.
    if (!match(Shift, m_ImmConstant(CShift)))
      return nullptr;
    if (!match(Mask, m_ImmConstant(CMask)))
      return nullptr;

    // Check if we can distribute the binops.
    if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
      return nullptr;

    Constant *NewCMask =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    Value *NewBinOp2 = Builder.CreateBinOp(
        static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
    Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
    return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
                                  NewBinOp1, CShift);
  };

  if (Instruction *R = MatchBinOp(0))
    return R;
  return MatchBinOp(1);
}

// (Binop (zext C), (select C, T, F))
//    -> (select C, (binop 1, T), (binop 0, F))
//
// (Binop (sext C), (select C, T, F))
//    -> (select C, (binop -1, T), (binop 0, F))
//
// Attempt to simplify binary operations into a select with folded args, when
// one operand of the binop is a select instruction and the other operand is a
// zext/sext extension, whose value is the select condition.
Instruction *
InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I) {
  // TODO: this simplification may be extended to any speculatable instruction,
  // not just binops, and would possibly be handled better in FoldOpIntoSelect.
  Instruction::BinaryOps Opc = I.getOpcode();
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *A, *CondVal, *TrueVal, *FalseVal;
  Value *CastOp;

  auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
    return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
           A->getType()->getScalarSizeInBits() == 1 &&
           match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
                                    m_Value(FalseVal)));
  };

  // Make sure one side of the binop is a select instruction, and the other is
  // a zero/sign extension operating on an i1.
  if (MatchSelectAndCast(LHS, RHS))
    CastOp = LHS;
  else if (MatchSelectAndCast(RHS, LHS))
    CastOp = RHS;
  else
    return nullptr;

  auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
    bool IsCastOpRHS = (CastOp == RHS);
    bool IsZExt = isa<ZExtInst>(CastOp);
    Constant *C;

    if (IsTrueArm) {
      C = Constant::getNullValue(V->getType());
    } else if (IsZExt) {
      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
    } else {
      C = Constant::getAllOnesValue(V->getType());
    }

    return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
                       : Builder.CreateBinOp(Opc, C, V);
  };

  // If the value used in the zext/sext is the select condition, or the
  // negation of the select condition, the binop can be simplified.
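  // Illustrative IR for the zext case (value names are hypothetical):
  //   %e = zext i1 %c to i32
  //   %s = select i1 %c, i32 %t, i32 %f
  //   %r = add i32 %e, %s
  // -->
  //   %r = select i1 %c, i32 (add 1, %t), i32 (add 0, %f)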
  if (CondVal == A) {
    Value *NewTrueVal = NewFoldedConst(false, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(true, FalseVal));
  }

  if (match(A, m_Not(m_Specific(CondVal)))) {
    Value *NewTrueVal = NewFoldedConst(true, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(false, FalseVal));
  }

  return nullptr;
}

Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
  Value *A, *B, *C, *D;
  Instruction::BinaryOps LHSOpcode, RHSOpcode;

  if (Op0)
    LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
  if (Op1)
    RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);

  // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
  // a common term.
  if (Op0 && Op1 && LHSOpcode == RHSOpcode)
    if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
      return V;

  // The instruction has the form "(A op' B) op (C)". Try to factorize common
  // term.
  if (Op0)
    if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
        return V;

  // The instruction has the form "(B) op (C op' D)". Try to factorize common
  // term.
  if (Op1)
    if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
        return V;

  return nullptr;
}

/// This tries to simplify binary operations which some other binary operation
/// distributes over either by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Factorization.
  if (Value *R = tryFactorizationFolds(I))
    return R;

  // Expansion.
  if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C". See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);

    // Do "A op C" and "B op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      C = Builder.CreateBinOp(InnerOpcode, L, R);
      C->takeName(&I);
      return C;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
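    // (For example, expanding "(A & B) | C" to "(A | C) & (B | C)": if "A | C"
    //  simplifies to -1, the identity for 'and', only "B | C" remains.)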
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // They do! Return "B op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, B, C);
      C->takeName(&I);
      return C;
    }

    // Does "B op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // They do! Return "A op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, A, C);
      C->takeName(&I);
      return C;
    }
  }

  if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)". See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);

    // Do "A op B" and "A op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      A = Builder.CreateBinOp(InnerOpcode, L, R);
      A->takeName(&I);
      return A;
    }

    // Does "A op B" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // They do! Return "A op C".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, C);
      A->takeName(&I);
      return A;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // They do! Return "A op B".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, B);
      A->takeName(&I);
      return A;
    }
  }

  return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
}

static std::optional<std::pair<Value *, Value *>>
matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
  if (LHS->getParent() != RHS->getParent())
    return std::nullopt;

  if (LHS->getNumIncomingValues() < 2)
    return std::nullopt;

  if (!equal(LHS->blocks(), RHS->blocks()))
    return std::nullopt;

  Value *L0 = LHS->getIncomingValue(0);
  Value *R0 = RHS->getIncomingValue(0);

  for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
    Value *L1 = LHS->getIncomingValue(I);
    Value *R1 = RHS->getIncomingValue(I);

    if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
      continue;

    return std::nullopt;
  }

  return std::optional(std::pair(L0, R0));
}

std::optional<std::pair<Value *, Value *>>
InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
  Instruction *LHSInst = dyn_cast<Instruction>(LHS);
  Instruction *RHSInst = dyn_cast<Instruction>(RHS);
  if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
    return std::nullopt;
  switch (LHSInst->getOpcode()) {
  case Instruction::PHI:
    return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
  case Instruction::Select: {
    Value *Cond = LHSInst->getOperand(0);
    Value *TrueVal = LHSInst->getOperand(1);
    Value *FalseVal = LHSInst->getOperand(2);
    if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
        FalseVal == RHSInst->getOperand(1))
      return std::pair(TrueVal, FalseVal);
    return std::nullopt;
  }
  case Instruction::Call: {
    // Match min(a, b) and max(a, b)
    MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
    MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
    if (LHSMinMax && RHSMinMax &&
        LHSMinMax->getPredicate() ==
            ICmpInst::getSwappedPredicate(RHSMinMax->getPredicate()) &&
        ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
         (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getLHS())))
      return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
    return std::nullopt;
  }
  default:
    return std::nullopt;
  }
}

Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
                                                        Value *LHS,
                                                        Value *RHS) {
  Value *A, *B, *C, *D, *E, *F;
  bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
  bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
  if (!LHSIsSelect && !RHSIsSelect)
    return nullptr;

  FastMathFlags FMF;
  BuilderTy::FastMathFlagGuard Guard(Builder);
  if (isa<FPMathOperator>(&I)) {
    FMF = I.getFastMathFlags();
    Builder.setFastMathFlags(FMF);
  }

  Instruction::BinaryOps Opcode = I.getOpcode();
  SimplifyQuery Q = SQ.getWithInstruction(&I);

  Value *Cond, *True = nullptr, *False = nullptr;

  // Special-case for add/negate combination. Replace the zero in the negation
  // with the trailing add operand:
  // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
  // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
  auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
    // We need an 'add' and exactly 1 arm of the select to have been
    // simplified.
    if (Opcode != Instruction::Add || (!True && !False) || (True && False))
      return nullptr;

    Value *N;
    if (True && match(FVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, True, Sub, I.getName());
    }
    if (False && match(TVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, Sub, False, I.getName());
    }
    return nullptr;
  };

  if (LHSIsSelect && RHSIsSelect && A == D) {
    // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
    Cond = A;
    True = simplifyBinOp(Opcode, B, E, FMF, Q);
    False = simplifyBinOp(Opcode, C, F, FMF, Q);

    if (LHS->hasOneUse() && RHS->hasOneUse()) {
      if (False && !True)
        True = Builder.CreateBinOp(Opcode, B, E);
      else if (True && !False)
        False = Builder.CreateBinOp(Opcode, C, F);
    }
  } else if (LHSIsSelect && LHS->hasOneUse()) {
    // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
    Cond = A;
    True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
    False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
    if (Value *NewSel = foldAddNegate(B, C, RHS))
      return NewSel;
  } else if (RHSIsSelect && RHS->hasOneUse()) {
    // X op (D ? E : F) -> D ? (X op E) : (X op F)
    Cond = D;
    True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
    False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
    if (Value *NewSel = foldAddNegate(E, F, LHS))
      return NewSel;
  }

  if (!True || !False)
    return nullptr;

  Value *SI = Builder.CreateSelect(Cond, True, False);
  SI->takeName(&I);
  return SI;
}

/// Freely adapt every user of V as-if V was changed to !V.
/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
  assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
  for (User *U : make_early_inc_range(I->users())) {
    if (U == IgnoredUser)
      continue; // Don't consider this user.
    switch (cast<Instruction>(U)->getOpcode()) {
    case Instruction::Select: {
      auto *SI = cast<SelectInst>(U);
      SI->swapValues();
      SI->swapProfMetadata();
      break;
    }
    case Instruction::Br:
      cast<BranchInst>(U)->swapSuccessors(); // swaps prof metadata too
      break;
    case Instruction::Xor:
      replaceInstUsesWith(cast<Instruction>(*U), I);
      // Add to worklist for DCE.
      addToWorklist(cast<Instruction>(U));
      break;
    default:
      llvm_unreachable("Got unexpected user - out of sync with "
                       "canFreelyInvertAllUsersOf() ?");
    }
  }
}

/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is
/// a constant zero (which is the 'negate' form).
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
  Value *NegV;
  if (match(V, m_Neg(m_Value(NegV))))
    return NegV;

  // Constants can be considered to be negated values if they can be folded.
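  // For example, a scalar constant such as "i32 7" is returned as "i32 -7".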
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Elt = CV->getAggregateElement(i);
      if (!Elt)
        return nullptr;

      if (isa<UndefValue>(Elt))
        continue;

      if (!isa<ConstantInt>(Elt))
        return nullptr;
    }
    return ConstantExpr::getNeg(CV);
  }

  // Negate integer vector splats.
  if (auto *CV = dyn_cast<Constant>(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
      return ConstantExpr::getNeg(CV);

  return nullptr;
}

/// A binop with a constant operand and a sign-extended boolean operand may be
/// converted into a select of constants by applying the binary operation to
/// the constant with the two possible values of the extended boolean (0 or -1).
Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
  // TODO: Handle non-commutative binop (constant is operand 0).
  // TODO: Handle zext.
  // TODO: Peek through 'not' of cast.
  Value *BO0 = BO.getOperand(0);
  Value *BO1 = BO.getOperand(1);
  Value *X;
  Constant *C;
  if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
      !X->getType()->isIntOrIntVectorTy(1))
    return nullptr;

  // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
  Constant *Ones = ConstantInt::getAllOnesValue(BO.getType());
  Constant *Zero = ConstantInt::getNullValue(BO.getType());
  Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
  Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
  return SelectInst::Create(X, TVal, FVal);
}

static Constant *constantFoldOperationIntoSelectOperand(Instruction &I,
                                                        SelectInst *SI,
                                                        bool IsTrueArm) {
  SmallVector<Constant *> ConstOps;
  for (Value *Op : I.operands()) {
    CmpInst::Predicate Pred;
    Constant *C = nullptr;
    if (Op == SI) {
      C = dyn_cast<Constant>(IsTrueArm ? SI->getTrueValue()
                                       : SI->getFalseValue());
    } else if (match(SI->getCondition(),
                     m_ICmp(Pred, m_Specific(Op), m_Constant(C))) &&
               Pred == (IsTrueArm ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
               isGuaranteedNotToBeUndefOrPoison(C)) {
      // Pass
    } else {
      C = dyn_cast<Constant>(Op);
    }
    if (C == nullptr)
      return nullptr;

    ConstOps.push_back(C);
  }

  return ConstantFoldInstOperands(&I, ConstOps, I.getModule()->getDataLayout());
}

static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
                                             Value *NewOp, InstCombiner &IC) {
  Instruction *Clone = I.clone();
  Clone->replaceUsesOfWith(SI, NewOp);
  IC.InsertNewInstBefore(Clone, SI->getIterator());
  return Clone;
}

Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
                                                bool FoldWithMultiUse) {
  // Don't modify shared select instructions unless FoldWithMultiUse is set.
  if (!SI->hasOneUse() && !FoldWithMultiUse)
    return nullptr;

  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();
  if (!(isa<Constant>(TV) || isa<Constant>(FV)))
    return nullptr;

  // Bool selects with constant operands can be folded to logical ops.
  if (SI->getType()->isIntOrIntVectorTy(1))
    return nullptr;

  // If it's a bitcast involving vectors, make sure it has the same number of
  // elements on both sides.
  if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
    VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
    VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

    // Verify that either both or neither are vectors.
    if ((SrcTy == nullptr) != (DestTy == nullptr))
      return nullptr;

    // If vectors, verify that they have the same number of elements.
    if (SrcTy && SrcTy->getElementCount() != DestTy->getElementCount())
      return nullptr;
  }

  // Test if a FCmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms. And in this case, at
  // least one of the comparison operands has at least one user besides
  // the compare (the select), which would often largely negate the
  // benefit of folding anyway.
  if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
    if (CI->hasOneUse()) {
      Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
      if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
        return nullptr;
    }
  }

  // Make sure that one of the select arms constant folds successfully.
  Value *NewTV =
      constantFoldOperationIntoSelectOperand(Op, SI, /*IsTrueArm*/ true);
  Value *NewFV =
      constantFoldOperationIntoSelectOperand(Op, SI, /*IsTrueArm*/ false);
  if (!NewTV && !NewFV)
    return nullptr;

  // Create an instruction for the arm that did not fold.
  if (!NewTV)
    NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
  if (!NewFV)
    NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
  return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
}

static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
                                         Value *InValue, BasicBlock *InBB,
                                         const DataLayout &DL,
                                         const SimplifyQuery SQ) {
  // NB: It is a precondition of this transform that the operands be
  // phi translatable! This is usually trivially satisfied by limiting it
  // to constant ops, and for selects we do a more sophisticated check.
  SmallVector<Value *> Ops;
  for (Value *Op : I.operands()) {
    if (Op == PN)
      Ops.push_back(InValue);
    else
      Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
  }

  // Don't consider the simplification successful if we get back a constant
  // expression. That's just an instruction in hiding.
  // Also reject the case where we simplify back to the phi node. We wouldn't
  // be able to remove it in that case.
  Value *NewVal = simplifyInstructionWithOperands(
      &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
  if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
    return NewVal;

  // Check if incoming PHI value can be replaced with constant
  // based on implied condition.
  BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
  const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
  if (TerminatorBI && TerminatorBI->isConditional() &&
      TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
    bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
    std::optional<bool> ImpliedCond =
        isImpliedCondition(TerminatorBI->getCondition(), ICmp->getPredicate(),
                           Ops[0], Ops[1], DL, LHSIsTrue);
    if (ImpliedCond)
      return ConstantInt::getBool(I.getType(), ImpliedCond.value());
  }

  return nullptr;
}

Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return nullptr;

  // We normally only transform phis with a single use. However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of
  // the uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (User *U : PN->users()) {
      Instruction *UI = cast<Instruction>(U);
      if (UI != &I && !I.isIdenticalTo(UI))
        return nullptr;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see whether the instruction can be folded into each phi operand.
  // If there is one operand that does not fold, remember the BB it is in.
  // If there is more than one or if *it* is a PHI, bail out.
  SmallVector<Value *> NewPhiValues;
  BasicBlock *NonSimplifiedBB = nullptr;
  Value *NonSimplifiedInVal = nullptr;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    BasicBlock *InBB = PN->getIncomingBlock(i);

    if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
      NewPhiValues.push_back(NewVal);
      continue;
    }

    if (NonSimplifiedBB)
      return nullptr; // More than one non-simplified value.

    NonSimplifiedBB = InBB;
    NonSimplifiedInVal = InVal;
    NewPhiValues.push_back(nullptr);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (isa<InvokeInst>(InVal))
      if (cast<Instruction>(InVal)->getParent() == NonSimplifiedBB)
        return nullptr;

    // If the incoming non-constant value is reachable from the phi's block,
    // we'll push the operation across a loop backedge. This could result in
    // an infinite combine loop, and is generally non-profitable (especially
    // if the operation was originally outside the loop).
1524 if (isPotentiallyReachable(PN->getParent(), NonSimplifiedBB, nullptr, &DT, 1525 LI)) 1526 return nullptr; 1527 } 1528 1529 // If there is exactly one non-simplified value, we can insert a copy of the 1530 // operation in that block. However, if this is a critical edge, we would be 1531 // inserting the computation on some other paths (e.g. inside a loop). Only 1532 // do this if the pred block is unconditionally branching into the phi block. 1533 // Also, make sure that the pred block is not dead code. 1534 if (NonSimplifiedBB != nullptr) { 1535 BranchInst *BI = dyn_cast<BranchInst>(NonSimplifiedBB->getTerminator()); 1536 if (!BI || !BI->isUnconditional() || 1537 !DT.isReachableFromEntry(NonSimplifiedBB)) 1538 return nullptr; 1539 } 1540 1541 // Okay, we can do the transformation: create the new PHI node. 1542 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues()); 1543 InsertNewInstBefore(NewPN, PN->getIterator()); 1544 NewPN->takeName(PN); 1545 NewPN->setDebugLoc(PN->getDebugLoc()); 1546 1547 // If we are going to have to insert a new computation, do so right before the 1548 // predecessor's terminator. 1549 Instruction *Clone = nullptr; 1550 if (NonSimplifiedBB) { 1551 Clone = I.clone(); 1552 for (Use &U : Clone->operands()) { 1553 if (U == PN) 1554 U = NonSimplifiedInVal; 1555 else 1556 U = U->DoPHITranslation(PN->getParent(), NonSimplifiedBB); 1557 } 1558 InsertNewInstBefore(Clone, NonSimplifiedBB->getTerminator()->getIterator()); 1559 } 1560 1561 for (unsigned i = 0; i != NumPHIValues; ++i) { 1562 if (NewPhiValues[i]) 1563 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i)); 1564 else 1565 NewPN->addIncoming(Clone, PN->getIncomingBlock(i)); 1566 } 1567 1568 for (User *U : make_early_inc_range(PN->users())) { 1569 Instruction *User = cast<Instruction>(U); 1570 if (User == &I) continue; 1571 replaceInstUsesWith(*User, NewPN); 1572 eraseInstFromFunction(*User); 1573 } 1574 1575 replaceAllDbgUsesWith(const_cast<PHINode &>(*PN), 1576 const_cast<PHINode &>(*NewPN), 1577 const_cast<PHINode &>(*PN), DT); 1578 return replaceInstUsesWith(I, NewPN); 1579 } 1580 1581 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) { 1582 // TODO: This should be similar to the incoming values check in foldOpIntoPhi: 1583 // we are guarding against replicating the binop in >1 predecessor. 1584 // This could miss matching a phi with 2 constant incoming values. 1585 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0)); 1586 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1)); 1587 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() || 1588 Phi0->getNumOperands() != Phi1->getNumOperands()) 1589 return nullptr; 1590 1591 // TODO: Remove the restriction for binop being in the same block as the phis. 1592 if (BO.getParent() != Phi0->getParent() || 1593 BO.getParent() != Phi1->getParent()) 1594 return nullptr; 1595 1596 // Fold if there is at least one specific constant value in phi0 or phi1's 1597 // incoming values that comes from the same block and this specific constant 1598 // value can be used to do optimization for specific binary operator. 
1599 // For example: 1600 // %phi0 = phi i32 [0, %bb0], [%i, %bb1] 1601 // %phi1 = phi i32 [%j, %bb0], [0, %bb1] 1602 // %add = add i32 %phi0, %phi1 1603 // ==> 1604 // %add = phi i32 [%j, %bb0], [%i, %bb1] 1605 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(), 1606 /*AllowRHSConstant*/ false); 1607 if (C) { 1608 SmallVector<Value *, 4> NewIncomingValues; 1609 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) { 1610 auto &Phi0Use = std::get<0>(T); 1611 auto &Phi1Use = std::get<1>(T); 1612 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use)) 1613 return false; 1614 Value *Phi0UseV = Phi0Use.get(); 1615 Value *Phi1UseV = Phi1Use.get(); 1616 if (Phi0UseV == C) 1617 NewIncomingValues.push_back(Phi1UseV); 1618 else if (Phi1UseV == C) 1619 NewIncomingValues.push_back(Phi0UseV); 1620 else 1621 return false; 1622 return true; 1623 }; 1624 1625 if (all_of(zip(Phi0->operands(), Phi1->operands()), 1626 CanFoldIncomingValuePair)) { 1627 PHINode *NewPhi = 1628 PHINode::Create(Phi0->getType(), Phi0->getNumOperands()); 1629 assert(NewIncomingValues.size() == Phi0->getNumOperands() && 1630 "The number of collected incoming values should equal the number " 1631 "of the original PHINode operands!"); 1632 for (unsigned I = 0; I < Phi0->getNumOperands(); I++) 1633 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I)); 1634 return NewPhi; 1635 } 1636 } 1637 1638 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2) 1639 return nullptr; 1640 1641 // Match a pair of incoming constants for one of the predecessor blocks. 1642 BasicBlock *ConstBB, *OtherBB; 1643 Constant *C0, *C1; 1644 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) { 1645 ConstBB = Phi0->getIncomingBlock(0); 1646 OtherBB = Phi0->getIncomingBlock(1); 1647 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) { 1648 ConstBB = Phi0->getIncomingBlock(1); 1649 OtherBB = Phi0->getIncomingBlock(0); 1650 } else { 1651 return nullptr; 1652 } 1653 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1))) 1654 return nullptr; 1655 1656 // The block that we are hoisting to must reach here unconditionally. 1657 // Otherwise, we could be speculatively executing an expensive or 1658 // non-speculative op. 1659 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator()); 1660 if (!PredBlockBranch || PredBlockBranch->isConditional() || 1661 !DT.isReachableFromEntry(OtherBB)) 1662 return nullptr; 1663 1664 // TODO: This check could be tightened to only apply to binops (div/rem) that 1665 // are not safe to speculatively execute. But that could allow hoisting 1666 // potentially expensive instructions (fdiv for example). 1667 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter) 1668 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter)) 1669 return nullptr; 1670 1671 // Fold constants for the predecessor block with constant incoming values. 1672 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL); 1673 if (!NewC) 1674 return nullptr; 1675 1676 // Make a new binop in the predecessor block with the non-constant incoming 1677 // values. 1678 Builder.SetInsertPoint(PredBlockBranch); 1679 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(), 1680 Phi0->getIncomingValueForBlock(OtherBB), 1681 Phi1->getIncomingValueForBlock(OtherBB)); 1682 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO)) 1683 NotFoldedNewBO->copyIRFlags(&BO); 1684 1685 // Replace the binop with a phi of the new values. 
The old phis are dead. 1686 PHINode *NewPhi = PHINode::Create(BO.getType(), 2); 1687 NewPhi->addIncoming(NewBO, OtherBB); 1688 NewPhi->addIncoming(NewC, ConstBB); 1689 return NewPhi; 1690 } 1691 1692 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) { 1693 if (!isa<Constant>(I.getOperand(1))) 1694 return nullptr; 1695 1696 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) { 1697 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel)) 1698 return NewSel; 1699 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) { 1700 if (Instruction *NewPhi = foldOpIntoPhi(I, PN)) 1701 return NewPhi; 1702 } 1703 return nullptr; 1704 } 1705 1706 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) { 1707 // If this GEP has only 0 indices, it is the same pointer as 1708 // Src. If Src is not a trivial GEP too, don't combine 1709 // the indices. 1710 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() && 1711 !Src.hasOneUse()) 1712 return false; 1713 return true; 1714 } 1715 1716 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) { 1717 if (!isa<VectorType>(Inst.getType())) 1718 return nullptr; 1719 1720 BinaryOperator::BinaryOps Opcode = Inst.getOpcode(); 1721 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1); 1722 assert(cast<VectorType>(LHS->getType())->getElementCount() == 1723 cast<VectorType>(Inst.getType())->getElementCount()); 1724 assert(cast<VectorType>(RHS->getType())->getElementCount() == 1725 cast<VectorType>(Inst.getType())->getElementCount()); 1726 1727 // If both operands of the binop are vector concatenations, then perform the 1728 // narrow binop on each pair of the source operands followed by concatenation 1729 // of the results. 1730 Value *L0, *L1, *R0, *R1; 1731 ArrayRef<int> Mask; 1732 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) && 1733 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) && 1734 LHS->hasOneUse() && RHS->hasOneUse() && 1735 cast<ShuffleVectorInst>(LHS)->isConcat() && 1736 cast<ShuffleVectorInst>(RHS)->isConcat()) { 1737 // This transform does not have the speculative execution constraint as 1738 // below because the shuffle is a concatenation. The new binops are 1739 // operating on exactly the same elements as the existing binop. 1740 // TODO: We could ease the mask requirement to allow different undef lanes, 1741 // but that requires an analysis of the binop-with-undef output value. 1742 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0); 1743 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0)) 1744 BO->copyIRFlags(&Inst); 1745 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1); 1746 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1)) 1747 BO->copyIRFlags(&Inst); 1748 return new ShuffleVectorInst(NewBO0, NewBO1, Mask); 1749 } 1750 1751 auto createBinOpReverse = [&](Value *X, Value *Y) { 1752 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName()); 1753 if (auto *BO = dyn_cast<BinaryOperator>(V)) 1754 BO->copyIRFlags(&Inst); 1755 Module *M = Inst.getModule(); 1756 Function *F = Intrinsic::getDeclaration( 1757 M, Intrinsic::experimental_vector_reverse, V->getType()); 1758 return CallInst::Create(F, V); 1759 }; 1760 1761 // NOTE: Reverse shuffles don't require the speculative execution protection 1762 // below because they don't affect which lanes take part in the computation. 
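// For example (illustrative IR, assuming <4 x i32> operands):
//   %a = call <4 x i32> @llvm.experimental.vector.reverse.v4i32(<4 x i32> %x)
//   %b = call <4 x i32> @llvm.experimental.vector.reverse.v4i32(<4 x i32> %y)
//   %r = add <4 x i32> %a, %b
// becomes a single add of %x and %y followed by one reverse of the result.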
1763 1764 Value *V1, *V2; 1765 if (match(LHS, m_VecReverse(m_Value(V1)))) { 1766 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2)) 1767 if (match(RHS, m_VecReverse(m_Value(V2))) && 1768 (LHS->hasOneUse() || RHS->hasOneUse() || 1769 (LHS == RHS && LHS->hasNUses(2)))) 1770 return createBinOpReverse(V1, V2); 1771 1772 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat)) 1773 if (LHS->hasOneUse() && isSplatValue(RHS)) 1774 return createBinOpReverse(V1, RHS); 1775 } 1776 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2)) 1777 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2))))) 1778 return createBinOpReverse(LHS, V2); 1779 1780 // It may not be safe to reorder shuffles and things like div, urem, etc. 1781 // because we may trap when executing those ops on unknown vector elements. 1782 // See PR20059. 1783 if (!isSafeToSpeculativelyExecute(&Inst)) 1784 return nullptr; 1785 1786 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) { 1787 Value *XY = Builder.CreateBinOp(Opcode, X, Y); 1788 if (auto *BO = dyn_cast<BinaryOperator>(XY)) 1789 BO->copyIRFlags(&Inst); 1790 return new ShuffleVectorInst(XY, M); 1791 }; 1792 1793 // If both arguments of the binary operation are shuffles that use the same 1794 // mask and shuffle within a single vector, move the shuffle after the binop. 1795 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) && 1796 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) && 1797 V1->getType() == V2->getType() && 1798 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) { 1799 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask) 1800 return createBinOpShuffle(V1, V2, Mask); 1801 } 1802 1803 // If both arguments of a commutative binop are select-shuffles that use the 1804 // same mask with commuted operands, the shuffles are unnecessary. 1805 if (Inst.isCommutative() && 1806 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) && 1807 match(RHS, 1808 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) { 1809 auto *LShuf = cast<ShuffleVectorInst>(LHS); 1810 auto *RShuf = cast<ShuffleVectorInst>(RHS); 1811 // TODO: Allow shuffles that contain undefs in the mask? 1812 // That is legal, but it reduces undef knowledge. 1813 // TODO: Allow arbitrary shuffles by shuffling after binop? 1814 // That might be legal, but we have to deal with poison. 1815 if (LShuf->isSelect() && 1816 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) && 1817 RShuf->isSelect() && 1818 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) { 1819 // Example: 1820 // LHS = shuffle V1, V2, <0, 5, 6, 3> 1821 // RHS = shuffle V2, V1, <0, 5, 6, 3> 1822 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2 1823 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2); 1824 NewBO->copyIRFlags(&Inst); 1825 return NewBO; 1826 } 1827 } 1828 1829 // If one argument is a shuffle within one vector and the other is a constant, 1830 // try moving the shuffle after the binary operation. This canonicalization 1831 // intends to move shuffles closer to other shuffles and binops closer to 1832 // other binops, so they can be folded. It may also enable demanded elements 1833 // transforms. 
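// For example (illustrative): with mask <1, 0>,
//   add (shufflevector <2 x i32> %v, <2 x i32> poison, <2 x i32> <i32 1, i32 0>), <i32 1, i32 2>
// can be rewritten as
//   shufflevector (add <2 x i32> %v, <i32 2, i32 1>), <2 x i32> poison, <2 x i32> <i32 1, i32 0>
// because shuffling the new constant <2, 1> with that mask reproduces <1, 2>.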
1834 Constant *C; 1835 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType()); 1836 if (InstVTy && 1837 match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(), 1838 m_Mask(Mask))), 1839 m_ImmConstant(C))) && 1840 cast<FixedVectorType>(V1->getType())->getNumElements() <= 1841 InstVTy->getNumElements()) { 1842 assert(InstVTy->getScalarType() == V1->getType()->getScalarType() && 1843 "Shuffle should not change scalar type"); 1844 1845 // Find constant NewC that has property: 1846 // shuffle(NewC, ShMask) = C 1847 // If such constant does not exist (example: ShMask=<0,0> and C=<1,2>) 1848 // reorder is not possible. A 1-to-1 mapping is not required. Example: 1849 // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef> 1850 bool ConstOp1 = isa<Constant>(RHS); 1851 ArrayRef<int> ShMask = Mask; 1852 unsigned SrcVecNumElts = 1853 cast<FixedVectorType>(V1->getType())->getNumElements(); 1854 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType()); 1855 SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, PoisonScalar); 1856 bool MayChange = true; 1857 unsigned NumElts = InstVTy->getNumElements(); 1858 for (unsigned I = 0; I < NumElts; ++I) { 1859 Constant *CElt = C->getAggregateElement(I); 1860 if (ShMask[I] >= 0) { 1861 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle"); 1862 Constant *NewCElt = NewVecC[ShMask[I]]; 1863 // Bail out if: 1864 // 1. The constant vector contains a constant expression. 1865 // 2. The shuffle needs an element of the constant vector that can't 1866 // be mapped to a new constant vector. 1867 // 3. This is a widening shuffle that copies elements of V1 into the 1868 // extended elements (extending with poison is allowed). 1869 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) || 1870 I >= SrcVecNumElts) { 1871 MayChange = false; 1872 break; 1873 } 1874 NewVecC[ShMask[I]] = CElt; 1875 } 1876 // If this is a widening shuffle, we must be able to extend with poison 1877 // elements. If the original binop does not produce a poison in the high 1878 // lanes, then this transform is not safe. 1879 // Similarly for poison lanes due to the shuffle mask, we can only 1880 // transform binops that preserve poison. 1881 // TODO: We could shuffle those non-poison constant values into the 1882 // result by using a constant vector (rather than an poison vector) 1883 // as operand 1 of the new binop, but that might be too aggressive 1884 // for target-independent shuffle creation. 1885 if (I >= SrcVecNumElts || ShMask[I] < 0) { 1886 Constant *MaybePoison = 1887 ConstOp1 1888 ? ConstantFoldBinaryOpOperands(Opcode, PoisonScalar, CElt, DL) 1889 : ConstantFoldBinaryOpOperands(Opcode, CElt, PoisonScalar, DL); 1890 if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) { 1891 MayChange = false; 1892 break; 1893 } 1894 } 1895 } 1896 if (MayChange) { 1897 Constant *NewC = ConstantVector::get(NewVecC); 1898 // It may not be safe to execute a binop on a vector with poison elements 1899 // because the entire instruction can be folded to undef or create poison 1900 // that did not exist in the original code. 1901 // TODO: The shift case should not be necessary. 1902 if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1)) 1903 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1); 1904 1905 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask) 1906 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask) 1907 Value *NewLHS = ConstOp1 ? V1 : NewC; 1908 Value *NewRHS = ConstOp1 ? 
NewC : V1; 1909 return createBinOpShuffle(NewLHS, NewRHS, Mask); 1910 } 1911 } 1912 1913 // Try to reassociate to sink a splat shuffle after a binary operation. 1914 if (Inst.isAssociative() && Inst.isCommutative()) { 1915 // Canonicalize shuffle operand as LHS. 1916 if (isa<ShuffleVectorInst>(RHS)) 1917 std::swap(LHS, RHS); 1918 1919 Value *X; 1920 ArrayRef<int> MaskC; 1921 int SplatIndex; 1922 Value *Y, *OtherOp; 1923 if (!match(LHS, 1924 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) || 1925 !match(MaskC, m_SplatOrUndefMask(SplatIndex)) || 1926 X->getType() != Inst.getType() || 1927 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp))))) 1928 return nullptr; 1929 1930 // FIXME: This may not be safe if the analysis allows undef elements. By 1931 // moving 'Y' before the splat shuffle, we are implicitly assuming 1932 // that it is not undef/poison at the splat index. 1933 if (isSplatValue(OtherOp, SplatIndex)) { 1934 std::swap(Y, OtherOp); 1935 } else if (!isSplatValue(Y, SplatIndex)) { 1936 return nullptr; 1937 } 1938 1939 // X and Y are splatted values, so perform the binary operation on those 1940 // values followed by a splat followed by the 2nd binary operation: 1941 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp 1942 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y); 1943 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex); 1944 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask); 1945 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp); 1946 1947 // Intersect FMF on both new binops. Other (poison-generating) flags are 1948 // dropped to be safe. 1949 if (isa<FPMathOperator>(R)) { 1950 R->copyFastMathFlags(&Inst); 1951 R->andIRFlags(RHS); 1952 } 1953 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO)) 1954 NewInstBO->copyIRFlags(R); 1955 return R; 1956 } 1957 1958 return nullptr; 1959 } 1960 1961 /// Try to narrow the width of a binop if at least 1 operand is an extend of 1962 /// of a value. This requires a potentially expensive known bits check to make 1963 /// sure the narrow op does not overflow. 1964 Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) { 1965 // We need at least one extended operand. 1966 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1); 1967 1968 // If this is a sub, we swap the operands since we always want an extension 1969 // on the RHS. The LHS can be an extension or a constant. 1970 if (BO.getOpcode() == Instruction::Sub) 1971 std::swap(Op0, Op1); 1972 1973 Value *X; 1974 bool IsSext = match(Op0, m_SExt(m_Value(X))); 1975 if (!IsSext && !match(Op0, m_ZExt(m_Value(X)))) 1976 return nullptr; 1977 1978 // If both operands are the same extension from the same source type and we 1979 // can eliminate at least one (hasOneUse), this might work. 1980 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt; 1981 Value *Y; 1982 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() && 1983 cast<Operator>(Op1)->getOpcode() == CastOpc && 1984 (Op0->hasOneUse() || Op1->hasOneUse()))) { 1985 // If that did not match, see if we have a suitable constant operand. 1986 // Truncating and extending must produce the same constant. 1987 Constant *WideC; 1988 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC))) 1989 return nullptr; 1990 Constant *NarrowC = getLosslessTrunc(WideC, X->getType(), CastOpc); 1991 if (!NarrowC) 1992 return nullptr; 1993 Y = NarrowC; 1994 } 1995 1996 // Swap back now that we found our operands. 
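// For example (illustrative), assuming the no-overflow check below succeeds:
//   add (zext i8 %x to i32), (zext i8 %y to i32)
// narrows to
//   zext (add nuw i8 %x, %y) to i32
// so the arithmetic is performed in the original 8-bit width.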
1997 if (BO.getOpcode() == Instruction::Sub) 1998 std::swap(X, Y); 1999 2000 // Both operands have narrow versions. Last step: the math must not overflow 2001 // in the narrow width. 2002 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext)) 2003 return nullptr; 2004 2005 // bo (ext X), (ext Y) --> ext (bo X, Y) 2006 // bo (ext X), C --> ext (bo X, C') 2007 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow"); 2008 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) { 2009 if (IsSext) 2010 NewBinOp->setHasNoSignedWrap(); 2011 else 2012 NewBinOp->setHasNoUnsignedWrap(); 2013 } 2014 return CastInst::Create(CastOpc, NarrowBO, BO.getType()); 2015 } 2016 2017 static bool isMergedGEPInBounds(GEPOperator &GEP1, GEPOperator &GEP2) { 2018 // At least one GEP must be inbounds. 2019 if (!GEP1.isInBounds() && !GEP2.isInBounds()) 2020 return false; 2021 2022 return (GEP1.isInBounds() || GEP1.hasAllZeroIndices()) && 2023 (GEP2.isInBounds() || GEP2.hasAllZeroIndices()); 2024 } 2025 2026 /// Thread a GEP operation with constant indices through the constant true/false 2027 /// arms of a select. 2028 static Instruction *foldSelectGEP(GetElementPtrInst &GEP, 2029 InstCombiner::BuilderTy &Builder) { 2030 if (!GEP.hasAllConstantIndices()) 2031 return nullptr; 2032 2033 Instruction *Sel; 2034 Value *Cond; 2035 Constant *TrueC, *FalseC; 2036 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) || 2037 !match(Sel, 2038 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC)))) 2039 return nullptr; 2040 2041 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC' 2042 // Propagate 'inbounds' and metadata from existing instructions. 2043 // Note: using IRBuilder to create the constants for efficiency. 2044 SmallVector<Value *, 4> IndexC(GEP.indices()); 2045 bool IsInBounds = GEP.isInBounds(); 2046 Type *Ty = GEP.getSourceElementType(); 2047 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", IsInBounds); 2048 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", IsInBounds); 2049 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel); 2050 } 2051 2052 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP, 2053 GEPOperator *Src) { 2054 // Combine Indices - If the source pointer to this getelementptr instruction 2055 // is a getelementptr instruction with matching element type, combine the 2056 // indices of the two getelementptr instructions into a single instruction. 2057 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src)) 2058 return nullptr; 2059 2060 // For constant GEPs, use a more general offset-based folding approach. 2061 Type *PtrTy = Src->getType()->getScalarType(); 2062 if (GEP.hasAllConstantIndices() && 2063 (Src->hasOneUse() || Src->hasAllConstantIndices())) { 2064 // Split Src into a variable part and a constant suffix. 2065 gep_type_iterator GTI = gep_type_begin(*Src); 2066 Type *BaseType = GTI.getIndexedType(); 2067 bool IsFirstType = true; 2068 unsigned NumVarIndices = 0; 2069 for (auto Pair : enumerate(Src->indices())) { 2070 if (!isa<ConstantInt>(Pair.value())) { 2071 BaseType = GTI.getIndexedType(); 2072 IsFirstType = false; 2073 NumVarIndices = Pair.index() + 1; 2074 } 2075 ++GTI; 2076 } 2077 2078 // Determine the offset for the constant suffix of Src. 2079 APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0); 2080 if (NumVarIndices != Src->getNumIndices()) { 2081 // FIXME: getIndexedOffsetInType() does not handled scalable vectors. 
2082 if (BaseType->isScalableTy()) 2083 return nullptr; 2084 2085 SmallVector<Value *> ConstantIndices; 2086 if (!IsFirstType) 2087 ConstantIndices.push_back( 2088 Constant::getNullValue(Type::getInt32Ty(GEP.getContext()))); 2089 append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices)); 2090 Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices); 2091 } 2092 2093 // Add the offset for GEP (which is fully constant). 2094 if (!GEP.accumulateConstantOffset(DL, Offset)) 2095 return nullptr; 2096 2097 APInt OffsetOld = Offset; 2098 // Convert the total offset back into indices. 2099 SmallVector<APInt> ConstIndices = 2100 DL.getGEPIndicesForOffset(BaseType, Offset); 2101 if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero())) { 2102 // If both GEP are constant-indexed, and cannot be merged in either way, 2103 // convert them to a GEP of i8. 2104 if (Src->hasAllConstantIndices()) 2105 return replaceInstUsesWith( 2106 GEP, Builder.CreateGEP( 2107 Builder.getInt8Ty(), Src->getOperand(0), 2108 Builder.getInt(OffsetOld), "", 2109 isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)))); 2110 return nullptr; 2111 } 2112 2113 bool IsInBounds = isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)); 2114 SmallVector<Value *> Indices; 2115 append_range(Indices, drop_end(Src->indices(), 2116 Src->getNumIndices() - NumVarIndices)); 2117 for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) { 2118 Indices.push_back(ConstantInt::get(GEP.getContext(), Idx)); 2119 // Even if the total offset is inbounds, we may end up representing it 2120 // by first performing a larger negative offset, and then a smaller 2121 // positive one. The large negative offset might go out of bounds. Only 2122 // preserve inbounds if all signs are the same. 2123 IsInBounds &= Idx.isNonNegative() == ConstIndices[0].isNonNegative(); 2124 } 2125 2126 return replaceInstUsesWith( 2127 GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0), 2128 Indices, "", IsInBounds)); 2129 } 2130 2131 if (Src->getResultElementType() != GEP.getSourceElementType()) 2132 return nullptr; 2133 2134 SmallVector<Value*, 8> Indices; 2135 2136 // Find out whether the last index in the source GEP is a sequential idx. 2137 bool EndsWithSequential = false; 2138 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src); 2139 I != E; ++I) 2140 EndsWithSequential = I.isSequential(); 2141 2142 // Can we combine the two pointer arithmetics offsets? 2143 if (EndsWithSequential) { 2144 // Replace: gep (gep %P, long B), long A, ... 2145 // With: T = long A+B; gep %P, T, ... 2146 Value *SO1 = Src->getOperand(Src->getNumOperands()-1); 2147 Value *GO1 = GEP.getOperand(1); 2148 2149 // If they aren't the same type, then the input hasn't been processed 2150 // by the loop above yet (which canonicalizes sequential index types to 2151 // intptr_t). Just avoid transforming this until the input has been 2152 // normalized. 2153 if (SO1->getType() != GO1->getType()) 2154 return nullptr; 2155 2156 Value *Sum = 2157 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP)); 2158 // Only do the combine when we are sure the cost after the 2159 // merge is never more than that before the merge. 2160 if (Sum == nullptr) 2161 return nullptr; 2162 2163 // Update the GEP in place if possible. 
2164 if (Src->getNumOperands() == 2) { 2165 GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))); 2166 replaceOperand(GEP, 0, Src->getOperand(0)); 2167 replaceOperand(GEP, 1, Sum); 2168 return &GEP; 2169 } 2170 Indices.append(Src->op_begin()+1, Src->op_end()-1); 2171 Indices.push_back(Sum); 2172 Indices.append(GEP.op_begin()+2, GEP.op_end()); 2173 } else if (isa<Constant>(*GEP.idx_begin()) && 2174 cast<Constant>(*GEP.idx_begin())->isNullValue() && 2175 Src->getNumOperands() != 1) { 2176 // Otherwise we can do the fold if the first index of the GEP is a zero 2177 Indices.append(Src->op_begin()+1, Src->op_end()); 2178 Indices.append(GEP.idx_begin()+1, GEP.idx_end()); 2179 } 2180 2181 if (!Indices.empty()) 2182 return replaceInstUsesWith( 2183 GEP, Builder.CreateGEP( 2184 Src->getSourceElementType(), Src->getOperand(0), Indices, "", 2185 isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)))); 2186 2187 return nullptr; 2188 } 2189 2190 Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, 2191 BuilderTy *Builder, 2192 bool &DoesConsume, unsigned Depth) { 2193 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1)); 2194 // ~(~(X)) -> X. 2195 Value *A, *B; 2196 if (match(V, m_Not(m_Value(A)))) { 2197 DoesConsume = true; 2198 return A; 2199 } 2200 2201 Constant *C; 2202 // Constants can be considered to be not'ed values. 2203 if (match(V, m_ImmConstant(C))) 2204 return ConstantExpr::getNot(C); 2205 2206 if (Depth++ >= MaxAnalysisRecursionDepth) 2207 return nullptr; 2208 2209 // The rest of the cases require that we invert all uses so don't bother 2210 // doing the analysis if we know we can't use the result. 2211 if (!WillInvertAllUses) 2212 return nullptr; 2213 2214 // Compares can be inverted if all of their uses are being modified to use 2215 // the ~V. 2216 if (auto *I = dyn_cast<CmpInst>(V)) { 2217 if (Builder != nullptr) 2218 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0), 2219 I->getOperand(1)); 2220 return NonNull; 2221 } 2222 2223 // If `V` is of the form `A + B` then `-1 - V` can be folded into 2224 // `(-1 - B) - A` if we are willing to invert all of the uses. 2225 if (match(V, m_Add(m_Value(A), m_Value(B)))) { 2226 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder, 2227 DoesConsume, Depth)) 2228 return Builder ? Builder->CreateSub(BV, A) : NonNull; 2229 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder, 2230 DoesConsume, Depth)) 2231 return Builder ? Builder->CreateSub(AV, B) : NonNull; 2232 return nullptr; 2233 } 2234 2235 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded 2236 // into `A ^ B` if we are willing to invert all of the uses. 2237 if (match(V, m_Xor(m_Value(A), m_Value(B)))) { 2238 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder, 2239 DoesConsume, Depth)) 2240 return Builder ? Builder->CreateXor(A, BV) : NonNull; 2241 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder, 2242 DoesConsume, Depth)) 2243 return Builder ? Builder->CreateXor(AV, B) : NonNull; 2244 return nullptr; 2245 } 2246 2247 // If `V` is of the form `B - A` then `-1 - V` can be folded into 2248 // `A + (-1 - B)` if we are willing to invert all of the uses. 2249 if (match(V, m_Sub(m_Value(A), m_Value(B)))) { 2250 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder, 2251 DoesConsume, Depth)) 2252 return Builder ? 
Builder->CreateAdd(AV, B) : NonNull; 2253 return nullptr; 2254 } 2255 2256 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded 2257 // into `A s>> B` if we are willing to invert all of the uses. 2258 if (match(V, m_AShr(m_Value(A), m_Value(B)))) { 2259 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder, 2260 DoesConsume, Depth)) 2261 return Builder ? Builder->CreateAShr(AV, B) : NonNull; 2262 return nullptr; 2263 } 2264 2265 Value *Cond; 2266 // LogicOps are special in that we canonicalize them at the cost of an 2267 // instruction. 2268 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) && 2269 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V)); 2270 // Selects/min/max with invertible operands are freely invertible 2271 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) { 2272 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr, 2273 DoesConsume, Depth)) 2274 return nullptr; 2275 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder, 2276 DoesConsume, Depth)) { 2277 if (Builder != nullptr) { 2278 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder, 2279 DoesConsume, Depth); 2280 assert(NotB != nullptr && 2281 "Unable to build inverted value for known freely invertable op"); 2282 if (auto *II = dyn_cast<IntrinsicInst>(V)) 2283 return Builder->CreateBinaryIntrinsic( 2284 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB); 2285 return Builder->CreateSelect(Cond, NotA, NotB); 2286 } 2287 return NonNull; 2288 } 2289 } 2290 2291 return nullptr; 2292 } 2293 2294 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) { 2295 Value *PtrOp = GEP.getOperand(0); 2296 SmallVector<Value *, 8> Indices(GEP.indices()); 2297 Type *GEPType = GEP.getType(); 2298 Type *GEPEltType = GEP.getSourceElementType(); 2299 bool IsGEPSrcEleScalable = GEPEltType->isScalableTy(); 2300 if (Value *V = simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.isInBounds(), 2301 SQ.getWithInstruction(&GEP))) 2302 return replaceInstUsesWith(GEP, V); 2303 2304 // For vector geps, use the generic demanded vector support. 2305 // Skip if GEP return type is scalable. The number of elements is unknown at 2306 // compile-time. 2307 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) { 2308 auto VWidth = GEPFVTy->getNumElements(); 2309 APInt PoisonElts(VWidth, 0); 2310 APInt AllOnesEltMask(APInt::getAllOnes(VWidth)); 2311 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask, 2312 PoisonElts)) { 2313 if (V != &GEP) 2314 return replaceInstUsesWith(GEP, V); 2315 return &GEP; 2316 } 2317 2318 // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if 2319 // possible (decide on canonical form for pointer broadcast), 3) exploit 2320 // undef elements to decrease demanded bits 2321 } 2322 2323 // Eliminate unneeded casts for indices, and replace indices which displace 2324 // by multiples of a zero size type with zero. 2325 bool MadeChange = false; 2326 2327 // Index width may not be the same width as pointer width. 2328 // Data layout chooses the right type based on supported integer types. 2329 Type *NewScalarIndexTy = 2330 DL.getIndexType(GEP.getPointerOperandType()->getScalarType()); 2331 2332 gep_type_iterator GTI = gep_type_begin(GEP); 2333 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E; 2334 ++I, ++GTI) { 2335 // Skip indices into struct types. 
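// (Struct field indices are required to be constant i32 values, so there is
// nothing to canonicalize for them; only the array/vector/pointer indices
// handled below are rewritten.)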
2336 if (GTI.isStruct()) 2337 continue; 2338 2339 Type *IndexTy = (*I)->getType(); 2340 Type *NewIndexType = 2341 IndexTy->isVectorTy() 2342 ? VectorType::get(NewScalarIndexTy, 2343 cast<VectorType>(IndexTy)->getElementCount()) 2344 : NewScalarIndexTy; 2345 2346 // If the element type has zero size then any index over it is equivalent 2347 // to an index of zero, so replace it with zero if it is not zero already. 2348 Type *EltTy = GTI.getIndexedType(); 2349 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero()) 2350 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) { 2351 *I = Constant::getNullValue(NewIndexType); 2352 MadeChange = true; 2353 } 2354 2355 if (IndexTy != NewIndexType) { 2356 // If we are using a wider index than needed for this platform, shrink 2357 // it to what we need. If narrower, sign-extend it to what we need. 2358 // This explicit cast can make subsequent optimizations more obvious. 2359 *I = Builder.CreateIntCast(*I, NewIndexType, true); 2360 MadeChange = true; 2361 } 2362 } 2363 if (MadeChange) 2364 return &GEP; 2365 2366 // Check to see if the inputs to the PHI node are getelementptr instructions. 2367 if (auto *PN = dyn_cast<PHINode>(PtrOp)) { 2368 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0)); 2369 if (!Op1) 2370 return nullptr; 2371 2372 // Don't fold a GEP into itself through a PHI node. This can only happen 2373 // through the back-edge of a loop. Folding a GEP into itself means that 2374 // the value of the previous iteration needs to be stored in the meantime, 2375 // thus requiring an additional register variable to be live, but not 2376 // actually achieving anything (the GEP still needs to be executed once per 2377 // loop iteration). 2378 if (Op1 == &GEP) 2379 return nullptr; 2380 2381 int DI = -1; 2382 2383 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) { 2384 auto *Op2 = dyn_cast<GetElementPtrInst>(*I); 2385 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() || 2386 Op1->getSourceElementType() != Op2->getSourceElementType()) 2387 return nullptr; 2388 2389 // As for Op1 above, don't try to fold a GEP into itself. 2390 if (Op2 == &GEP) 2391 return nullptr; 2392 2393 // Keep track of the type as we walk the GEP. 2394 Type *CurTy = nullptr; 2395 2396 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) { 2397 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType()) 2398 return nullptr; 2399 2400 if (Op1->getOperand(J) != Op2->getOperand(J)) { 2401 if (DI == -1) { 2402 // We have not seen any differences yet in the GEPs feeding the 2403 // PHI yet, so we record this one if it is allowed to be a 2404 // variable. 2405 2406 // The first two arguments can vary for any GEP, the rest have to be 2407 // static for struct slots 2408 if (J > 1) { 2409 assert(CurTy && "No current type?"); 2410 if (CurTy->isStructTy()) 2411 return nullptr; 2412 } 2413 2414 DI = J; 2415 } else { 2416 // The GEP is different by more than one input. While this could be 2417 // extended to support GEPs that vary by more than one variable it 2418 // doesn't make sense since it greatly increases the complexity and 2419 // would result in an R+R+R addressing mode which no backend 2420 // directly supports and would need to be broken into several 2421 // simpler instructions anyway. 2422 return nullptr; 2423 } 2424 } 2425 2426 // Sink down a layer of the type for the next iteration. 
2427 if (J > 0) { 2428 if (J == 1) { 2429 CurTy = Op1->getSourceElementType(); 2430 } else { 2431 CurTy = 2432 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J)); 2433 } 2434 } 2435 } 2436 } 2437 2438 // If not all GEPs are identical we'll have to create a new PHI node. 2439 // Check that the old PHI node has only one use so that it will get 2440 // removed. 2441 if (DI != -1 && !PN->hasOneUse()) 2442 return nullptr; 2443 2444 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone()); 2445 if (DI == -1) { 2446 // All the GEPs feeding the PHI are identical. Clone one down into our 2447 // BB so that it can be merged with the current GEP. 2448 } else { 2449 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP 2450 // into the current block so it can be merged, and create a new PHI to 2451 // set that index. 2452 PHINode *NewPN; 2453 { 2454 IRBuilderBase::InsertPointGuard Guard(Builder); 2455 Builder.SetInsertPoint(PN); 2456 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(), 2457 PN->getNumOperands()); 2458 } 2459 2460 for (auto &I : PN->operands()) 2461 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI), 2462 PN->getIncomingBlock(I)); 2463 2464 NewGEP->setOperand(DI, NewPN); 2465 } 2466 2467 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt()); 2468 return replaceOperand(GEP, 0, NewGEP); 2469 } 2470 2471 if (auto *Src = dyn_cast<GEPOperator>(PtrOp)) 2472 if (Instruction *I = visitGEPOfGEP(GEP, Src)) 2473 return I; 2474 2475 // Skip if GEP source element type is scalable. The type alloc size is unknown 2476 // at compile-time. 2477 if (GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) { 2478 unsigned AS = GEP.getPointerAddressSpace(); 2479 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() == 2480 DL.getIndexSizeInBits(AS)) { 2481 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue(); 2482 2483 if (TyAllocSize == 1) { 2484 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y), 2485 // but only if the result pointer is only used as if it were an integer, 2486 // or both point to the same underlying object (otherwise provenance is 2487 // not necessarily retained). 2488 Value *X = GEP.getPointerOperand(); 2489 Value *Y; 2490 if (match(GEP.getOperand(1), 2491 m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) && 2492 GEPType == Y->getType()) { 2493 bool HasSameUnderlyingObject = 2494 getUnderlyingObject(X) == getUnderlyingObject(Y); 2495 bool Changed = false; 2496 GEP.replaceUsesWithIf(Y, [&](Use &U) { 2497 bool ShouldReplace = HasSameUnderlyingObject || 2498 isa<ICmpInst>(U.getUser()) || 2499 isa<PtrToIntInst>(U.getUser()); 2500 Changed |= ShouldReplace; 2501 return ShouldReplace; 2502 }); 2503 return Changed ? &GEP : nullptr; 2504 } 2505 } else { 2506 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V) 2507 Value *V; 2508 if ((has_single_bit(TyAllocSize) && 2509 match(GEP.getOperand(1), 2510 m_Exact(m_AShr(m_Value(V), 2511 m_SpecificInt(countr_zero(TyAllocSize)))))) || 2512 match(GEP.getOperand(1), 2513 m_Exact(m_SDiv(m_Value(V), m_SpecificInt(TyAllocSize))))) { 2514 GetElementPtrInst *NewGEP = GetElementPtrInst::Create( 2515 Builder.getInt8Ty(), GEP.getPointerOperand(), V); 2516 NewGEP->setIsInBounds(GEP.isInBounds()); 2517 return NewGEP; 2518 } 2519 } 2520 } 2521 } 2522 // We do not handle pointer-vector geps here. 2523 if (GEPType->isVectorTy()) 2524 return nullptr; 2525 2526 if (GEP.getNumIndices() == 1) { 2527 // Try to replace ADD + GEP with GEP + GEP. 
2528 Value *Idx1, *Idx2; 2529 if (match(GEP.getOperand(1), 2530 m_OneUse(m_Add(m_Value(Idx1), m_Value(Idx2))))) { 2531 // %idx = add i64 %idx1, %idx2 2532 // %gep = getelementptr i32, ptr %ptr, i64 %idx 2533 // as: 2534 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1 2535 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2 2536 auto *NewPtr = Builder.CreateGEP(GEP.getResultElementType(), 2537 GEP.getPointerOperand(), Idx1); 2538 return GetElementPtrInst::Create(GEP.getResultElementType(), NewPtr, 2539 Idx2); 2540 } 2541 ConstantInt *C; 2542 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAdd( 2543 m_Value(Idx1), m_ConstantInt(C))))))) { 2544 // %add = add nsw i32 %idx1, idx2 2545 // %sidx = sext i32 %add to i64 2546 // %gep = getelementptr i32, ptr %ptr, i64 %sidx 2547 // as: 2548 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1 2549 // %newgep = getelementptr i32, ptr %newptr, i32 idx2 2550 auto *NewPtr = Builder.CreateGEP( 2551 GEP.getResultElementType(), GEP.getPointerOperand(), 2552 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType())); 2553 return GetElementPtrInst::Create( 2554 GEP.getResultElementType(), NewPtr, 2555 Builder.CreateSExt(C, GEP.getOperand(1)->getType())); 2556 } 2557 } 2558 2559 if (!GEP.isInBounds()) { 2560 unsigned IdxWidth = 2561 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace()); 2562 APInt BasePtrOffset(IdxWidth, 0); 2563 Value *UnderlyingPtrOp = 2564 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, 2565 BasePtrOffset); 2566 bool CanBeNull, CanBeFreed; 2567 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes( 2568 DL, CanBeNull, CanBeFreed); 2569 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) { 2570 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) && 2571 BasePtrOffset.isNonNegative()) { 2572 APInt AllocSize(IdxWidth, DerefBytes); 2573 if (BasePtrOffset.ule(AllocSize)) { 2574 return GetElementPtrInst::CreateInBounds( 2575 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName()); 2576 } 2577 } 2578 } 2579 } 2580 2581 if (Instruction *R = foldSelectGEP(GEP, Builder)) 2582 return R; 2583 2584 return nullptr; 2585 } 2586 2587 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, 2588 Instruction *AI) { 2589 if (isa<ConstantPointerNull>(V)) 2590 return true; 2591 if (auto *LI = dyn_cast<LoadInst>(V)) 2592 return isa<GlobalVariable>(LI->getPointerOperand()); 2593 // Two distinct allocations will never be equal. 2594 return isAllocLikeFn(V, &TLI) && V != AI; 2595 } 2596 2597 /// Given a call CB which uses an address UsedV, return true if we can prove the 2598 /// call's only possible effect is storing to V. 2599 static bool isRemovableWrite(CallBase &CB, Value *UsedV, 2600 const TargetLibraryInfo &TLI) { 2601 if (!CB.use_empty()) 2602 // TODO: add recursion if returned attribute is present 2603 return false; 2604 2605 if (CB.isTerminator()) 2606 // TODO: remove implementation restriction 2607 return false; 2608 2609 if (!CB.willReturn() || !CB.doesNotThrow()) 2610 return false; 2611 2612 // If the only possible side effect of the call is writing to the alloca, 2613 // and the result isn't used, we can safely remove any reads implied by the 2614 // call including those which might read the alloca itself. 
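// For instance (illustrative), a call that is known to write only through its
// destination argument (the dead alloca) and whose result is unused has no
// observable effect once the allocation itself goes away.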
2615 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI); 2616 return Dest && Dest->Ptr == UsedV; 2617 } 2618 2619 static bool isAllocSiteRemovable(Instruction *AI, 2620 SmallVectorImpl<WeakTrackingVH> &Users, 2621 const TargetLibraryInfo &TLI) { 2622 SmallVector<Instruction*, 4> Worklist; 2623 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI); 2624 Worklist.push_back(AI); 2625 2626 do { 2627 Instruction *PI = Worklist.pop_back_val(); 2628 for (User *U : PI->users()) { 2629 Instruction *I = cast<Instruction>(U); 2630 switch (I->getOpcode()) { 2631 default: 2632 // Give up the moment we see something we can't handle. 2633 return false; 2634 2635 case Instruction::AddrSpaceCast: 2636 case Instruction::BitCast: 2637 case Instruction::GetElementPtr: 2638 Users.emplace_back(I); 2639 Worklist.push_back(I); 2640 continue; 2641 2642 case Instruction::ICmp: { 2643 ICmpInst *ICI = cast<ICmpInst>(I); 2644 // We can fold eq/ne comparisons with null to false/true, respectively. 2645 // We also fold comparisons in some conditions provided the alloc has 2646 // not escaped (see isNeverEqualToUnescapedAlloc). 2647 if (!ICI->isEquality()) 2648 return false; 2649 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0; 2650 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI)) 2651 return false; 2652 2653 // Do not fold compares to aligned_alloc calls, as they may have to 2654 // return null in case the required alignment cannot be satisfied, 2655 // unless we can prove that both alignment and size are valid. 2656 auto AlignmentAndSizeKnownValid = [](CallBase *CB) { 2657 // Check if alignment and size of a call to aligned_alloc is valid, 2658 // that is alignment is a power-of-2 and the size is a multiple of the 2659 // alignment. 2660 const APInt *Alignment; 2661 const APInt *Size; 2662 return match(CB->getArgOperand(0), m_APInt(Alignment)) && 2663 match(CB->getArgOperand(1), m_APInt(Size)) && 2664 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero(); 2665 }; 2666 auto *CB = dyn_cast<CallBase>(AI); 2667 LibFunc TheLibFunc; 2668 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) && 2669 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc && 2670 !AlignmentAndSizeKnownValid(CB)) 2671 return false; 2672 Users.emplace_back(I); 2673 continue; 2674 } 2675 2676 case Instruction::Call: 2677 // Ignore no-op and store intrinsics. 
2678 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 2679 switch (II->getIntrinsicID()) { 2680 default: 2681 return false; 2682 2683 case Intrinsic::memmove: 2684 case Intrinsic::memcpy: 2685 case Intrinsic::memset: { 2686 MemIntrinsic *MI = cast<MemIntrinsic>(II); 2687 if (MI->isVolatile() || MI->getRawDest() != PI) 2688 return false; 2689 [[fallthrough]]; 2690 } 2691 case Intrinsic::assume: 2692 case Intrinsic::invariant_start: 2693 case Intrinsic::invariant_end: 2694 case Intrinsic::lifetime_start: 2695 case Intrinsic::lifetime_end: 2696 case Intrinsic::objectsize: 2697 Users.emplace_back(I); 2698 continue; 2699 case Intrinsic::launder_invariant_group: 2700 case Intrinsic::strip_invariant_group: 2701 Users.emplace_back(I); 2702 Worklist.push_back(I); 2703 continue; 2704 } 2705 } 2706 2707 if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) { 2708 Users.emplace_back(I); 2709 continue; 2710 } 2711 2712 if (getFreedOperand(cast<CallBase>(I), &TLI) == PI && 2713 getAllocationFamily(I, &TLI) == Family) { 2714 assert(Family); 2715 Users.emplace_back(I); 2716 continue; 2717 } 2718 2719 if (getReallocatedOperand(cast<CallBase>(I)) == PI && 2720 getAllocationFamily(I, &TLI) == Family) { 2721 assert(Family); 2722 Users.emplace_back(I); 2723 Worklist.push_back(I); 2724 continue; 2725 } 2726 2727 return false; 2728 2729 case Instruction::Store: { 2730 StoreInst *SI = cast<StoreInst>(I); 2731 if (SI->isVolatile() || SI->getPointerOperand() != PI) 2732 return false; 2733 Users.emplace_back(I); 2734 continue; 2735 } 2736 } 2737 llvm_unreachable("missing a return?"); 2738 } 2739 } while (!Worklist.empty()); 2740 return true; 2741 } 2742 2743 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) { 2744 assert(isa<AllocaInst>(MI) || isRemovableAlloc(&cast<CallBase>(MI), &TLI)); 2745 2746 // If we have a malloc call which is only used in any amount of comparisons to 2747 // null and free calls, delete the calls and replace the comparisons with true 2748 // or false as appropriate. 2749 2750 // This is based on the principle that we can substitute our own allocation 2751 // function (which will never return null) rather than knowledge of the 2752 // specific function being called. In some sense this can change the permitted 2753 // outputs of a program (when we convert a malloc to an alloca, the fact that 2754 // the allocation is now on the stack is potentially visible, for example), 2755 // but we believe in a permissible manner. 2756 SmallVector<WeakTrackingVH, 64> Users; 2757 2758 // If we are removing an alloca with a dbg.declare, insert dbg.value calls 2759 // before each store. 2760 SmallVector<DbgVariableIntrinsic *, 8> DVIs; 2761 SmallVector<DPValue *, 8> DPVs; 2762 std::unique_ptr<DIBuilder> DIB; 2763 if (isa<AllocaInst>(MI)) { 2764 findDbgUsers(DVIs, &MI, &DPVs); 2765 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false)); 2766 } 2767 2768 if (isAllocSiteRemovable(&MI, Users, TLI)) { 2769 for (unsigned i = 0, e = Users.size(); i != e; ++i) { 2770 // Lowering all @llvm.objectsize calls first because they may 2771 // use a bitcast/GEP of the alloca we are removing. 
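// (Lowering replaces each @llvm.objectsize call with its computed size, so any
// bitcast/GEP of the alloca that only fed that call becomes dead before the
// second loop below erases the remaining users.)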
2772 if (!Users[i]) 2773 continue; 2774 2775 Instruction *I = cast<Instruction>(&*Users[i]); 2776 2777 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 2778 if (II->getIntrinsicID() == Intrinsic::objectsize) { 2779 SmallVector<Instruction *> InsertedInstructions; 2780 Value *Result = lowerObjectSizeCall( 2781 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions); 2782 for (Instruction *Inserted : InsertedInstructions) 2783 Worklist.add(Inserted); 2784 replaceInstUsesWith(*I, Result); 2785 eraseInstFromFunction(*I); 2786 Users[i] = nullptr; // Skip examining in the next loop. 2787 } 2788 } 2789 } 2790 for (unsigned i = 0, e = Users.size(); i != e; ++i) { 2791 if (!Users[i]) 2792 continue; 2793 2794 Instruction *I = cast<Instruction>(&*Users[i]); 2795 2796 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) { 2797 replaceInstUsesWith(*C, 2798 ConstantInt::get(Type::getInt1Ty(C->getContext()), 2799 C->isFalseWhenEqual())); 2800 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 2801 for (auto *DVI : DVIs) 2802 if (DVI->isAddressOfVariable()) 2803 ConvertDebugDeclareToDebugValue(DVI, SI, *DIB); 2804 for (auto *DPV : DPVs) 2805 if (DPV->isAddressOfVariable()) 2806 ConvertDebugDeclareToDebugValue(DPV, SI, *DIB); 2807 } else { 2808 // Casts, GEP, or anything else: we're about to delete this instruction, 2809 // so it can not have any valid uses. 2810 replaceInstUsesWith(*I, PoisonValue::get(I->getType())); 2811 } 2812 eraseInstFromFunction(*I); 2813 } 2814 2815 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) { 2816 // Replace invoke with a NOP intrinsic to maintain the original CFG 2817 Module *M = II->getModule(); 2818 Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing); 2819 InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(), 2820 std::nullopt, "", II->getParent()); 2821 } 2822 2823 // Remove debug intrinsics which describe the value contained within the 2824 // alloca. In addition to removing dbg.{declare,addr} which simply point to 2825 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.: 2826 // 2827 // ``` 2828 // define void @foo(i32 %0) { 2829 // %a = alloca i32 ; Deleted. 2830 // store i32 %0, i32* %a 2831 // dbg.value(i32 %0, "arg0") ; Not deleted. 2832 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted. 2833 // call void @trivially_inlinable_no_op(i32* %a) 2834 // ret void 2835 // } 2836 // ``` 2837 // 2838 // This may not be required if we stop describing the contents of allocas 2839 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in 2840 // the LowerDbgDeclare utility. 2841 // 2842 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the 2843 // "arg0" dbg.value may be stale after the call. However, failing to remove 2844 // the DW_OP_deref dbg.value causes large gaps in location coverage. 2845 // 2846 // FIXME: the Assignment Tracking project has now likely made this 2847 // redundant (and it's sometimes harmful). 2848 for (auto *DVI : DVIs) 2849 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref()) 2850 DVI->eraseFromParent(); 2851 for (auto *DPV : DPVs) 2852 if (DPV->isAddressOfVariable() || DPV->getExpression()->startsWithDeref()) 2853 DPV->eraseFromParent(); 2854 2855 return eraseInstFromFunction(MI); 2856 } 2857 return nullptr; 2858 } 2859 2860 /// Move the call to free before a NULL test. 2861 /// 2862 /// Check if this free is accessed after its argument has been test 2863 /// against NULL (property 0). 
2864 /// If yes, it is legal to move this call in its predecessor block.
2865 ///
2866 /// The move is performed only if the block containing the call to free
2867 /// will be removed, i.e.:
2868 /// 1. it has only one predecessor P, and P has two successors
2869 /// 2. it contains the call, noops, and an unconditional branch
2870 /// 3. its successor is the same as its predecessor's successor
2871 ///
2872 /// Profitability is not a concern here; this function should
2873 /// be called only if the caller knows this transformation would be
2874 /// profitable (e.g., for code size).
2875 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
2876 const DataLayout &DL) {
2877 Value *Op = FI.getArgOperand(0);
2878 BasicBlock *FreeInstrBB = FI.getParent();
2879 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
2880
2881 // Validate part of constraint #1: Only one predecessor
2882 // FIXME: We can extend the number of predecessors, but in that case, we
2883 // would duplicate the call to free in each predecessor and it may
2884 // not be profitable even for code size.
2885 if (!PredBB)
2886 return nullptr;
2887
2888 // Validate constraint #2: Does this block contain only the call to
2889 // free, noops, and an unconditional branch?
2890 BasicBlock *SuccBB;
2891 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
2892 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
2893 return nullptr;
2894
2895 // If there are only 2 instructions in the block, at this point,
2896 // they are the call to free and the unconditional branch.
2897 // If there are more than 2 instructions, check that they are noops,
2898 // i.e., they won't hurt the performance of the generated code.
2899 if (FreeInstrBB->size() != 2) {
2900 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
2901 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
2902 continue;
2903 auto *Cast = dyn_cast<CastInst>(&Inst);
2904 if (!Cast || !Cast->isNoopCast(DL))
2905 return nullptr;
2906 }
2907 }
2908 // Validate the rest of constraint #1 by matching on the pred branch.
2909 Instruction *TI = PredBB->getTerminator();
2910 BasicBlock *TrueBB, *FalseBB;
2911 ICmpInst::Predicate Pred;
2912 if (!match(TI, m_Br(m_ICmp(Pred,
2913 m_CombineOr(m_Specific(Op),
2914 m_Specific(Op->stripPointerCasts())),
2915 m_Zero()),
2916 TrueBB, FalseBB)))
2917 return nullptr;
2918 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
2919 return nullptr;
2920
2921 // Validate constraint #3: Ensure the null case just falls through.
2922 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
2923 return nullptr;
2924 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
2925 "Broken CFG: missing edge from predecessor to successor");
2926
2927 // At this point, we know that everything in FreeInstrBB can be moved
2928 // before TI.
2929 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
2930 if (&Instr == FreeInstrBBTerminator)
2931 break;
2932 Instr.moveBeforePreserving(TI);
2933 }
2934 assert(FreeInstrBB->size() == 1 &&
2935 "Only the branch instruction should remain");
2936
2937 // Now that we've moved the call to free before the NULL check, we have to
2938 // remove any attributes on its parameter that imply it's non-null, because
2939 // those attributes might have only been valid because of the NULL check, and
2940 // we can get miscompiles if we keep them. This is conservative if non-null is
2941 // also implied by something other than the NULL check, but it's guaranteed to
2942 // be correct, and the conservativeness won't matter in practice, since the
2943 // attributes are irrelevant for the call to free itself and the pointer
2944 // shouldn't be used after the call.
2945 AttributeList Attrs = FI.getAttributes();
2946 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
2947 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
2948 if (Dereferenceable.isValid()) {
2949 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
2950 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
2951 Attribute::Dereferenceable);
2952 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
2953 }
2954 FI.setAttributes(Attrs);
2955
2956 return &FI;
2957 }
2958
2959 Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
2960 // free undef -> unreachable.
2961 if (isa<UndefValue>(Op)) {
2962 // Leave a marker since we can't modify the CFG here.
2963 CreateNonTerminatorUnreachable(&FI);
2964 return eraseInstFromFunction(FI);
2965 }
2966
2967 // If we have 'free null' delete the instruction. This can happen in STL code
2968 // when lots of inlining happens.
2969 if (isa<ConstantPointerNull>(Op))
2970 return eraseInstFromFunction(FI);
2971
2972 // If we had free(realloc(...)) with no intervening uses, then eliminate the
2973 // realloc() entirely.
2974 CallInst *CI = dyn_cast<CallInst>(Op);
2975 if (CI && CI->hasOneUse())
2976 if (Value *ReallocatedOp = getReallocatedOperand(CI))
2977 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
2978
2979 // If we optimize for code size, try to move the call to free before the null
2980 // test so that simplify cfg can remove the empty block and dead code
2981 // elimination can remove the branch. I.e., this helps to turn something like:
2982 // if (foo) free(foo);
2983 // into
2984 // free(foo);
2985 //
2986 // Note that we can only do this for 'free' and not for any flavor of
2987 // 'operator delete'; there is no 'operator delete' symbol for which we are
2988 // permitted to invent a call, even if we're passing in a null pointer.
2989 if (MinimizeSize) {
2990 LibFunc Func;
2991 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
2992 if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
2993 return I;
2994 }
2995
2996 return nullptr;
2997 }
2998
2999 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
3000 // Nothing for now.
3001 return nullptr;
3002 }
3003
3004 // WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
3005 bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
3006 // Try to remove the previous instruction if it must lead to unreachable.
3007 // This includes instructions like stores and "llvm.assume" that may not get
3008 // removed by simple dead code elimination.
3009 bool Changed = false;
3010 while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
3011 // While we theoretically can erase an EH pad, that would result in a block that
3012 // used to start with an EH pad no longer starting with one, which is invalid.
3013 // To make it valid, we'd need to fix up predecessors to no longer refer to
3014 // this block, but that changes CFG, which is not allowed in InstCombine.
3015 if (Prev->isEHPad())
3016 break; // Cannot drop any more instructions. We're done here.
3017 3018 if (!isGuaranteedToTransferExecutionToSuccessor(Prev)) 3019 break; // Can not drop any more instructions. We're done here. 3020 // Otherwise, this instruction can be freely erased, 3021 // even if it is not side-effect free. 3022 3023 // A value may still have uses before we process it here (for example, in 3024 // another unreachable block), so convert those to poison. 3025 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType())); 3026 eraseInstFromFunction(*Prev); 3027 Changed = true; 3028 } 3029 return Changed; 3030 } 3031 3032 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) { 3033 removeInstructionsBeforeUnreachable(I); 3034 return nullptr; 3035 } 3036 3037 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) { 3038 assert(BI.isUnconditional() && "Only for unconditional branches."); 3039 3040 // If this store is the second-to-last instruction in the basic block 3041 // (excluding debug info and bitcasts of pointers) and if the block ends with 3042 // an unconditional branch, try to move the store to the successor block. 3043 3044 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) { 3045 auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) { 3046 return BBI->isDebugOrPseudoInst() || 3047 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()); 3048 }; 3049 3050 BasicBlock::iterator FirstInstr = BBI->getParent()->begin(); 3051 do { 3052 if (BBI != FirstInstr) 3053 --BBI; 3054 } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI)); 3055 3056 return dyn_cast<StoreInst>(BBI); 3057 }; 3058 3059 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI))) 3060 if (mergeStoreIntoSuccessor(*SI)) 3061 return &BI; 3062 3063 return nullptr; 3064 } 3065 3066 void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To, 3067 SmallVectorImpl<BasicBlock *> &Worklist) { 3068 if (!DeadEdges.insert({From, To}).second) 3069 return; 3070 3071 // Replace phi node operands in successor with poison. 3072 for (PHINode &PN : To->phis()) 3073 for (Use &U : PN.incoming_values()) 3074 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) { 3075 replaceUse(U, PoisonValue::get(PN.getType())); 3076 addToWorklist(&PN); 3077 MadeIRChange = true; 3078 } 3079 3080 Worklist.push_back(To); 3081 } 3082 3083 // Under the assumption that I is unreachable, remove it and following 3084 // instructions. Changes are reported directly to MadeIRChange. 3085 void InstCombinerImpl::handleUnreachableFrom( 3086 Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) { 3087 BasicBlock *BB = I->getParent(); 3088 for (Instruction &Inst : make_early_inc_range( 3089 make_range(std::next(BB->getTerminator()->getReverseIterator()), 3090 std::next(I->getReverseIterator())))) { 3091 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) { 3092 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType())); 3093 MadeIRChange = true; 3094 } 3095 if (Inst.isEHPad() || Inst.getType()->isTokenTy()) 3096 continue; 3097 // RemoveDIs: erase debug-info on this instruction manually. 3098 Inst.dropDbgValues(); 3099 eraseInstFromFunction(Inst); 3100 MadeIRChange = true; 3101 } 3102 3103 // RemoveDIs: to match behaviour in dbg.value mode, drop debug-info on 3104 // terminator too. 3105 BB->getTerminator()->dropDbgValues(); 3106 3107 // Handle potentially dead successors. 
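  // Every outgoing edge of BB is now dead: record each edge and queue the
  // successors so handlePotentiallyDeadBlocks() can check whether they still
  // have any live predecessors.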
3108 for (BasicBlock *Succ : successors(BB)) 3109 addDeadEdge(BB, Succ, Worklist); 3110 } 3111 3112 void InstCombinerImpl::handlePotentiallyDeadBlocks( 3113 SmallVectorImpl<BasicBlock *> &Worklist) { 3114 while (!Worklist.empty()) { 3115 BasicBlock *BB = Worklist.pop_back_val(); 3116 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) { 3117 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred); 3118 })) 3119 continue; 3120 3121 handleUnreachableFrom(&BB->front(), Worklist); 3122 } 3123 } 3124 3125 void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB, 3126 BasicBlock *LiveSucc) { 3127 SmallVector<BasicBlock *> Worklist; 3128 for (BasicBlock *Succ : successors(BB)) { 3129 // The live successor isn't dead. 3130 if (Succ == LiveSucc) 3131 continue; 3132 3133 addDeadEdge(BB, Succ, Worklist); 3134 } 3135 3136 handlePotentiallyDeadBlocks(Worklist); 3137 } 3138 3139 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) { 3140 if (BI.isUnconditional()) 3141 return visitUnconditionalBranchInst(BI); 3142 3143 // Change br (not X), label True, label False to: br X, label False, True 3144 Value *Cond = BI.getCondition(); 3145 Value *X; 3146 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) { 3147 // Swap Destinations and condition... 3148 BI.swapSuccessors(); 3149 return replaceOperand(BI, 0, X); 3150 } 3151 3152 // Canonicalize logical-and-with-invert as logical-or-with-invert. 3153 // This is done by inverting the condition and swapping successors: 3154 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T 3155 Value *Y; 3156 if (isa<SelectInst>(Cond) && 3157 match(Cond, 3158 m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) { 3159 Value *NotX = Builder.CreateNot(X, "not." + X->getName()); 3160 Value *Or = Builder.CreateLogicalOr(NotX, Y); 3161 BI.swapSuccessors(); 3162 return replaceOperand(BI, 0, Or); 3163 } 3164 3165 // If the condition is irrelevant, remove the use so that other 3166 // transforms on the condition become more effective. 3167 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1)) 3168 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType())); 3169 3170 // Canonicalize, for example, fcmp_one -> fcmp_oeq. 3171 CmpInst::Predicate Pred; 3172 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) && 3173 !isCanonicalPredicate(Pred)) { 3174 // Swap destinations and condition. 3175 auto *Cmp = cast<CmpInst>(Cond); 3176 Cmp->setPredicate(CmpInst::getInversePredicate(Pred)); 3177 BI.swapSuccessors(); 3178 Worklist.push(Cmp); 3179 return &BI; 3180 } 3181 3182 if (isa<UndefValue>(Cond)) { 3183 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr); 3184 return nullptr; 3185 } 3186 if (auto *CI = dyn_cast<ConstantInt>(Cond)) { 3187 handlePotentiallyDeadSuccessors(BI.getParent(), 3188 BI.getSuccessor(!CI->getZExtValue())); 3189 return nullptr; 3190 } 3191 3192 DC.registerBranch(&BI); 3193 return nullptr; 3194 } 3195 3196 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) { 3197 Value *Cond = SI.getCondition(); 3198 Value *Op0; 3199 ConstantInt *AddRHS; 3200 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) { 3201 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'. 
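    // The case values are rewritten with the same wrapping subtraction that
    // undoes the add, so each rewritten case still selects exactly the same
    // successor as before.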
3202 for (auto Case : SI.cases()) { 3203 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS); 3204 assert(isa<ConstantInt>(NewCase) && 3205 "Result of expression should be constant"); 3206 Case.setValue(cast<ConstantInt>(NewCase)); 3207 } 3208 return replaceOperand(SI, 0, Op0); 3209 } 3210 3211 ConstantInt *SubLHS; 3212 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) { 3213 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'. 3214 for (auto Case : SI.cases()) { 3215 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue()); 3216 assert(isa<ConstantInt>(NewCase) && 3217 "Result of expression should be constant"); 3218 Case.setValue(cast<ConstantInt>(NewCase)); 3219 } 3220 return replaceOperand(SI, 0, Op0); 3221 } 3222 3223 uint64_t ShiftAmt; 3224 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) && 3225 ShiftAmt < Op0->getType()->getScalarSizeInBits() && 3226 all_of(SI.cases(), [&](const auto &Case) { 3227 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt; 3228 })) { 3229 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'. 3230 OverflowingBinaryOperator *Shl = cast<OverflowingBinaryOperator>(Cond); 3231 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() || 3232 Shl->hasOneUse()) { 3233 Value *NewCond = Op0; 3234 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) { 3235 // If the shift may wrap, we need to mask off the shifted bits. 3236 unsigned BitWidth = Op0->getType()->getScalarSizeInBits(); 3237 NewCond = Builder.CreateAnd( 3238 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt)); 3239 } 3240 for (auto Case : SI.cases()) { 3241 const APInt &CaseVal = Case.getCaseValue()->getValue(); 3242 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt) 3243 : CaseVal.lshr(ShiftAmt); 3244 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase)); 3245 } 3246 return replaceOperand(SI, 0, NewCond); 3247 } 3248 } 3249 3250 // Fold switch(zext/sext(X)) into switch(X) if possible. 3251 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) { 3252 bool IsZExt = isa<ZExtInst>(Cond); 3253 Type *SrcTy = Op0->getType(); 3254 unsigned NewWidth = SrcTy->getScalarSizeInBits(); 3255 3256 if (all_of(SI.cases(), [&](const auto &Case) { 3257 const APInt &CaseVal = Case.getCaseValue()->getValue(); 3258 return IsZExt ? CaseVal.isIntN(NewWidth) 3259 : CaseVal.isSignedIntN(NewWidth); 3260 })) { 3261 for (auto &Case : SI.cases()) { 3262 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth); 3263 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase)); 3264 } 3265 return replaceOperand(SI, 0, Op0); 3266 } 3267 } 3268 3269 KnownBits Known = computeKnownBits(Cond, 0, &SI); 3270 unsigned LeadingKnownZeros = Known.countMinLeadingZeros(); 3271 unsigned LeadingKnownOnes = Known.countMinLeadingOnes(); 3272 3273 // Compute the number of leading bits we can ignore. 3274 // TODO: A better way to determine this would use ComputeNumSignBits(). 3275 for (const auto &C : SI.cases()) { 3276 LeadingKnownZeros = 3277 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero()); 3278 LeadingKnownOnes = 3279 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one()); 3280 } 3281 3282 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes); 3283 3284 // Shrink the condition operand if the new type is smaller than the old type. 3285 // But do not shrink to a non-standard type, because backend can't generate 3286 // good code for that yet. 
3287 // TODO: We can make it aggressive again after fixing PR39569. 3288 if (NewWidth > 0 && NewWidth < Known.getBitWidth() && 3289 shouldChangeType(Known.getBitWidth(), NewWidth)) { 3290 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth); 3291 Builder.SetInsertPoint(&SI); 3292 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc"); 3293 3294 for (auto Case : SI.cases()) { 3295 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth); 3296 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase)); 3297 } 3298 return replaceOperand(SI, 0, NewCond); 3299 } 3300 3301 if (isa<UndefValue>(Cond)) { 3302 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr); 3303 return nullptr; 3304 } 3305 if (auto *CI = dyn_cast<ConstantInt>(Cond)) { 3306 handlePotentiallyDeadSuccessors(SI.getParent(), 3307 SI.findCaseValue(CI)->getCaseSuccessor()); 3308 return nullptr; 3309 } 3310 3311 return nullptr; 3312 } 3313 3314 Instruction * 3315 InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) { 3316 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand()); 3317 if (!WO) 3318 return nullptr; 3319 3320 Intrinsic::ID OvID = WO->getIntrinsicID(); 3321 const APInt *C = nullptr; 3322 if (match(WO->getRHS(), m_APIntAllowUndef(C))) { 3323 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow || 3324 OvID == Intrinsic::umul_with_overflow)) { 3325 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X 3326 if (C->isAllOnes()) 3327 return BinaryOperator::CreateNeg(WO->getLHS()); 3328 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n 3329 if (C->isPowerOf2()) { 3330 return BinaryOperator::CreateShl( 3331 WO->getLHS(), 3332 ConstantInt::get(WO->getLHS()->getType(), C->logBase2())); 3333 } 3334 } 3335 } 3336 3337 // We're extracting from an overflow intrinsic. See if we're the only user. 3338 // That allows us to simplify multiple result intrinsics to simpler things 3339 // that just get one value. 3340 if (!WO->hasOneUse()) 3341 return nullptr; 3342 3343 // Check if we're grabbing only the result of a 'with overflow' intrinsic 3344 // and replace it with a traditional binary instruction. 3345 if (*EV.idx_begin() == 0) { 3346 Instruction::BinaryOps BinOp = WO->getBinaryOp(); 3347 Value *LHS = WO->getLHS(), *RHS = WO->getRHS(); 3348 // Replace the old instruction's uses with poison. 3349 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType())); 3350 eraseInstFromFunction(*WO); 3351 return BinaryOperator::Create(BinOp, LHS, RHS); 3352 } 3353 3354 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst"); 3355 3356 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS. 3357 if (OvID == Intrinsic::usub_with_overflow) 3358 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS()); 3359 3360 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but 3361 // +1 is not possible because we assume signed values. 3362 if (OvID == Intrinsic::smul_with_overflow && 3363 WO->getLHS()->getType()->isIntOrIntVectorTy(1)) 3364 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS()); 3365 3366 // If only the overflow result is used, and the right hand side is a 3367 // constant (or constant splat), we can remove the intrinsic by directly 3368 // checking for overflow. 3369 if (C) { 3370 // Compute the no-wrap range for LHS given RHS=C, then construct an 3371 // equivalent icmp, potentially using an offset. 
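    // For example (illustrative only): for (uadd_with_overflow i8 %x, 1) the
    // no-wrap region of %x is [0, 255), so the overflow bit is equivalent to
    // icmp uge i8 %x, 255, i.e. %x == 255.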
3372 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion( 3373 WO->getBinaryOp(), *C, WO->getNoWrapKind()); 3374 3375 CmpInst::Predicate Pred; 3376 APInt NewRHSC, Offset; 3377 NWR.getEquivalentICmp(Pred, NewRHSC, Offset); 3378 auto *OpTy = WO->getRHS()->getType(); 3379 auto *NewLHS = WO->getLHS(); 3380 if (Offset != 0) 3381 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset)); 3382 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS, 3383 ConstantInt::get(OpTy, NewRHSC)); 3384 } 3385 3386 return nullptr; 3387 } 3388 3389 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) { 3390 Value *Agg = EV.getAggregateOperand(); 3391 3392 if (!EV.hasIndices()) 3393 return replaceInstUsesWith(EV, Agg); 3394 3395 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(), 3396 SQ.getWithInstruction(&EV))) 3397 return replaceInstUsesWith(EV, V); 3398 3399 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) { 3400 // We're extracting from an insertvalue instruction, compare the indices 3401 const unsigned *exti, *exte, *insi, *inse; 3402 for (exti = EV.idx_begin(), insi = IV->idx_begin(), 3403 exte = EV.idx_end(), inse = IV->idx_end(); 3404 exti != exte && insi != inse; 3405 ++exti, ++insi) { 3406 if (*insi != *exti) 3407 // The insert and extract both reference distinctly different elements. 3408 // This means the extract is not influenced by the insert, and we can 3409 // replace the aggregate operand of the extract with the aggregate 3410 // operand of the insert. i.e., replace 3411 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 3412 // %E = extractvalue { i32, { i32 } } %I, 0 3413 // with 3414 // %E = extractvalue { i32, { i32 } } %A, 0 3415 return ExtractValueInst::Create(IV->getAggregateOperand(), 3416 EV.getIndices()); 3417 } 3418 if (exti == exte && insi == inse) 3419 // Both iterators are at the end: Index lists are identical. Replace 3420 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 3421 // %C = extractvalue { i32, { i32 } } %B, 1, 0 3422 // with "i32 42" 3423 return replaceInstUsesWith(EV, IV->getInsertedValueOperand()); 3424 if (exti == exte) { 3425 // The extract list is a prefix of the insert list. i.e. replace 3426 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 3427 // %E = extractvalue { i32, { i32 } } %I, 1 3428 // with 3429 // %X = extractvalue { i32, { i32 } } %A, 1 3430 // %E = insertvalue { i32 } %X, i32 42, 0 3431 // by switching the order of the insert and extract (though the 3432 // insertvalue should be left in, since it may have other uses). 3433 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(), 3434 EV.getIndices()); 3435 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(), 3436 ArrayRef(insi, inse)); 3437 } 3438 if (insi == inse) 3439 // The insert list is a prefix of the extract list 3440 // We can simply remove the common indices from the extract and make it 3441 // operate on the inserted value instead of the insertvalue result. 
3442       // i.e., replace
3443       // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
3444       // %E = extractvalue { i32, { i32 } } %I, 1, 0
3445       // with
3446       // %E = extractvalue { i32 } { i32 42 }, 0
3447       return ExtractValueInst::Create(IV->getInsertedValueOperand(),
3448                                       ArrayRef(exti, exte));
3449   }
3450 
3451   if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
3452     return R;
3453 
3454   if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
3455     // Bail out if the aggregate contains a scalable vector type.
3456     if (auto *STy = dyn_cast<StructType>(Agg->getType());
3457         STy && STy->containsScalableVectorType())
3458       return nullptr;
3459 
3460     // If the (non-volatile) load only has one use, we can rewrite this to a
3461     // load from a GEP. This reduces the size of the load. If a load is used
3462     // only by extractvalue instructions then this either must have been
3463     // optimized before, or it is a struct with padding, in which case we
3464     // don't want to do the transformation as it loses padding knowledge.
3465     if (L->isSimple() && L->hasOneUse()) {
3466       // extractvalue has integer indices, getelementptr has Value*s. Convert.
3467       SmallVector<Value*, 4> Indices;
3468       // Prefix an i32 0 since we need the first element.
3469       Indices.push_back(Builder.getInt32(0));
3470       for (unsigned Idx : EV.indices())
3471         Indices.push_back(Builder.getInt32(Idx));
3472 
3473       // We need to insert these at the location of the old load, not at that of
3474       // the extractvalue.
3475       Builder.SetInsertPoint(L);
3476       Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
3477                                              L->getPointerOperand(), Indices);
3478       Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
3479       // Whatever aliasing information we had for the original load must also
3480       // hold for the smaller load, so propagate the annotations.
3481       NL->setAAMetadata(L->getAAMetadata());
3482       // Returning the load directly will cause the main loop to insert it in
3483       // the wrong spot, so use replaceInstUsesWith().
3484       return replaceInstUsesWith(EV, NL);
3485     }
3486   }
3487 
3488   if (auto *PN = dyn_cast<PHINode>(Agg))
3489     if (Instruction *Res = foldOpIntoPhi(EV, PN))
3490       return Res;
3491 
3492   // We could simplify extracts from other values. Note that nested extracts may
3493   // already be simplified implicitly by the above: extract (extract (insert) )
3494   // will be translated into extract ( insert ( extract ) ) first and then just
3495   // the value inserted, if appropriate. Similarly for extracts from single-use
3496   // loads: extract (extract (load)) will be translated to extract (load (gep))
3497   // and if again single-use then via load (gep (gep)) to load (gep).
3498   // However, double extracts from e.g. function arguments or return values
3499   // aren't handled yet.
3500   return nullptr;
3501 }
3502 
3503 /// Return 'true' if the given typeinfo will match anything.
3504 static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
3505   switch (Personality) {
3506   case EHPersonality::GNU_C:
3507   case EHPersonality::GNU_C_SjLj:
3508   case EHPersonality::Rust:
3509     // The GCC C EH and Rust personalities only exist to support cleanups, so
3510     // it's not clear what the semantics of catch clauses are.
3511     return false;
3512   case EHPersonality::Unknown:
3513     return false;
3514   case EHPersonality::GNU_Ada:
3515     // While __gnat_all_others_value will match any Ada exception, it doesn't
3516     // match foreign exceptions (or didn't, before gcc-4.7).
3517 return false; 3518 case EHPersonality::GNU_CXX: 3519 case EHPersonality::GNU_CXX_SjLj: 3520 case EHPersonality::GNU_ObjC: 3521 case EHPersonality::MSVC_X86SEH: 3522 case EHPersonality::MSVC_TableSEH: 3523 case EHPersonality::MSVC_CXX: 3524 case EHPersonality::CoreCLR: 3525 case EHPersonality::Wasm_CXX: 3526 case EHPersonality::XL_CXX: 3527 return TypeInfo->isNullValue(); 3528 } 3529 llvm_unreachable("invalid enum"); 3530 } 3531 3532 static bool shorter_filter(const Value *LHS, const Value *RHS) { 3533 return 3534 cast<ArrayType>(LHS->getType())->getNumElements() 3535 < 3536 cast<ArrayType>(RHS->getType())->getNumElements(); 3537 } 3538 3539 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) { 3540 // The logic here should be correct for any real-world personality function. 3541 // However if that turns out not to be true, the offending logic can always 3542 // be conditioned on the personality function, like the catch-all logic is. 3543 EHPersonality Personality = 3544 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn()); 3545 3546 // Simplify the list of clauses, eg by removing repeated catch clauses 3547 // (these are often created by inlining). 3548 bool MakeNewInstruction = false; // If true, recreate using the following: 3549 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction; 3550 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup. 3551 3552 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already. 3553 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) { 3554 bool isLastClause = i + 1 == e; 3555 if (LI.isCatch(i)) { 3556 // A catch clause. 3557 Constant *CatchClause = LI.getClause(i); 3558 Constant *TypeInfo = CatchClause->stripPointerCasts(); 3559 3560 // If we already saw this clause, there is no point in having a second 3561 // copy of it. 3562 if (AlreadyCaught.insert(TypeInfo).second) { 3563 // This catch clause was not already seen. 3564 NewClauses.push_back(CatchClause); 3565 } else { 3566 // Repeated catch clause - drop the redundant copy. 3567 MakeNewInstruction = true; 3568 } 3569 3570 // If this is a catch-all then there is no point in keeping any following 3571 // clauses or marking the landingpad as having a cleanup. 3572 if (isCatchAll(Personality, TypeInfo)) { 3573 if (!isLastClause) 3574 MakeNewInstruction = true; 3575 CleanupFlag = false; 3576 break; 3577 } 3578 } else { 3579 // A filter clause. If any of the filter elements were already caught 3580 // then they can be dropped from the filter. It is tempting to try to 3581 // exploit the filter further by saying that any typeinfo that does not 3582 // occur in the filter can't be caught later (and thus can be dropped). 3583 // However this would be wrong, since typeinfos can match without being 3584 // equal (for example if one represents a C++ class, and the other some 3585 // class derived from it). 3586 assert(LI.isFilter(i) && "Unsupported landingpad clause!"); 3587 Constant *FilterClause = LI.getClause(i); 3588 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType()); 3589 unsigned NumTypeInfos = FilterType->getNumElements(); 3590 3591 // An empty filter catches everything, so there is no point in keeping any 3592 // following clauses or marking the landingpad as having a cleanup. By 3593 // dealing with this case here the following code is made a bit simpler. 
3594 if (!NumTypeInfos) { 3595 NewClauses.push_back(FilterClause); 3596 if (!isLastClause) 3597 MakeNewInstruction = true; 3598 CleanupFlag = false; 3599 break; 3600 } 3601 3602 bool MakeNewFilter = false; // If true, make a new filter. 3603 SmallVector<Constant *, 16> NewFilterElts; // New elements. 3604 if (isa<ConstantAggregateZero>(FilterClause)) { 3605 // Not an empty filter - it contains at least one null typeinfo. 3606 assert(NumTypeInfos > 0 && "Should have handled empty filter already!"); 3607 Constant *TypeInfo = 3608 Constant::getNullValue(FilterType->getElementType()); 3609 // If this typeinfo is a catch-all then the filter can never match. 3610 if (isCatchAll(Personality, TypeInfo)) { 3611 // Throw the filter away. 3612 MakeNewInstruction = true; 3613 continue; 3614 } 3615 3616 // There is no point in having multiple copies of this typeinfo, so 3617 // discard all but the first copy if there is more than one. 3618 NewFilterElts.push_back(TypeInfo); 3619 if (NumTypeInfos > 1) 3620 MakeNewFilter = true; 3621 } else { 3622 ConstantArray *Filter = cast<ConstantArray>(FilterClause); 3623 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements. 3624 NewFilterElts.reserve(NumTypeInfos); 3625 3626 // Remove any filter elements that were already caught or that already 3627 // occurred in the filter. While there, see if any of the elements are 3628 // catch-alls. If so, the filter can be discarded. 3629 bool SawCatchAll = false; 3630 for (unsigned j = 0; j != NumTypeInfos; ++j) { 3631 Constant *Elt = Filter->getOperand(j); 3632 Constant *TypeInfo = Elt->stripPointerCasts(); 3633 if (isCatchAll(Personality, TypeInfo)) { 3634 // This element is a catch-all. Bail out, noting this fact. 3635 SawCatchAll = true; 3636 break; 3637 } 3638 3639 // Even if we've seen a type in a catch clause, we don't want to 3640 // remove it from the filter. An unexpected type handler may be 3641 // set up for a call site which throws an exception of the same 3642 // type caught. In order for the exception thrown by the unexpected 3643 // handler to propagate correctly, the filter must be correctly 3644 // described for the call site. 3645 // 3646 // Example: 3647 // 3648 // void unexpected() { throw 1;} 3649 // void foo() throw (int) { 3650 // std::set_unexpected(unexpected); 3651 // try { 3652 // throw 2.0; 3653 // } catch (int i) {} 3654 // } 3655 3656 // There is no point in having multiple copies of the same typeinfo in 3657 // a filter, so only add it if we didn't already. 3658 if (SeenInFilter.insert(TypeInfo).second) 3659 NewFilterElts.push_back(cast<Constant>(Elt)); 3660 } 3661 // A filter containing a catch-all cannot match anything by definition. 3662 if (SawCatchAll) { 3663 // Throw the filter away. 3664 MakeNewInstruction = true; 3665 continue; 3666 } 3667 3668 // If we dropped something from the filter, make a new one. 3669 if (NewFilterElts.size() < NumTypeInfos) 3670 MakeNewFilter = true; 3671 } 3672 if (MakeNewFilter) { 3673 FilterType = ArrayType::get(FilterType->getElementType(), 3674 NewFilterElts.size()); 3675 FilterClause = ConstantArray::get(FilterType, NewFilterElts); 3676 MakeNewInstruction = true; 3677 } 3678 3679 NewClauses.push_back(FilterClause); 3680 3681 // If the new filter is empty then it will catch everything so there is 3682 // no point in keeping any following clauses or marking the landingpad 3683 // as having a cleanup. The case of the original filter being empty was 3684 // already handled above. 
3685 if (MakeNewFilter && !NewFilterElts.size()) { 3686 assert(MakeNewInstruction && "New filter but not a new instruction!"); 3687 CleanupFlag = false; 3688 break; 3689 } 3690 } 3691 } 3692 3693 // If several filters occur in a row then reorder them so that the shortest 3694 // filters come first (those with the smallest number of elements). This is 3695 // advantageous because shorter filters are more likely to match, speeding up 3696 // unwinding, but mostly because it increases the effectiveness of the other 3697 // filter optimizations below. 3698 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) { 3699 unsigned j; 3700 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters. 3701 for (j = i; j != e; ++j) 3702 if (!isa<ArrayType>(NewClauses[j]->getType())) 3703 break; 3704 3705 // Check whether the filters are already sorted by length. We need to know 3706 // if sorting them is actually going to do anything so that we only make a 3707 // new landingpad instruction if it does. 3708 for (unsigned k = i; k + 1 < j; ++k) 3709 if (shorter_filter(NewClauses[k+1], NewClauses[k])) { 3710 // Not sorted, so sort the filters now. Doing an unstable sort would be 3711 // correct too but reordering filters pointlessly might confuse users. 3712 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j, 3713 shorter_filter); 3714 MakeNewInstruction = true; 3715 break; 3716 } 3717 3718 // Look for the next batch of filters. 3719 i = j + 1; 3720 } 3721 3722 // If typeinfos matched if and only if equal, then the elements of a filter L 3723 // that occurs later than a filter F could be replaced by the intersection of 3724 // the elements of F and L. In reality two typeinfos can match without being 3725 // equal (for example if one represents a C++ class, and the other some class 3726 // derived from it) so it would be wrong to perform this transform in general. 3727 // However the transform is correct and useful if F is a subset of L. In that 3728 // case L can be replaced by F, and thus removed altogether since repeating a 3729 // filter is pointless. So here we look at all pairs of filters F and L where 3730 // L follows F in the list of clauses, and remove L if every element of F is 3731 // an element of L. This can occur when inlining C++ functions with exception 3732 // specifications. 3733 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) { 3734 // Examine each filter in turn. 3735 Value *Filter = NewClauses[i]; 3736 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType()); 3737 if (!FTy) 3738 // Not a filter - skip it. 3739 continue; 3740 unsigned FElts = FTy->getNumElements(); 3741 // Examine each filter following this one. Doing this backwards means that 3742 // we don't have to worry about filters disappearing under us when removed. 3743 for (unsigned j = NewClauses.size() - 1; j != i; --j) { 3744 Value *LFilter = NewClauses[j]; 3745 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType()); 3746 if (!LTy) 3747 // Not a filter - skip it. 3748 continue; 3749 // If Filter is a subset of LFilter, i.e. every element of Filter is also 3750 // an element of LFilter, then discard LFilter. 3751 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j; 3752 // If Filter is empty then it is a subset of LFilter. 3753 if (!FElts) { 3754 // Discard LFilter. 3755 NewClauses.erase(J); 3756 MakeNewInstruction = true; 3757 // Move on to the next filter. 
3758 continue; 3759 } 3760 unsigned LElts = LTy->getNumElements(); 3761 // If Filter is longer than LFilter then it cannot be a subset of it. 3762 if (FElts > LElts) 3763 // Move on to the next filter. 3764 continue; 3765 // At this point we know that LFilter has at least one element. 3766 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros. 3767 // Filter is a subset of LFilter iff Filter contains only zeros (as we 3768 // already know that Filter is not longer than LFilter). 3769 if (isa<ConstantAggregateZero>(Filter)) { 3770 assert(FElts <= LElts && "Should have handled this case earlier!"); 3771 // Discard LFilter. 3772 NewClauses.erase(J); 3773 MakeNewInstruction = true; 3774 } 3775 // Move on to the next filter. 3776 continue; 3777 } 3778 ConstantArray *LArray = cast<ConstantArray>(LFilter); 3779 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros. 3780 // Since Filter is non-empty and contains only zeros, it is a subset of 3781 // LFilter iff LFilter contains a zero. 3782 assert(FElts > 0 && "Should have eliminated the empty filter earlier!"); 3783 for (unsigned l = 0; l != LElts; ++l) 3784 if (LArray->getOperand(l)->isNullValue()) { 3785 // LFilter contains a zero - discard it. 3786 NewClauses.erase(J); 3787 MakeNewInstruction = true; 3788 break; 3789 } 3790 // Move on to the next filter. 3791 continue; 3792 } 3793 // At this point we know that both filters are ConstantArrays. Loop over 3794 // operands to see whether every element of Filter is also an element of 3795 // LFilter. Since filters tend to be short this is probably faster than 3796 // using a method that scales nicely. 3797 ConstantArray *FArray = cast<ConstantArray>(Filter); 3798 bool AllFound = true; 3799 for (unsigned f = 0; f != FElts; ++f) { 3800 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts(); 3801 AllFound = false; 3802 for (unsigned l = 0; l != LElts; ++l) { 3803 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts(); 3804 if (LTypeInfo == FTypeInfo) { 3805 AllFound = true; 3806 break; 3807 } 3808 } 3809 if (!AllFound) 3810 break; 3811 } 3812 if (AllFound) { 3813 // Discard LFilter. 3814 NewClauses.erase(J); 3815 MakeNewInstruction = true; 3816 } 3817 // Move on to the next filter. 3818 } 3819 } 3820 3821 // If we changed any of the clauses, replace the old landingpad instruction 3822 // with a new one. 3823 if (MakeNewInstruction) { 3824 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(), 3825 NewClauses.size()); 3826 for (unsigned i = 0, e = NewClauses.size(); i != e; ++i) 3827 NLI->addClause(NewClauses[i]); 3828 // A landing pad with no clauses must have the cleanup flag set. It is 3829 // theoretically possible, though highly unlikely, that we eliminated all 3830 // clauses. If so, force the cleanup flag to true. 3831 if (NewClauses.empty()) 3832 CleanupFlag = true; 3833 NLI->setCleanup(CleanupFlag); 3834 return NLI; 3835 } 3836 3837 // Even if none of the clauses changed, we may nonetheless have understood 3838 // that the cleanup flag is pointless. Clear it if so. 3839 if (LI.isCleanup() != CleanupFlag) { 3840 assert(!CleanupFlag && "Adding a cleanup, not removing one?!"); 3841 LI.setCleanup(CleanupFlag); 3842 return &LI; 3843 } 3844 3845 return nullptr; 3846 } 3847 3848 Value * 3849 InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) { 3850 // Try to push freeze through instructions that propagate but don't produce 3851 // poison as far as possible. 
If the operand of the freeze meets three
3852 // conditions: 1) it has a single use, 2) it does not produce poison, and
3853 // 3) all but one of its operands are guaranteed non-poison, then push the
3854 // freeze through to the one operand that is not guaranteed non-poison. The
3855 // actual transform is as follows.
3856 //   Op1 = ...                        ; Op1 can be poison
3857 //   Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use; its operands
3858 //                                     ; other than Op1 are non-poison
3859 //   ... = Freeze(Op0)
3860 // =>
3861 //   Op1 = ...
3862 //   Op1.fr = Freeze(Op1)
3863 //   ... = Inst(Op1.fr, NonPoisonOps...)
3864   auto *OrigOp = OrigFI.getOperand(0);
3865   auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
3866 
3867   // While we could change the other users of OrigOp to use freeze(OrigOp), that
3868   // potentially reduces their optimization potential, so let's only do this if
3869   // OrigOp is only used by the freeze.
3870   if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
3871     return nullptr;
3872 
3873   // We can't push the freeze through an instruction which can itself create
3874   // poison. If the only source of new poison is flags, we can simply
3875   // strip them (since we know the only use is the freeze and nothing can
3876   // benefit from them.)
3877   if (canCreateUndefOrPoison(cast<Operator>(OrigOp),
3878                              /*ConsiderFlagsAndMetadata*/ false))
3879     return nullptr;
3880 
3881   // If an operand is guaranteed not to be poison, there is no need to add
3882   // freeze to it. So we first find the operand that is not guaranteed to be
3883   // poison.
3884   Use *MaybePoisonOperand = nullptr;
3885   for (Use &U : OrigOpInst->operands()) {
3886     if (isa<MetadataAsValue>(U.get()) ||
3887         isGuaranteedNotToBeUndefOrPoison(U.get()))
3888       continue;
3889     if (!MaybePoisonOperand)
3890       MaybePoisonOperand = &U;
3891     else
3892       return nullptr;
3893   }
3894 
3895   OrigOpInst->dropPoisonGeneratingFlagsAndMetadata();
3896 
3897   // If all operands are guaranteed to be non-poison, we can drop freeze.
3898   if (!MaybePoisonOperand)
3899     return OrigOp;
3900 
3901   Builder.SetInsertPoint(OrigOpInst);
3902   auto *FrozenMaybePoisonOperand = Builder.CreateFreeze(
3903       MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");
3904 
3905   replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
3906   return OrigOp;
3907 }
3908 
3909 Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
3910                                                         PHINode *PN) {
3911   // Detect whether this is a recurrence with a start value and some number of
3912   // backedge values. We'll check whether we can push the freeze through the
3913   // backedge values (possibly dropping poison flags along the way) until we
3914   // reach the phi again. In that case, we can move the freeze to the start
3915   // value.
3916   Use *StartU = nullptr;
3917   SmallVector<Value *> Worklist;
3918   for (Use &U : PN->incoming_values()) {
3919     if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
3920       // Add backedge value to worklist.
3921       Worklist.push_back(U.get());
3922       continue;
3923     }
3924 
3925     // Don't bother handling multiple start values.
3926     if (StartU)
3927       return nullptr;
3928     StartU = &U;
3929   }
3930 
3931   if (!StartU || Worklist.empty())
3932     return nullptr; // Not a recurrence.
3933 
3934   Value *StartV = StartU->get();
3935   BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
3936   bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
3937   // We can't insert freeze if the start value is the result of the
3938   // terminator (e.g. an invoke).
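  // (The freeze would have to be inserted after the terminator that defines
  // the value, and there is no such insertion point in StartBB.)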
3939 if (StartNeedsFreeze && StartBB->getTerminator() == StartV) 3940 return nullptr; 3941 3942 SmallPtrSet<Value *, 32> Visited; 3943 SmallVector<Instruction *> DropFlags; 3944 while (!Worklist.empty()) { 3945 Value *V = Worklist.pop_back_val(); 3946 if (!Visited.insert(V).second) 3947 continue; 3948 3949 if (Visited.size() > 32) 3950 return nullptr; // Limit the total number of values we inspect. 3951 3952 // Assume that PN is non-poison, because it will be after the transform. 3953 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V)) 3954 continue; 3955 3956 Instruction *I = dyn_cast<Instruction>(V); 3957 if (!I || canCreateUndefOrPoison(cast<Operator>(I), 3958 /*ConsiderFlagsAndMetadata*/ false)) 3959 return nullptr; 3960 3961 DropFlags.push_back(I); 3962 append_range(Worklist, I->operands()); 3963 } 3964 3965 for (Instruction *I : DropFlags) 3966 I->dropPoisonGeneratingFlagsAndMetadata(); 3967 3968 if (StartNeedsFreeze) { 3969 Builder.SetInsertPoint(StartBB->getTerminator()); 3970 Value *FrozenStartV = Builder.CreateFreeze(StartV, 3971 StartV->getName() + ".fr"); 3972 replaceUse(*StartU, FrozenStartV); 3973 } 3974 return replaceInstUsesWith(FI, PN); 3975 } 3976 3977 bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) { 3978 Value *Op = FI.getOperand(0); 3979 3980 if (isa<Constant>(Op) || Op->hasOneUse()) 3981 return false; 3982 3983 // Move the freeze directly after the definition of its operand, so that 3984 // it dominates the maximum number of uses. Note that it may not dominate 3985 // *all* uses if the operand is an invoke/callbr and the use is in a phi on 3986 // the normal/default destination. This is why the domination check in the 3987 // replacement below is still necessary. 3988 BasicBlock::iterator MoveBefore; 3989 if (isa<Argument>(Op)) { 3990 MoveBefore = 3991 FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca(); 3992 } else { 3993 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef(); 3994 if (!MoveBeforeOpt) 3995 return false; 3996 MoveBefore = *MoveBeforeOpt; 3997 } 3998 3999 // Don't move to the position of a debug intrinsic. 4000 if (isa<DbgInfoIntrinsic>(MoveBefore)) 4001 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator(); 4002 // Re-point iterator to come after any debug-info records, if we're 4003 // running in "RemoveDIs" mode 4004 MoveBefore.setHeadBit(false); 4005 4006 bool Changed = false; 4007 if (&FI != &*MoveBefore) { 4008 FI.moveBefore(*MoveBefore->getParent(), MoveBefore); 4009 Changed = true; 4010 } 4011 4012 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool { 4013 bool Dominates = DT.dominates(&FI, U); 4014 Changed |= Dominates; 4015 return Dominates; 4016 }); 4017 4018 return Changed; 4019 } 4020 4021 // Check if any direct or bitcast user of this value is a shuffle instruction. 
4022 static bool isUsedWithinShuffleVector(Value *V) { 4023 for (auto *U : V->users()) { 4024 if (isa<ShuffleVectorInst>(U)) 4025 return true; 4026 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U)) 4027 return true; 4028 } 4029 return false; 4030 } 4031 4032 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) { 4033 Value *Op0 = I.getOperand(0); 4034 4035 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I))) 4036 return replaceInstUsesWith(I, V); 4037 4038 // freeze (phi const, x) --> phi const, (freeze x) 4039 if (auto *PN = dyn_cast<PHINode>(Op0)) { 4040 if (Instruction *NV = foldOpIntoPhi(I, PN)) 4041 return NV; 4042 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN)) 4043 return NV; 4044 } 4045 4046 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I)) 4047 return replaceInstUsesWith(I, NI); 4048 4049 // If I is freeze(undef), check its uses and fold it to a fixed constant. 4050 // - or: pick -1 4051 // - select's condition: if the true value is constant, choose it by making 4052 // the condition true. 4053 // - default: pick 0 4054 // 4055 // Note that this transform is intentionally done here rather than 4056 // via an analysis in InstSimplify or at individual user sites. That is 4057 // because we must produce the same value for all uses of the freeze - 4058 // it's the reason "freeze" exists! 4059 // 4060 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid 4061 // duplicating logic for binops at least. 4062 auto getUndefReplacement = [&I](Type *Ty) { 4063 Constant *BestValue = nullptr; 4064 Constant *NullValue = Constant::getNullValue(Ty); 4065 for (const auto *U : I.users()) { 4066 Constant *C = NullValue; 4067 if (match(U, m_Or(m_Value(), m_Value()))) 4068 C = ConstantInt::getAllOnesValue(Ty); 4069 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value()))) 4070 C = ConstantInt::getTrue(Ty); 4071 4072 if (!BestValue) 4073 BestValue = C; 4074 else if (BestValue != C) 4075 BestValue = NullValue; 4076 } 4077 assert(BestValue && "Must have at least one use"); 4078 return BestValue; 4079 }; 4080 4081 if (match(Op0, m_Undef())) { 4082 // Don't fold freeze(undef/poison) if it's used as a vector operand in 4083 // a shuffle. This may improve codegen for shuffles that allow 4084 // unspecified inputs. 4085 if (isUsedWithinShuffleVector(&I)) 4086 return nullptr; 4087 return replaceInstUsesWith(I, getUndefReplacement(I.getType())); 4088 } 4089 4090 Constant *C; 4091 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) { 4092 Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType()); 4093 return replaceInstUsesWith(I, Constant::replaceUndefsWith(C, ReplaceC)); 4094 } 4095 4096 // Replace uses of Op with freeze(Op). 4097 if (freezeOtherUses(I)) 4098 return &I; 4099 4100 return nullptr; 4101 } 4102 4103 /// Check for case where the call writes to an otherwise dead alloca. This 4104 /// shows up for unused out-params in idiomatic C/C++ code. Note that this 4105 /// helper *only* analyzes the write; doesn't check any other legality aspect. 4106 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) { 4107 auto *CB = dyn_cast<CallBase>(I); 4108 if (!CB) 4109 // TODO: handle e.g. store to alloca here - only worth doing if we extend 4110 // to allow reload along used path as described below. Otherwise, this 4111 // is simply a store to a dead allocation which will be removed. 
4112 return false; 4113 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI); 4114 if (!Dest) 4115 return false; 4116 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr)); 4117 if (!AI) 4118 // TODO: allow malloc? 4119 return false; 4120 // TODO: allow memory access dominated by move point? Note that since AI 4121 // could have a reference to itself captured by the call, we would need to 4122 // account for cycles in doing so. 4123 SmallVector<const User *> AllocaUsers; 4124 SmallPtrSet<const User *, 4> Visited; 4125 auto pushUsers = [&](const Instruction &I) { 4126 for (const User *U : I.users()) { 4127 if (Visited.insert(U).second) 4128 AllocaUsers.push_back(U); 4129 } 4130 }; 4131 pushUsers(*AI); 4132 while (!AllocaUsers.empty()) { 4133 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val()); 4134 if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) || 4135 isa<AddrSpaceCastInst>(UserI)) { 4136 pushUsers(*UserI); 4137 continue; 4138 } 4139 if (UserI == CB) 4140 continue; 4141 // TODO: support lifetime.start/end here 4142 return false; 4143 } 4144 return true; 4145 } 4146 4147 /// Try to move the specified instruction from its current block into the 4148 /// beginning of DestBlock, which can only happen if it's safe to move the 4149 /// instruction past all of the instructions between it and the end of its 4150 /// block. 4151 bool InstCombinerImpl::tryToSinkInstruction(Instruction *I, 4152 BasicBlock *DestBlock) { 4153 BasicBlock *SrcBlock = I->getParent(); 4154 4155 // Cannot move control-flow-involving, volatile loads, vaarg, etc. 4156 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() || 4157 I->isTerminator()) 4158 return false; 4159 4160 // Do not sink static or dynamic alloca instructions. Static allocas must 4161 // remain in the entry block, and dynamic allocas must not be sunk in between 4162 // a stacksave / stackrestore pair, which would incorrectly shorten its 4163 // lifetime. 4164 if (isa<AllocaInst>(I)) 4165 return false; 4166 4167 // Do not sink into catchswitch blocks. 4168 if (isa<CatchSwitchInst>(DestBlock->getTerminator())) 4169 return false; 4170 4171 // Do not sink convergent call instructions. 4172 if (auto *CI = dyn_cast<CallInst>(I)) { 4173 if (CI->isConvergent()) 4174 return false; 4175 } 4176 4177 // Unless we can prove that the memory write isn't visibile except on the 4178 // path we're sinking to, we must bail. 4179 if (I->mayWriteToMemory()) { 4180 if (!SoleWriteToDeadLocal(I, TLI)) 4181 return false; 4182 } 4183 4184 // We can only sink load instructions if there is nothing between the load and 4185 // the end of block that could change the value. 4186 if (I->mayReadFromMemory()) { 4187 // We don't want to do any sophisticated alias analysis, so we only check 4188 // the instructions after I in I's parent block if we try to sink to its 4189 // successor block. 4190 if (DestBlock->getUniquePredecessor() != I->getParent()) 4191 return false; 4192 for (BasicBlock::iterator Scan = std::next(I->getIterator()), 4193 E = I->getParent()->end(); 4194 Scan != E; ++Scan) 4195 if (Scan->mayWriteToMemory()) 4196 return false; 4197 } 4198 4199 I->dropDroppableUses([&](const Use *U) { 4200 auto *I = dyn_cast<Instruction>(U->getUser()); 4201 if (I && I->getParent() != DestBlock) { 4202 Worklist.add(I); 4203 return true; 4204 } 4205 return false; 4206 }); 4207 /// FIXME: We could remove droppable uses that are not dominated by 4208 /// the new position. 
4209 4210 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt(); 4211 I->moveBefore(*DestBlock, InsertPos); 4212 ++NumSunkInst; 4213 4214 // Also sink all related debug uses from the source basic block. Otherwise we 4215 // get debug use before the def. Attempt to salvage debug uses first, to 4216 // maximise the range variables have location for. If we cannot salvage, then 4217 // mark the location undef: we know it was supposed to receive a new location 4218 // here, but that computation has been sunk. 4219 SmallVector<DbgVariableIntrinsic *, 2> DbgUsers; 4220 findDbgUsers(DbgUsers, I); 4221 4222 // For all debug values in the destination block, the sunk instruction 4223 // will still be available, so they do not need to be dropped. 4224 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSalvage; 4225 SmallVector<DPValue *, 2> DPValuesToSalvage; 4226 for (auto &DbgUser : DbgUsers) 4227 if (DbgUser->getParent() != DestBlock) 4228 DbgUsersToSalvage.push_back(DbgUser); 4229 4230 // Process the sinking DbgUsersToSalvage in reverse order, as we only want 4231 // to clone the last appearing debug intrinsic for each given variable. 4232 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink; 4233 for (DbgVariableIntrinsic *DVI : DbgUsersToSalvage) 4234 if (DVI->getParent() == SrcBlock) 4235 DbgUsersToSink.push_back(DVI); 4236 llvm::sort(DbgUsersToSink, 4237 [](auto *A, auto *B) { return B->comesBefore(A); }); 4238 4239 SmallVector<DbgVariableIntrinsic *, 2> DIIClones; 4240 SmallSet<DebugVariable, 4> SunkVariables; 4241 for (auto *User : DbgUsersToSink) { 4242 // A dbg.declare instruction should not be cloned, since there can only be 4243 // one per variable fragment. It should be left in the original place 4244 // because the sunk instruction is not an alloca (otherwise we could not be 4245 // here). 4246 if (isa<DbgDeclareInst>(User)) 4247 continue; 4248 4249 DebugVariable DbgUserVariable = 4250 DebugVariable(User->getVariable(), User->getExpression(), 4251 User->getDebugLoc()->getInlinedAt()); 4252 4253 if (!SunkVariables.insert(DbgUserVariable).second) 4254 continue; 4255 4256 // Leave dbg.assign intrinsics in their original positions and there should 4257 // be no need to insert a clone. 4258 if (isa<DbgAssignIntrinsic>(User)) 4259 continue; 4260 4261 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone())); 4262 if (isa<DbgDeclareInst>(User) && isa<CastInst>(I)) 4263 DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0)); 4264 LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n'); 4265 } 4266 4267 // Perform salvaging without the clones, then sink the clones. 4268 if (!DIIClones.empty()) { 4269 // RemoveDIs: pass in empty vector of DPValues until we get to instrumenting 4270 // this pass. 4271 SmallVector<DPValue *, 1> DummyDPValues; 4272 salvageDebugInfoForDbgValues(*I, DbgUsersToSalvage, DummyDPValues); 4273 // The clones are in reverse order of original appearance, reverse again to 4274 // maintain the original order. 4275 for (auto &DIIClone : llvm::reverse(DIIClones)) { 4276 DIIClone->insertBefore(&*InsertPos); 4277 LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n'); 4278 } 4279 } 4280 4281 return true; 4282 } 4283 4284 bool InstCombinerImpl::run() { 4285 while (!Worklist.isEmpty()) { 4286 // Walk deferred instructions in reverse order, and push them to the 4287 // worklist, which means they'll end up popped from the worklist in-order. 4288 while (Instruction *I = Worklist.popDeferred()) { 4289 // Check to see if we can DCE the instruction. 
We do this already here to 4290 // reduce the number of uses and thus allow other folds to trigger. 4291 // Note that eraseInstFromFunction() may push additional instructions on 4292 // the deferred worklist, so this will DCE whole instruction chains. 4293 if (isInstructionTriviallyDead(I, &TLI)) { 4294 eraseInstFromFunction(*I); 4295 ++NumDeadInst; 4296 continue; 4297 } 4298 4299 Worklist.push(I); 4300 } 4301 4302 Instruction *I = Worklist.removeOne(); 4303 if (I == nullptr) continue; // skip null values. 4304 4305 // Check to see if we can DCE the instruction. 4306 if (isInstructionTriviallyDead(I, &TLI)) { 4307 eraseInstFromFunction(*I); 4308 ++NumDeadInst; 4309 continue; 4310 } 4311 4312 if (!DebugCounter::shouldExecute(VisitCounter)) 4313 continue; 4314 4315 // See if we can trivially sink this instruction to its user if we can 4316 // prove that the successor is not executed more frequently than our block. 4317 // Return the UserBlock if successful. 4318 auto getOptionalSinkBlockForInst = 4319 [this](Instruction *I) -> std::optional<BasicBlock *> { 4320 if (!EnableCodeSinking) 4321 return std::nullopt; 4322 4323 BasicBlock *BB = I->getParent(); 4324 BasicBlock *UserParent = nullptr; 4325 unsigned NumUsers = 0; 4326 4327 for (auto *U : I->users()) { 4328 if (U->isDroppable()) 4329 continue; 4330 if (NumUsers > MaxSinkNumUsers) 4331 return std::nullopt; 4332 4333 Instruction *UserInst = cast<Instruction>(U); 4334 // Special handling for Phi nodes - get the block the use occurs in. 4335 if (PHINode *PN = dyn_cast<PHINode>(UserInst)) { 4336 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { 4337 if (PN->getIncomingValue(i) == I) { 4338 // Bail out if we have uses in different blocks. We don't do any 4339 // sophisticated analysis (i.e finding NearestCommonDominator of 4340 // these use blocks). 4341 if (UserParent && UserParent != PN->getIncomingBlock(i)) 4342 return std::nullopt; 4343 UserParent = PN->getIncomingBlock(i); 4344 } 4345 } 4346 assert(UserParent && "expected to find user block!"); 4347 } else { 4348 if (UserParent && UserParent != UserInst->getParent()) 4349 return std::nullopt; 4350 UserParent = UserInst->getParent(); 4351 } 4352 4353 // Make sure these checks are done only once, naturally we do the checks 4354 // the first time we get the userparent, this will save compile time. 4355 if (NumUsers == 0) { 4356 // Try sinking to another block. If that block is unreachable, then do 4357 // not bother. SimplifyCFG should handle it. 4358 if (UserParent == BB || !DT.isReachableFromEntry(UserParent)) 4359 return std::nullopt; 4360 4361 auto *Term = UserParent->getTerminator(); 4362 // See if the user is one of our successors that has only one 4363 // predecessor, so that we don't have to split the critical edge. 4364 // Another option where we can sink is a block that ends with a 4365 // terminator that does not pass control to other block (such as 4366 // return or unreachable or resume). In this case: 4367 // - I dominates the User (by SSA form); 4368 // - the User will be executed at most once. 4369 // So sinking I down to User is always profitable or neutral. 4370 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term)) 4371 return std::nullopt; 4372 4373 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?"); 4374 } 4375 4376 NumUsers++; 4377 } 4378 4379 // No user or only has droppable users. 
4380       if (!UserParent)
4381         return std::nullopt;
4382 
4383       return UserParent;
4384     };
4385 
4386     auto OptBB = getOptionalSinkBlockForInst(I);
4387     if (OptBB) {
4388       auto *UserParent = *OptBB;
4389       // Okay, the CFG is simple enough, try to sink this instruction.
4390       if (tryToSinkInstruction(I, UserParent)) {
4391         LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
4392         MadeIRChange = true;
4393         // We'll add uses of the sunk instruction below, but since
4394         // sinking can expose opportunities for its *operands*, add
4395         // them to the worklist.
4396         for (Use &U : I->operands())
4397           if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
4398             Worklist.push(OpI);
4399       }
4400     }
4401 
4402     // Now that we have an instruction, try combining it to simplify it.
4403     Builder.SetInsertPoint(I);
4404     Builder.CollectMetadataToCopy(
4405         I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
4406 
4407 #ifndef NDEBUG
4408     std::string OrigI;
4409 #endif
4410     LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
4411     LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
4412 
4413     if (Instruction *Result = visit(*I)) {
4414       ++NumCombined;
4415       // Should we replace the old instruction with a new one?
4416       if (Result != I) {
4417         LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
4418                           << " New = " << *Result << '\n');
4419 
4420         Result->copyMetadata(*I,
4421                              {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
4422         // Everything uses the new instruction now.
4423         I->replaceAllUsesWith(Result);
4424 
4425         // Move the name to the new instruction first.
4426         Result->takeName(I);
4427 
4428         // Insert the new instruction into the basic block...
4429         BasicBlock *InstParent = I->getParent();
4430         BasicBlock::iterator InsertPos = I->getIterator();
4431 
4432         // Are we replacing a PHI with something that isn't a PHI, or vice versa?
4433         if (isa<PHINode>(Result) != isa<PHINode>(I)) {
4434           // We need to fix up the insertion point.
4435           if (isa<PHINode>(I)) // PHI -> Non-PHI
4436             InsertPos = InstParent->getFirstInsertionPt();
4437           else // Non-PHI -> PHI
4438             InsertPos = InstParent->getFirstNonPHI()->getIterator();
4439         }
4440 
4441         Result->insertInto(InstParent, InsertPos);
4442 
4443         // Push the new instruction and any users onto the worklist.
4444         Worklist.pushUsersToWorkList(*Result);
4445         Worklist.push(Result);
4446 
4447         eraseInstFromFunction(*I);
4448       } else {
4449         LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
4450                           << " New = " << *I << '\n');
4451 
4452         // If the instruction was modified, it's possible that it is now dead.
4453         // If so, remove it.
4454         if (isInstructionTriviallyDead(I, &TLI)) {
4455           eraseInstFromFunction(*I);
4456         } else {
4457           Worklist.pushUsersToWorkList(*I);
4458           Worklist.push(I);
4459         }
4460       }
4461       MadeIRChange = true;
4462     }
4463   }
4464 
4465   Worklist.zap();
4466   return MadeIRChange;
4467 }
4468 
4469 // Track the scopes used by !alias.scope and !noalias. In a function, a
4470 // @llvm.experimental.noalias.scope.decl is only useful if that scope is used
4471 // by both sets. If not, the declaration of the scope can be safely omitted.
4472 // The MDNode of the scope can be omitted as well for the instructions that are
4473 // part of this function. We do not do that at this point, as this might become
4474 // too time consuming to do.
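// For example (illustrative): if some instructions carry !alias.scope lists
// naming a scope but no instruction carries a !noalias list naming that same
// scope, the corresponding @llvm.experimental.noalias.scope.decl provides no
// information and can be removed.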
class AliasScopeTracker {
  SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
  SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;

public:
  void analyse(Instruction *I) {
    // This seems to be faster than checking 'mayReadOrWriteMemory()'.
    if (!I->hasMetadataOtherThanDebugLoc())
      return;

    auto Track = [](Metadata *ScopeList, auto &Container) {
      const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
      if (!MDScopeList || !Container.insert(MDScopeList).second)
        return;
      for (const auto &MDOperand : MDScopeList->operands())
        if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
          Container.insert(MDScope);
    };

    Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
    Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
  }

  bool isNoAliasScopeDeclDead(Instruction *Inst) {
    NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
    if (!Decl)
      return false;

    assert(Decl->use_empty() &&
           "llvm.experimental.noalias.scope.decl in use ?");
    const MDNode *MDSL = Decl->getScopeList();
    assert(MDSL->getNumOperands() == 1 &&
           "llvm.experimental.noalias.scope should refer to a single scope");
    auto &MDOperand = MDSL->getOperand(0);
    if (auto *MD = dyn_cast<MDNode>(MDOperand))
      return !UsedAliasScopesAndLists.contains(MD) ||
             !UsedNoAliasScopesAndLists.contains(MD);

    // Not an MDNode? Throw it away.
    return true;
  }
};

/// Populate the IC worklist from a function, by walking it in reverse
/// post-order and adding all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful. In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code
/// where many instructions are dead or constant). Additionally, if we find a
/// branch whose condition is a known constant, we only visit the reachable
/// successors.
bool InstCombinerImpl::prepareWorklist(
    Function &F, ReversePostOrderTraversal<BasicBlock *> &RPOT) {
  bool MadeIRChange = false;
  SmallPtrSet<BasicBlock *, 32> LiveBlocks;
  SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
  DenseMap<Constant *, Constant *> FoldedConstants;
  AliasScopeTracker SeenAliasScopes;

  auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
    for (BasicBlock *Succ : successors(BB))
      if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
        for (PHINode &PN : Succ->phis())
          for (Use &U : PN.incoming_values())
            if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
              U.set(PoisonValue::get(PN.getType()));
              MadeIRChange = true;
            }
  };

  for (BasicBlock *BB : RPOT) {
    if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
          return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
        })) {
      HandleOnlyLiveSuccessor(BB, nullptr);
      continue;
    }
    LiveBlocks.insert(BB);

    for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
      // Constant-fold the instruction if it is trivially constant.
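      // For example (illustrative): "%x = add i32 2, 3" constant-folds to 5,
      // and all uses of %x are rewritten to use that constant.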
      if (!Inst.use_empty() &&
          (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
        if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
          LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
                            << '\n');
          Inst.replaceAllUsesWith(C);
          ++NumConstProp;
          if (isInstructionTriviallyDead(&Inst, &TLI))
            Inst.eraseFromParent();
          MadeIRChange = true;
          continue;
        }

      // See if we can constant fold its operands.
      for (Use &U : Inst.operands()) {
        if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
          continue;

        auto *C = cast<Constant>(U);
        Constant *&FoldRes = FoldedConstants[C];
        if (!FoldRes)
          FoldRes = ConstantFoldConstant(C, DL, &TLI);

        if (FoldRes != C) {
          LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
                            << "\n    Old = " << *C
                            << "\n    New = " << *FoldRes << '\n');
          U = FoldRes;
          MadeIRChange = true;
        }
      }

      // Skip processing debug and pseudo intrinsics in InstCombine. Processing
      // these call instructions consumes a non-trivial amount of time and
      // provides no value for the optimization.
      if (!Inst.isDebugOrPseudoInst()) {
        InstrsForInstructionWorklist.push_back(&Inst);
        SeenAliasScopes.analyse(&Inst);
      }
    }

    // If this is a branch or switch on a constant, mark only the single
    // live successor. Otherwise assume all successors are live.
    Instruction *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
      if (isa<UndefValue>(BI->getCondition())) {
        // Branch on undef is UB.
        HandleOnlyLiveSuccessor(BB, nullptr);
        continue;
      }
      if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
        bool CondVal = Cond->getZExtValue();
        HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
        continue;
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (isa<UndefValue>(SI->getCondition())) {
        // Switch on undef is UB.
        HandleOnlyLiveSuccessor(BB, nullptr);
        continue;
      }
      if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        HandleOnlyLiveSuccessor(BB,
                                SI->findCaseValue(Cond)->getCaseSuccessor());
        continue;
      }
    }
  }

  // Remove instructions inside unreachable blocks. This prevents the
  // instcombine code from having to deal with some bad special cases, and
  // reduces use counts of instructions.
  for (BasicBlock &BB : F) {
    if (LiveBlocks.count(&BB))
      continue;

    unsigned NumDeadInstInBB;
    unsigned NumDeadDbgInstInBB;
    std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
        removeAllNonTerminatorAndEHPadInstructions(&BB);

    MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
    NumDeadInst += NumDeadInstInBB;
  }

  // Once we've found all of the instructions to add to instcombine's worklist,
  // add them in reverse order. This way instcombine will visit from the top
  // of the function down. This jibes well with the way that it adds all uses
  // of instructions to the worklist after doing a transformation, thus
  // avoiding some N^2 behavior in pathological cases.
  Worklist.reserve(InstrsForInstructionWorklist.size());
  for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
    // DCE instruction if trivially dead. As we iterate in reverse program
    // order here, we will clean up whole chains of dead instructions.
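    // For example (illustrative): if "%b = add i32 %a, 1" has no users and
    // "%a = mul i32 %x, 3" is used only by %b, then visiting %b first deletes
    // it, which leaves %a trivially dead by the time it is visited.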
    if (isInstructionTriviallyDead(Inst, &TLI) ||
        SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
      ++NumDeadInst;
      LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
      salvageDebugInfo(*Inst);
      Inst->eraseFromParent();
      MadeIRChange = true;
      continue;
    }

    Worklist.push(Inst);
  }

  return MadeIRChange;
}

static bool combineInstructionsOverFunction(
    Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
    AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
    DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopInfo *LI, const InstCombineOptions &Opts) {
  auto &DL = F.getParent()->getDataLayout();

  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.
  IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
      F.getContext(), TargetFolder(DL),
      IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
        Worklist.add(I);
        if (auto *Assume = dyn_cast<AssumeInst>(I))
          AC.registerAssumption(Assume);
      }));

  ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());

  // Lower dbg.declare intrinsics, otherwise their value may be clobbered by
  // the instcombiner.
  bool MadeIRChange = false;
  if (ShouldLowerDbgDeclare)
    MadeIRChange = LowerDbgDeclare(F);

  // Iterate while there is work to do.
  unsigned Iteration = 0;
  while (true) {
    ++Iteration;

    if (Iteration > Opts.MaxIterations && !Opts.VerifyFixpoint) {
      LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
                        << " on " << F.getName()
                        << " reached; stopping without verifying fixpoint\n");
      break;
    }

    ++NumWorklistIterations;
    LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
                      << F.getName() << "\n");

    InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
                        ORE, BFI, PSI, DL, LI);
    IC.MaxArraySizeForCombine = MaxArraySize;
    bool MadeChangeInThisIteration = IC.prepareWorklist(F, RPOT);
    MadeChangeInThisIteration |= IC.run();
    if (!MadeChangeInThisIteration)
      break;

    MadeIRChange = true;
    if (Iteration > Opts.MaxIterations) {
      report_fatal_error(
          "Instruction Combining did not reach a fixpoint after " +
          Twine(Opts.MaxIterations) + " iterations");
    }
  }

  if (Iteration == 1)
    ++NumOneIteration;
  else if (Iteration == 2)
    ++NumTwoIterations;
  else if (Iteration == 3)
    ++NumThreeIterations;
  else
    ++NumFourOrMoreIterations;

  return MadeIRChange;
}

InstCombinePass::InstCombinePass(InstCombineOptions Opts) : Options(Opts) {}

void InstCombinePass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  OS << "max-iterations=" << Options.MaxIterations << ";";
  OS << (Options.UseLoopInfo ? "" : "no-") << "use-loop-info;";
  OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
  OS << '>';
}

PreservedAnalyses InstCombinePass::run(Function &F,
                                       FunctionAnalysisManager &AM) {
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);

  // TODO: Only use LoopInfo when the option is set. This requires that the
  // callers in the pass pipeline explicitly set the option.
  auto *LI = AM.getCachedResult<LoopAnalysis>(F);
  if (!LI && Options.UseLoopInfo)
    LI = &AM.getResult<LoopAnalysis>(F);

  auto *AA = &AM.getResult<AAManager>(F);
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  auto *BFI = (PSI && PSI->hasProfileSummary())
                  ? &AM.getResult<BlockFrequencyAnalysis>(F)
                  : nullptr;

  if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
                                       BFI, PSI, LI, Options))
    // No changes, all analyses are preserved.
    return PreservedAnalyses::all();

  // Mark all the analyses that instcombine updates as preserved.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addRequired<TargetTransformInfoWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
}

bool InstructionCombiningPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  // Required analyses.
  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  // Optional analyses.
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  ProfileSummaryInfo *PSI =
      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  BlockFrequencyInfo *BFI =
      (PSI && PSI->hasProfileSummary())
          ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
          : nullptr;

  return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
                                         BFI, PSI, LI, InstCombineOptions());
}

char InstructionCombiningPass::ID = 0;

InstructionCombiningPass::InstructionCombiningPass() : FunctionPass(ID) {
  initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
}

INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
                      "Combine redundant instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
                    "Combine redundant instructions", false, false)

// Initialization Routines
void llvm::initializeInstCombine(PassRegistry &Registry) {
  initializeInstructionCombiningPassPass(Registry);
}

FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstructionCombiningPass();
}
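
// Example usage (illustrative): the new-pass-manager pass can be exercised
// directly with opt, e.g.
//   opt -passes=instcombine -S input.ll
// while legacy pass pipelines obtain the pass via
// createInstructionCombiningPass() above.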