1 //===- InstructionCombining.cpp - Combine multiple instructions -----------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // InstructionCombining - Combine instructions to form fewer, simple 10 // instructions. This pass does not modify the CFG. This pass is where 11 // algebraic simplification happens. 12 // 13 // This pass combines things like: 14 // %Y = add i32 %X, 1 15 // %Z = add i32 %Y, 1 16 // into: 17 // %Z = add i32 %X, 2 18 // 19 // This is a simple worklist driven algorithm. 20 // 21 // This pass guarantees that the following canonicalizations are performed on 22 // the program: 23 // 1. If a binary operator has a constant operand, it is moved to the RHS 24 // 2. Bitwise operators with constant operands are always grouped so that 25 // shifts are performed first, then or's, then and's, then xor's. 26 // 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible 27 // 4. All cmp instructions on boolean values are replaced with logical ops 28 // 5. add X, X is represented as (X*2) => (X << 1) 29 // 6. Multiplies with a power-of-two constant argument are transformed into 30 // shifts. 31 // ... etc. 32 // 33 //===----------------------------------------------------------------------===// 34 35 #include "InstCombineInternal.h" 36 #include "llvm-c/Initialization.h" 37 #include "llvm-c/Transforms/InstCombine.h" 38 #include "llvm/ADT/APInt.h" 39 #include "llvm/ADT/ArrayRef.h" 40 #include "llvm/ADT/DenseMap.h" 41 #include "llvm/ADT/None.h" 42 #include "llvm/ADT/SmallPtrSet.h" 43 #include "llvm/ADT/SmallVector.h" 44 #include "llvm/ADT/Statistic.h" 45 #include "llvm/ADT/TinyPtrVector.h" 46 #include "llvm/Analysis/AliasAnalysis.h" 47 #include "llvm/Analysis/AssumptionCache.h" 48 #include "llvm/Analysis/BasicAliasAnalysis.h" 49 #include "llvm/Analysis/BlockFrequencyInfo.h" 50 #include "llvm/Analysis/CFG.h" 51 #include "llvm/Analysis/ConstantFolding.h" 52 #include "llvm/Analysis/EHPersonalities.h" 53 #include "llvm/Analysis/GlobalsModRef.h" 54 #include "llvm/Analysis/InstructionSimplify.h" 55 #include "llvm/Analysis/LazyBlockFrequencyInfo.h" 56 #include "llvm/Analysis/LoopInfo.h" 57 #include "llvm/Analysis/MemoryBuiltins.h" 58 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 59 #include "llvm/Analysis/ProfileSummaryInfo.h" 60 #include "llvm/Analysis/TargetFolder.h" 61 #include "llvm/Analysis/TargetLibraryInfo.h" 62 #include "llvm/Analysis/TargetTransformInfo.h" 63 #include "llvm/Analysis/ValueTracking.h" 64 #include "llvm/Analysis/VectorUtils.h" 65 #include "llvm/IR/BasicBlock.h" 66 #include "llvm/IR/CFG.h" 67 #include "llvm/IR/Constant.h" 68 #include "llvm/IR/Constants.h" 69 #include "llvm/IR/DIBuilder.h" 70 #include "llvm/IR/DataLayout.h" 71 #include "llvm/IR/DerivedTypes.h" 72 #include "llvm/IR/Dominators.h" 73 #include "llvm/IR/Function.h" 74 #include "llvm/IR/GetElementPtrTypeIterator.h" 75 #include "llvm/IR/IRBuilder.h" 76 #include "llvm/IR/InstrTypes.h" 77 #include "llvm/IR/Instruction.h" 78 #include "llvm/IR/Instructions.h" 79 #include "llvm/IR/IntrinsicInst.h" 80 #include "llvm/IR/Intrinsics.h" 81 #include "llvm/IR/LegacyPassManager.h" 82 #include "llvm/IR/Metadata.h" 83 #include "llvm/IR/Operator.h" 84 #include "llvm/IR/PassManager.h" 85 #include "llvm/IR/PatternMatch.h" 86 #include "llvm/IR/Type.h" 
87 #include "llvm/IR/Use.h" 88 #include "llvm/IR/User.h" 89 #include "llvm/IR/Value.h" 90 #include "llvm/IR/ValueHandle.h" 91 #include "llvm/InitializePasses.h" 92 #include "llvm/Pass.h" 93 #include "llvm/Support/CBindingWrapping.h" 94 #include "llvm/Support/Casting.h" 95 #include "llvm/Support/CommandLine.h" 96 #include "llvm/Support/Compiler.h" 97 #include "llvm/Support/Debug.h" 98 #include "llvm/Support/DebugCounter.h" 99 #include "llvm/Support/ErrorHandling.h" 100 #include "llvm/Support/KnownBits.h" 101 #include "llvm/Support/raw_ostream.h" 102 #include "llvm/Transforms/InstCombine/InstCombine.h" 103 #include "llvm/Transforms/InstCombine/InstCombineWorklist.h" 104 #include "llvm/Transforms/Utils/Local.h" 105 #include <algorithm> 106 #include <cassert> 107 #include <cstdint> 108 #include <memory> 109 #include <string> 110 #include <utility> 111 112 using namespace llvm; 113 using namespace llvm::PatternMatch; 114 115 #define DEBUG_TYPE "instcombine" 116 117 STATISTIC(NumWorklistIterations, 118 "Number of instruction combining iterations performed"); 119 120 STATISTIC(NumCombined , "Number of insts combined"); 121 STATISTIC(NumConstProp, "Number of constant folds"); 122 STATISTIC(NumDeadInst , "Number of dead inst eliminated"); 123 STATISTIC(NumSunkInst , "Number of instructions sunk"); 124 STATISTIC(NumExpand, "Number of expansions"); 125 STATISTIC(NumFactor , "Number of factorizations"); 126 STATISTIC(NumReassoc , "Number of reassociations"); 127 DEBUG_COUNTER(VisitCounter, "instcombine-visit", 128 "Controls which instructions are visited"); 129 130 // FIXME: these limits eventually should be as low as 2. 131 static constexpr unsigned InstCombineDefaultMaxIterations = 1000; 132 #ifndef NDEBUG 133 static constexpr unsigned InstCombineDefaultInfiniteLoopThreshold = 100; 134 #else 135 static constexpr unsigned InstCombineDefaultInfiniteLoopThreshold = 1000; 136 #endif 137 138 static cl::opt<bool> 139 EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), 140 cl::init(true)); 141 142 static cl::opt<unsigned> LimitMaxIterations( 143 "instcombine-max-iterations", 144 cl::desc("Limit the maximum number of instruction combining iterations"), 145 cl::init(InstCombineDefaultMaxIterations)); 146 147 static cl::opt<unsigned> InfiniteLoopDetectionThreshold( 148 "instcombine-infinite-loop-threshold", 149 cl::desc("Number of instruction combining iterations considered an " 150 "infinite loop"), 151 cl::init(InstCombineDefaultInfiniteLoopThreshold), cl::Hidden); 152 153 static cl::opt<unsigned> 154 MaxArraySize("instcombine-maxarray-size", cl::init(1024), 155 cl::desc("Maximum array size considered when doing a combine")); 156 157 // FIXME: Remove this flag when it is no longer necessary to convert 158 // llvm.dbg.declare to avoid inaccurate debug info. Setting this to false 159 // increases variable availability at the cost of accuracy. Variables that 160 // cannot be promoted by mem2reg or SROA will be described as living in memory 161 // for their entire lifetime. However, passes like DSE and instcombine can 162 // delete stores to the alloca, leading to misleading and inaccurate debug 163 // information. This flag can be removed when those passes are fixed. 
164 static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", 165 cl::Hidden, cl::init(true)); 166 167 Optional<Instruction *> 168 InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) { 169 // Handle target specific intrinsics 170 if (II.getCalledFunction()->isTargetIntrinsic()) { 171 return TTI.instCombineIntrinsic(*this, II); 172 } 173 return None; 174 } 175 176 Optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic( 177 IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, 178 bool &KnownBitsComputed) { 179 // Handle target specific intrinsics 180 if (II.getCalledFunction()->isTargetIntrinsic()) { 181 return TTI.simplifyDemandedUseBitsIntrinsic(*this, II, DemandedMask, Known, 182 KnownBitsComputed); 183 } 184 return None; 185 } 186 187 Optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic( 188 IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, 189 APInt &UndefElts3, 190 std::function<void(Instruction *, unsigned, APInt, APInt &)> 191 SimplifyAndSetOp) { 192 // Handle target specific intrinsics 193 if (II.getCalledFunction()->isTargetIntrinsic()) { 194 return TTI.simplifyDemandedVectorEltsIntrinsic( 195 *this, II, DemandedElts, UndefElts, UndefElts2, UndefElts3, 196 SimplifyAndSetOp); 197 } 198 return None; 199 } 200 201 Value *InstCombinerImpl::EmitGEPOffset(User *GEP) { 202 return llvm::EmitGEPOffset(&Builder, DL, GEP); 203 } 204 205 /// Return true if it is desirable to convert an integer computation from a 206 /// given bit width to a new bit width. 207 /// We don't want to convert from a legal to an illegal type or from a smaller 208 /// to a larger illegal type. A width of '1' is always treated as a legal type 209 /// because i1 is a fundamental type in IR, and there are many specialized 210 /// optimizations for i1 types. Widths of 8, 16 or 32 are equally treated as 211 /// legal to convert to, in order to open up more combining opportunities. 212 /// NOTE: this treats i8, i16 and i32 specially, due to them being so common 213 /// from frontend languages. 214 bool InstCombinerImpl::shouldChangeType(unsigned FromWidth, 215 unsigned ToWidth) const { 216 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth); 217 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth); 218 219 // Convert to widths of 8, 16 or 32 even if they are not legal types. Only 220 // shrink types, to prevent infinite loops. 221 if (ToWidth < FromWidth && (ToWidth == 8 || ToWidth == 16 || ToWidth == 32)) 222 return true; 223 224 // If this is a legal integer from type, and the result would be an illegal 225 // type, don't do the transformation. 226 if (FromLegal && !ToLegal) 227 return false; 228 229 // Otherwise, if both are illegal, do not increase the size of the result. We 230 // do allow things like i160 -> i64, but not i64 -> i160. 231 if (!FromLegal && !ToLegal && ToWidth > FromWidth) 232 return false; 233 234 return true; 235 } 236 237 /// Return true if it is desirable to convert a computation from 'From' to 'To'. 238 /// We don't want to convert from a legal to an illegal type or from a smaller 239 /// to a larger illegal type. i1 is always treated as a legal type because it is 240 /// a fundamental type in IR, and there are many specialized optimizations for 241 /// i1 types. 242 bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const { 243 // TODO: This could be extended to allow vectors. Datalayout changes might be 244 // needed to properly support that. 
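  // Illustrative examples of the width-based overload above, assuming a
  // typical 64-bit DataLayout where i8/i16/i32/i64 are legal and i33/i160
  // are not:
  //   shouldChangeType(33, 32)  -> true  (shrink to a desirable width)
  //   shouldChangeType(32, 33)  -> false (legal type to illegal type)
  //   shouldChangeType(160, 64) -> true  (illegal type to legal type)
  //   shouldChangeType(64, 160) -> false (legal type to illegal type)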
245 if (!From->isIntegerTy() || !To->isIntegerTy()) 246 return false; 247 248 unsigned FromWidth = From->getPrimitiveSizeInBits(); 249 unsigned ToWidth = To->getPrimitiveSizeInBits(); 250 return shouldChangeType(FromWidth, ToWidth); 251 } 252 253 // Return true, if No Signed Wrap should be maintained for I. 254 // The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C", 255 // where both B and C should be ConstantInts, results in a constant that does 256 // not overflow. This function only handles the Add and Sub opcodes. For 257 // all other opcodes, the function conservatively returns false. 258 static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) { 259 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I); 260 if (!OBO || !OBO->hasNoSignedWrap()) 261 return false; 262 263 // We reason about Add and Sub Only. 264 Instruction::BinaryOps Opcode = I.getOpcode(); 265 if (Opcode != Instruction::Add && Opcode != Instruction::Sub) 266 return false; 267 268 const APInt *BVal, *CVal; 269 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal))) 270 return false; 271 272 bool Overflow = false; 273 if (Opcode == Instruction::Add) 274 (void)BVal->sadd_ov(*CVal, Overflow); 275 else 276 (void)BVal->ssub_ov(*CVal, Overflow); 277 278 return !Overflow; 279 } 280 281 static bool hasNoUnsignedWrap(BinaryOperator &I) { 282 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I); 283 return OBO && OBO->hasNoUnsignedWrap(); 284 } 285 286 static bool hasNoSignedWrap(BinaryOperator &I) { 287 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I); 288 return OBO && OBO->hasNoSignedWrap(); 289 } 290 291 /// Conservatively clears subclassOptionalData after a reassociation or 292 /// commutation. We preserve fast-math flags when applicable as they can be 293 /// preserved. 294 static void ClearSubclassDataAfterReassociation(BinaryOperator &I) { 295 FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I); 296 if (!FPMO) { 297 I.clearSubclassOptionalData(); 298 return; 299 } 300 301 FastMathFlags FMF = I.getFastMathFlags(); 302 I.clearSubclassOptionalData(); 303 I.setFastMathFlags(FMF); 304 } 305 306 /// Combine constant operands of associative operations either before or after a 307 /// cast to eliminate one of the associative operations: 308 /// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2))) 309 /// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2)) 310 static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, 311 InstCombinerImpl &IC) { 312 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0)); 313 if (!Cast || !Cast->hasOneUse()) 314 return false; 315 316 // TODO: Enhance logic for other casts and remove this check. 317 auto CastOpcode = Cast->getOpcode(); 318 if (CastOpcode != Instruction::ZExt) 319 return false; 320 321 // TODO: Enhance logic for other BinOps and remove this check. 322 if (!BinOp1->isBitwiseLogicOp()) 323 return false; 324 325 auto AssocOpcode = BinOp1->getOpcode(); 326 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0)); 327 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode) 328 return false; 329 330 Constant *C1, *C2; 331 if (!match(BinOp1->getOperand(1), m_Constant(C1)) || 332 !match(BinOp2->getOperand(1), m_Constant(C2))) 333 return false; 334 335 // TODO: This assumes a zext cast. 336 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2 337 // to the destination type might lose bits. 
338 339 // Fold the constants together in the destination type: 340 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC) 341 Type *DestTy = C1->getType(); 342 Constant *CastC2 = ConstantExpr::getCast(CastOpcode, C2, DestTy); 343 Constant *FoldedC = ConstantExpr::get(AssocOpcode, C1, CastC2); 344 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0)); 345 IC.replaceOperand(*BinOp1, 1, FoldedC); 346 return true; 347 } 348 349 /// This performs a few simplifications for operators that are associative or 350 /// commutative: 351 /// 352 /// Commutative operators: 353 /// 354 /// 1. Order operands such that they are listed from right (least complex) to 355 /// left (most complex). This puts constants before unary operators before 356 /// binary operators. 357 /// 358 /// Associative operators: 359 /// 360 /// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. 361 /// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. 362 /// 363 /// Associative and commutative operators: 364 /// 365 /// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. 366 /// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. 367 /// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" 368 /// if C1 and C2 are constants. 369 bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) { 370 Instruction::BinaryOps Opcode = I.getOpcode(); 371 bool Changed = false; 372 373 do { 374 // Order operands such that they are listed from right (least complex) to 375 // left (most complex). This puts constants before unary operators before 376 // binary operators. 377 if (I.isCommutative() && getComplexity(I.getOperand(0)) < 378 getComplexity(I.getOperand(1))) 379 Changed = !I.swapOperands(); 380 381 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0)); 382 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)); 383 384 if (I.isAssociative()) { 385 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. 386 if (Op0 && Op0->getOpcode() == Opcode) { 387 Value *A = Op0->getOperand(0); 388 Value *B = Op0->getOperand(1); 389 Value *C = I.getOperand(1); 390 391 // Does "B op C" simplify? 392 if (Value *V = SimplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) { 393 // It simplifies to V. Form "A op V". 394 replaceOperand(I, 0, A); 395 replaceOperand(I, 1, V); 396 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0); 397 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0); 398 399 // Conservatively clear all optional flags since they may not be 400 // preserved by the reassociation. Reset nsw/nuw based on the above 401 // analysis. 402 ClearSubclassDataAfterReassociation(I); 403 404 // Note: this is only valid because SimplifyBinOp doesn't look at 405 // the operands to Op0. 406 if (IsNUW) 407 I.setHasNoUnsignedWrap(true); 408 409 if (IsNSW) 410 I.setHasNoSignedWrap(true); 411 412 Changed = true; 413 ++NumReassoc; 414 continue; 415 } 416 } 417 418 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. 419 if (Op1 && Op1->getOpcode() == Opcode) { 420 Value *A = I.getOperand(0); 421 Value *B = Op1->getOperand(0); 422 Value *C = Op1->getOperand(1); 423 424 // Does "A op B" simplify? 425 if (Value *V = SimplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) { 426 // It simplifies to V. Form "V op C". 
427 replaceOperand(I, 0, V); 428 replaceOperand(I, 1, C); 429 // Conservatively clear the optional flags, since they may not be 430 // preserved by the reassociation. 431 ClearSubclassDataAfterReassociation(I); 432 Changed = true; 433 ++NumReassoc; 434 continue; 435 } 436 } 437 } 438 439 if (I.isAssociative() && I.isCommutative()) { 440 if (simplifyAssocCastAssoc(&I, *this)) { 441 Changed = true; 442 ++NumReassoc; 443 continue; 444 } 445 446 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. 447 if (Op0 && Op0->getOpcode() == Opcode) { 448 Value *A = Op0->getOperand(0); 449 Value *B = Op0->getOperand(1); 450 Value *C = I.getOperand(1); 451 452 // Does "C op A" simplify? 453 if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) { 454 // It simplifies to V. Form "V op B". 455 replaceOperand(I, 0, V); 456 replaceOperand(I, 1, B); 457 // Conservatively clear the optional flags, since they may not be 458 // preserved by the reassociation. 459 ClearSubclassDataAfterReassociation(I); 460 Changed = true; 461 ++NumReassoc; 462 continue; 463 } 464 } 465 466 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. 467 if (Op1 && Op1->getOpcode() == Opcode) { 468 Value *A = I.getOperand(0); 469 Value *B = Op1->getOperand(0); 470 Value *C = Op1->getOperand(1); 471 472 // Does "C op A" simplify? 473 if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) { 474 // It simplifies to V. Form "B op V". 475 replaceOperand(I, 0, B); 476 replaceOperand(I, 1, V); 477 // Conservatively clear the optional flags, since they may not be 478 // preserved by the reassociation. 479 ClearSubclassDataAfterReassociation(I); 480 Changed = true; 481 ++NumReassoc; 482 continue; 483 } 484 } 485 486 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" 487 // if C1 and C2 are constants. 488 Value *A, *B; 489 Constant *C1, *C2; 490 if (Op0 && Op1 && 491 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode && 492 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) && 493 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2))))) { 494 bool IsNUW = hasNoUnsignedWrap(I) && 495 hasNoUnsignedWrap(*Op0) && 496 hasNoUnsignedWrap(*Op1); 497 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ? 498 BinaryOperator::CreateNUW(Opcode, A, B) : 499 BinaryOperator::Create(Opcode, A, B); 500 501 if (isa<FPMathOperator>(NewBO)) { 502 FastMathFlags Flags = I.getFastMathFlags(); 503 Flags &= Op0->getFastMathFlags(); 504 Flags &= Op1->getFastMathFlags(); 505 NewBO->setFastMathFlags(Flags); 506 } 507 InsertNewInstWith(NewBO, I); 508 NewBO->takeName(Op1); 509 replaceOperand(I, 0, NewBO); 510 replaceOperand(I, 1, ConstantExpr::get(Opcode, C1, C2)); 511 // Conservatively clear the optional flags, since they may not be 512 // preserved by the reassociation. 513 ClearSubclassDataAfterReassociation(I); 514 if (IsNUW) 515 I.setHasNoUnsignedWrap(true); 516 517 Changed = true; 518 continue; 519 } 520 } 521 522 // No further simplifications. 523 return Changed; 524 } while (true); 525 } 526 527 /// Return whether "X LOp (Y ROp Z)" is always equal to 528 /// "(X LOp Y) ROp (X LOp Z)". 
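/// For illustration, the And-over-Or case in IR form:
///   %t = or i8 %y, %z
///   %r = and i8 %x, %t
/// is always equal to
///   %a = and i8 %x, %y
///   %b = and i8 %x, %z
///   %r = or i8 %a, %b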
529 static bool leftDistributesOverRight(Instruction::BinaryOps LOp, 530 Instruction::BinaryOps ROp) { 531 // X & (Y | Z) <--> (X & Y) | (X & Z) 532 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z) 533 if (LOp == Instruction::And) 534 return ROp == Instruction::Or || ROp == Instruction::Xor; 535 536 // X | (Y & Z) <--> (X | Y) & (X | Z) 537 if (LOp == Instruction::Or) 538 return ROp == Instruction::And; 539 540 // X * (Y + Z) <--> (X * Y) + (X * Z) 541 // X * (Y - Z) <--> (X * Y) - (X * Z) 542 if (LOp == Instruction::Mul) 543 return ROp == Instruction::Add || ROp == Instruction::Sub; 544 545 return false; 546 } 547 548 /// Return whether "(X LOp Y) ROp Z" is always equal to 549 /// "(X ROp Z) LOp (Y ROp Z)". 550 static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, 551 Instruction::BinaryOps ROp) { 552 if (Instruction::isCommutative(ROp)) 553 return leftDistributesOverRight(ROp, LOp); 554 555 // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts. 556 return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp); 557 558 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z", 559 // but this requires knowing that the addition does not overflow and other 560 // such subtleties. 561 } 562 563 /// This function returns identity value for given opcode, which can be used to 564 /// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1). 565 static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) { 566 if (isa<Constant>(V)) 567 return nullptr; 568 569 return ConstantExpr::getBinOpIdentity(Opcode, V->getType()); 570 } 571 572 /// This function predicates factorization using distributive laws. By default, 573 /// it just returns the 'Op' inputs. But for special-cases like 574 /// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add 575 /// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to 576 /// allow more factorization opportunities. 577 static Instruction::BinaryOps 578 getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, 579 Value *&LHS, Value *&RHS) { 580 assert(Op && "Expected a binary operator"); 581 LHS = Op->getOperand(0); 582 RHS = Op->getOperand(1); 583 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) { 584 Constant *C; 585 if (match(Op, m_Shl(m_Value(), m_Constant(C)))) { 586 // X << C --> X * (1 << C) 587 RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), C); 588 return Instruction::Mul; 589 } 590 // TODO: We can add other conversions e.g. shr => div etc. 591 } 592 return Op->getOpcode(); 593 } 594 595 /// This tries to simplify binary operations by factorizing out common terms 596 /// (e. g. "(A*B)+(A*C)" -> "A*(B+C)"). 597 Value *InstCombinerImpl::tryFactorization(BinaryOperator &I, 598 Instruction::BinaryOps InnerOpcode, 599 Value *A, Value *B, Value *C, 600 Value *D) { 601 assert(A && B && C && D && "All values must be provided"); 602 603 Value *V = nullptr; 604 Value *SimplifiedInst = nullptr; 605 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 606 Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); 607 608 // Does "X op' Y" always equal "Y op' X"? 609 bool InnerCommutative = Instruction::isCommutative(InnerOpcode); 610 611 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"? 612 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) 613 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the 614 // commutative case, "(A op' B) op (C op' A)"? 
615 if (A == C || (InnerCommutative && A == D)) { 616 if (A != C) 617 std::swap(C, D); 618 // Consider forming "A op' (B op D)". 619 // If "B op D" simplifies then it can be formed with no cost. 620 V = SimplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I)); 621 // If "B op D" doesn't simplify then only go on if both of the existing 622 // operations "A op' B" and "C op' D" will be zapped as no longer used. 623 if (!V && LHS->hasOneUse() && RHS->hasOneUse()) 624 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName()); 625 if (V) { 626 SimplifiedInst = Builder.CreateBinOp(InnerOpcode, A, V); 627 } 628 } 629 630 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"? 631 if (!SimplifiedInst && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) 632 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the 633 // commutative case, "(A op' B) op (B op' D)"? 634 if (B == D || (InnerCommutative && B == C)) { 635 if (B != D) 636 std::swap(C, D); 637 // Consider forming "(A op C) op' B". 638 // If "A op C" simplifies then it can be formed with no cost. 639 V = SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I)); 640 641 // If "A op C" doesn't simplify then only go on if both of the existing 642 // operations "A op' B" and "C op' D" will be zapped as no longer used. 643 if (!V && LHS->hasOneUse() && RHS->hasOneUse()) 644 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName()); 645 if (V) { 646 SimplifiedInst = Builder.CreateBinOp(InnerOpcode, V, B); 647 } 648 } 649 650 if (SimplifiedInst) { 651 ++NumFactor; 652 SimplifiedInst->takeName(&I); 653 654 // Check if we can add NSW/NUW flags to SimplifiedInst. If so, set them. 655 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) { 656 if (isa<OverflowingBinaryOperator>(SimplifiedInst)) { 657 bool HasNSW = false; 658 bool HasNUW = false; 659 if (isa<OverflowingBinaryOperator>(&I)) { 660 HasNSW = I.hasNoSignedWrap(); 661 HasNUW = I.hasNoUnsignedWrap(); 662 } 663 664 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) { 665 HasNSW &= LOBO->hasNoSignedWrap(); 666 HasNUW &= LOBO->hasNoUnsignedWrap(); 667 } 668 669 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) { 670 HasNSW &= ROBO->hasNoSignedWrap(); 671 HasNUW &= ROBO->hasNoUnsignedWrap(); 672 } 673 674 if (TopLevelOpcode == Instruction::Add && 675 InnerOpcode == Instruction::Mul) { 676 // We can propagate 'nsw' if we know that 677 // %Y = mul nsw i16 %X, C 678 // %Z = add nsw i16 %Y, %X 679 // => 680 // %Z = mul nsw i16 %X, C+1 681 // 682 // iff C+1 isn't INT_MIN 683 const APInt *CInt; 684 if (match(V, m_APInt(CInt))) { 685 if (!CInt->isMinSignedValue()) 686 BO->setHasNoSignedWrap(HasNSW); 687 } 688 689 // nuw can be propagated with any constant or nuw value. 690 BO->setHasNoUnsignedWrap(HasNUW); 691 } 692 } 693 } 694 } 695 return SimplifiedInst; 696 } 697 698 /// This tries to simplify binary operations which some other binary operation 699 /// distributes over either by factorizing out common terms 700 /// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in 701 /// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win). 702 /// Returns the simplified value, or null if it didn't simplify. 
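/// For illustration, the factorization case in IR form (types and values are
/// only an example):
///   %ab = mul i32 %a, %b
///   %ac = mul i32 %a, %c
///   %r  = add i32 %ab, %ac
/// becomes
///   %bc = add i32 %b, %c
///   %r  = mul i32 %a, %bc
/// when %ab and %ac have no other uses (or when "add %b, %c" simplifies), as
/// checked in tryFactorization().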
703 Value *InstCombinerImpl::SimplifyUsingDistributiveLaws(BinaryOperator &I) { 704 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 705 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS); 706 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS); 707 Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); 708 709 { 710 // Factorization. 711 Value *A, *B, *C, *D; 712 Instruction::BinaryOps LHSOpcode, RHSOpcode; 713 if (Op0) 714 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B); 715 if (Op1) 716 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D); 717 718 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize 719 // a common term. 720 if (Op0 && Op1 && LHSOpcode == RHSOpcode) 721 if (Value *V = tryFactorization(I, LHSOpcode, A, B, C, D)) 722 return V; 723 724 // The instruction has the form "(A op' B) op (C)". Try to factorize common 725 // term. 726 if (Op0) 727 if (Value *Ident = getIdentityValue(LHSOpcode, RHS)) 728 if (Value *V = tryFactorization(I, LHSOpcode, A, B, RHS, Ident)) 729 return V; 730 731 // The instruction has the form "(B) op (C op' D)". Try to factorize common 732 // term. 733 if (Op1) 734 if (Value *Ident = getIdentityValue(RHSOpcode, LHS)) 735 if (Value *V = tryFactorization(I, RHSOpcode, LHS, Ident, C, D)) 736 return V; 737 } 738 739 // Expansion. 740 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) { 741 // The instruction has the form "(A op' B) op C". See if expanding it out 742 // to "(A op C) op' (B op C)" results in simplifications. 743 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS; 744 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op' 745 746 // Disable the use of undef because it's not safe to distribute undef. 747 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef(); 748 Value *L = SimplifyBinOp(TopLevelOpcode, A, C, SQDistributive); 749 Value *R = SimplifyBinOp(TopLevelOpcode, B, C, SQDistributive); 750 751 // Do "A op C" and "B op C" both simplify? 752 if (L && R) { 753 // They do! Return "L op' R". 754 ++NumExpand; 755 C = Builder.CreateBinOp(InnerOpcode, L, R); 756 C->takeName(&I); 757 return C; 758 } 759 760 // Does "A op C" simplify to the identity value for the inner opcode? 761 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) { 762 // They do! Return "B op C". 763 ++NumExpand; 764 C = Builder.CreateBinOp(TopLevelOpcode, B, C); 765 C->takeName(&I); 766 return C; 767 } 768 769 // Does "B op C" simplify to the identity value for the inner opcode? 770 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) { 771 // They do! Return "A op C". 772 ++NumExpand; 773 C = Builder.CreateBinOp(TopLevelOpcode, A, C); 774 C->takeName(&I); 775 return C; 776 } 777 } 778 779 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) { 780 // The instruction has the form "A op (B op' C)". See if expanding it out 781 // to "(A op B) op' (A op C)" results in simplifications. 782 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1); 783 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op' 784 785 // Disable the use of undef because it's not safe to distribute undef. 786 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef(); 787 Value *L = SimplifyBinOp(TopLevelOpcode, A, B, SQDistributive); 788 Value *R = SimplifyBinOp(TopLevelOpcode, A, C, SQDistributive); 789 790 // Do "A op B" and "A op C" both simplify? 791 if (L && R) { 792 // They do! Return "L op' R". 
793 ++NumExpand; 794 A = Builder.CreateBinOp(InnerOpcode, L, R); 795 A->takeName(&I); 796 return A; 797 } 798 799 // Does "A op B" simplify to the identity value for the inner opcode? 800 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) { 801 // They do! Return "A op C". 802 ++NumExpand; 803 A = Builder.CreateBinOp(TopLevelOpcode, A, C); 804 A->takeName(&I); 805 return A; 806 } 807 808 // Does "A op C" simplify to the identity value for the inner opcode? 809 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) { 810 // They do! Return "A op B". 811 ++NumExpand; 812 A = Builder.CreateBinOp(TopLevelOpcode, A, B); 813 A->takeName(&I); 814 return A; 815 } 816 } 817 818 return SimplifySelectsFeedingBinaryOp(I, LHS, RHS); 819 } 820 821 Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I, 822 Value *LHS, 823 Value *RHS) { 824 Value *A, *B, *C, *D, *E, *F; 825 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C))); 826 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F))); 827 if (!LHSIsSelect && !RHSIsSelect) 828 return nullptr; 829 830 FastMathFlags FMF; 831 BuilderTy::FastMathFlagGuard Guard(Builder); 832 if (isa<FPMathOperator>(&I)) { 833 FMF = I.getFastMathFlags(); 834 Builder.setFastMathFlags(FMF); 835 } 836 837 Instruction::BinaryOps Opcode = I.getOpcode(); 838 SimplifyQuery Q = SQ.getWithInstruction(&I); 839 840 Value *Cond, *True = nullptr, *False = nullptr; 841 if (LHSIsSelect && RHSIsSelect && A == D) { 842 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F) 843 Cond = A; 844 True = SimplifyBinOp(Opcode, B, E, FMF, Q); 845 False = SimplifyBinOp(Opcode, C, F, FMF, Q); 846 847 if (LHS->hasOneUse() && RHS->hasOneUse()) { 848 if (False && !True) 849 True = Builder.CreateBinOp(Opcode, B, E); 850 else if (True && !False) 851 False = Builder.CreateBinOp(Opcode, C, F); 852 } 853 } else if (LHSIsSelect && LHS->hasOneUse()) { 854 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y) 855 Cond = A; 856 True = SimplifyBinOp(Opcode, B, RHS, FMF, Q); 857 False = SimplifyBinOp(Opcode, C, RHS, FMF, Q); 858 } else if (RHSIsSelect && RHS->hasOneUse()) { 859 // X op (D ? E : F) -> D ? (X op E) : (X op F) 860 Cond = D; 861 True = SimplifyBinOp(Opcode, LHS, E, FMF, Q); 862 False = SimplifyBinOp(Opcode, LHS, F, FMF, Q); 863 } 864 865 if (!True || !False) 866 return nullptr; 867 868 Value *SI = Builder.CreateSelect(Cond, True, False); 869 SI->takeName(&I); 870 return SI; 871 } 872 873 /// Freely adapt every user of V as-if V was changed to !V. 874 /// WARNING: only if canFreelyInvertAllUsersOf() said this can be done. 875 void InstCombinerImpl::freelyInvertAllUsersOf(Value *I) { 876 for (User *U : I->users()) { 877 switch (cast<Instruction>(U)->getOpcode()) { 878 case Instruction::Select: { 879 auto *SI = cast<SelectInst>(U); 880 SI->swapValues(); 881 SI->swapProfMetadata(); 882 break; 883 } 884 case Instruction::Br: 885 cast<BranchInst>(U)->swapSuccessors(); // swaps prof metadata too 886 break; 887 case Instruction::Xor: 888 replaceInstUsesWith(cast<Instruction>(*U), I); 889 break; 890 default: 891 llvm_unreachable("Got unexpected user - out of sync with " 892 "canFreelyInvertAllUsersOf() ?"); 893 } 894 } 895 } 896 897 /// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a 898 /// constant zero (which is the 'negate' form). 
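/// For illustration:
///   %n = sub i32 0, %x   ; dyn_castNegVal(%n) returns %x
/// and an integer constant such as i32 -5 is folded to its negation, i32 5.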
899 Value *InstCombinerImpl::dyn_castNegVal(Value *V) const { 900 Value *NegV; 901 if (match(V, m_Neg(m_Value(NegV)))) 902 return NegV; 903 904 // Constants can be considered to be negated values if they can be folded. 905 if (ConstantInt *C = dyn_cast<ConstantInt>(V)) 906 return ConstantExpr::getNeg(C); 907 908 if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V)) 909 if (C->getType()->getElementType()->isIntegerTy()) 910 return ConstantExpr::getNeg(C); 911 912 if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) { 913 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) { 914 Constant *Elt = CV->getAggregateElement(i); 915 if (!Elt) 916 return nullptr; 917 918 if (isa<UndefValue>(Elt)) 919 continue; 920 921 if (!isa<ConstantInt>(Elt)) 922 return nullptr; 923 } 924 return ConstantExpr::getNeg(CV); 925 } 926 927 return nullptr; 928 } 929 930 static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO, 931 InstCombiner::BuilderTy &Builder) { 932 if (auto *Cast = dyn_cast<CastInst>(&I)) 933 return Builder.CreateCast(Cast->getOpcode(), SO, I.getType()); 934 935 assert(I.isBinaryOp() && "Unexpected opcode for select folding"); 936 937 // Figure out if the constant is the left or the right argument. 938 bool ConstIsRHS = isa<Constant>(I.getOperand(1)); 939 Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS)); 940 941 if (auto *SOC = dyn_cast<Constant>(SO)) { 942 if (ConstIsRHS) 943 return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand); 944 return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC); 945 } 946 947 Value *Op0 = SO, *Op1 = ConstOperand; 948 if (!ConstIsRHS) 949 std::swap(Op0, Op1); 950 951 auto *BO = cast<BinaryOperator>(&I); 952 Value *RI = Builder.CreateBinOp(BO->getOpcode(), Op0, Op1, 953 SO->getName() + ".op"); 954 auto *FPInst = dyn_cast<Instruction>(RI); 955 if (FPInst && isa<FPMathOperator>(FPInst)) 956 FPInst->copyFastMathFlags(BO); 957 return RI; 958 } 959 960 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, 961 SelectInst *SI) { 962 // Don't modify shared select instructions. 963 if (!SI->hasOneUse()) 964 return nullptr; 965 966 Value *TV = SI->getTrueValue(); 967 Value *FV = SI->getFalseValue(); 968 if (!(isa<Constant>(TV) || isa<Constant>(FV))) 969 return nullptr; 970 971 // Bool selects with constant operands can be folded to logical ops. 972 if (SI->getType()->isIntOrIntVectorTy(1)) 973 return nullptr; 974 975 // If it's a bitcast involving vectors, make sure it has the same number of 976 // elements on both sides. 977 if (auto *BC = dyn_cast<BitCastInst>(&Op)) { 978 VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy()); 979 VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy()); 980 981 // Verify that either both or neither are vectors. 982 if ((SrcTy == nullptr) != (DestTy == nullptr)) 983 return nullptr; 984 985 // If vectors, verify that they have the same number of elements. 986 if (SrcTy && SrcTy->getElementCount() != DestTy->getElementCount()) 987 return nullptr; 988 } 989 990 // Test if a CmpInst instruction is used exclusively by a select as 991 // part of a minimum or maximum operation. If so, refrain from doing 992 // any other folding. This helps out other analyses which understand 993 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution 994 // and CodeGen. And in this case, at least one of the comparison 995 // operands has at least one user besides the compare (the select), 996 // which would often largely negate the benefit of folding anyway. 
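  // An illustrative instance of such an idiom (a signed max):
  //   %c = icmp sgt i32 %x, %y
  //   %m = select i1 %c, i32 %x, i32 %y
  // Folding another operation into %m here would obscure the max pattern.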
997 if (auto *CI = dyn_cast<CmpInst>(SI->getCondition())) { 998 if (CI->hasOneUse()) { 999 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1); 1000 1001 // FIXME: This is a hack to avoid infinite looping with min/max patterns. 1002 // We have to ensure that vector constants that only differ with 1003 // undef elements are treated as equivalent. 1004 auto areLooselyEqual = [](Value *A, Value *B) { 1005 if (A == B) 1006 return true; 1007 1008 // Test for vector constants. 1009 Constant *ConstA, *ConstB; 1010 if (!match(A, m_Constant(ConstA)) || !match(B, m_Constant(ConstB))) 1011 return false; 1012 1013 // TODO: Deal with FP constants? 1014 if (!A->getType()->isIntOrIntVectorTy() || A->getType() != B->getType()) 1015 return false; 1016 1017 // Compare for equality including undefs as equal. 1018 auto *Cmp = ConstantExpr::getCompare(ICmpInst::ICMP_EQ, ConstA, ConstB); 1019 const APInt *C; 1020 return match(Cmp, m_APIntAllowUndef(C)) && C->isOneValue(); 1021 }; 1022 1023 if ((areLooselyEqual(TV, Op0) && areLooselyEqual(FV, Op1)) || 1024 (areLooselyEqual(FV, Op0) && areLooselyEqual(TV, Op1))) 1025 return nullptr; 1026 } 1027 } 1028 1029 Value *NewTV = foldOperationIntoSelectOperand(Op, TV, Builder); 1030 Value *NewFV = foldOperationIntoSelectOperand(Op, FV, Builder); 1031 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI); 1032 } 1033 1034 static Value *foldOperationIntoPhiValue(BinaryOperator *I, Value *InV, 1035 InstCombiner::BuilderTy &Builder) { 1036 bool ConstIsRHS = isa<Constant>(I->getOperand(1)); 1037 Constant *C = cast<Constant>(I->getOperand(ConstIsRHS)); 1038 1039 if (auto *InC = dyn_cast<Constant>(InV)) { 1040 if (ConstIsRHS) 1041 return ConstantExpr::get(I->getOpcode(), InC, C); 1042 return ConstantExpr::get(I->getOpcode(), C, InC); 1043 } 1044 1045 Value *Op0 = InV, *Op1 = C; 1046 if (!ConstIsRHS) 1047 std::swap(Op0, Op1); 1048 1049 Value *RI = Builder.CreateBinOp(I->getOpcode(), Op0, Op1, "phi.bo"); 1050 auto *FPInst = dyn_cast<Instruction>(RI); 1051 if (FPInst && isa<FPMathOperator>(FPInst)) 1052 FPInst->copyFastMathFlags(I); 1053 return RI; 1054 } 1055 1056 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) { 1057 unsigned NumPHIValues = PN->getNumIncomingValues(); 1058 if (NumPHIValues == 0) 1059 return nullptr; 1060 1061 // We normally only transform phis with a single use. However, if a PHI has 1062 // multiple uses and they are all the same operation, we can fold *all* of the 1063 // uses into the PHI. 1064 if (!PN->hasOneUse()) { 1065 // Walk the use list for the instruction, comparing them to I. 1066 for (User *U : PN->users()) { 1067 Instruction *UI = cast<Instruction>(U); 1068 if (UI != &I && !I.isIdenticalTo(UI)) 1069 return nullptr; 1070 } 1071 // Otherwise, we can replace *all* users with the new PHI we form. 1072 } 1073 1074 // Check to see if all of the operands of the PHI are simple constants 1075 // (constantint/constantfp/undef). If there is one non-constant value, 1076 // remember the BB it is in. If there is more than one or if *it* is a PHI, 1077 // bail out. We don't do arbitrary constant expressions here because moving 1078 // their computation can be expensive without a cost model. 1079 BasicBlock *NonConstBB = nullptr; 1080 for (unsigned i = 0; i != NumPHIValues; ++i) { 1081 Value *InVal = PN->getIncomingValue(i); 1082 // If I is a freeze instruction, count undef as a non-constant. 
1083 if (match(InVal, m_ImmConstant()) && 1084 (!isa<FreezeInst>(I) || isGuaranteedNotToBeUndefOrPoison(InVal))) 1085 continue; 1086 1087 if (isa<PHINode>(InVal)) return nullptr; // Itself a phi. 1088 if (NonConstBB) return nullptr; // More than one non-const value. 1089 1090 NonConstBB = PN->getIncomingBlock(i); 1091 1092 // If the InVal is an invoke at the end of the pred block, then we can't 1093 // insert a computation after it without breaking the edge. 1094 if (isa<InvokeInst>(InVal)) 1095 if (cast<Instruction>(InVal)->getParent() == NonConstBB) 1096 return nullptr; 1097 1098 // If the incoming non-constant value is in I's block, we will remove one 1099 // instruction, but insert another equivalent one, leading to infinite 1100 // instcombine. 1101 if (isPotentiallyReachable(I.getParent(), NonConstBB, &DT, LI)) 1102 return nullptr; 1103 } 1104 1105 // If there is exactly one non-constant value, we can insert a copy of the 1106 // operation in that block. However, if this is a critical edge, we would be 1107 // inserting the computation on some other paths (e.g. inside a loop). Only 1108 // do this if the pred block is unconditionally branching into the phi block. 1109 // Also, make sure that the pred block is not dead code. 1110 if (NonConstBB != nullptr) { 1111 BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator()); 1112 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(NonConstBB)) 1113 return nullptr; 1114 } 1115 1116 // Okay, we can do the transformation: create the new PHI node. 1117 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues()); 1118 InsertNewInstBefore(NewPN, *PN); 1119 NewPN->takeName(PN); 1120 1121 // If we are going to have to insert a new computation, do so right before the 1122 // predecessor's terminator. 1123 if (NonConstBB) 1124 Builder.SetInsertPoint(NonConstBB->getTerminator()); 1125 1126 // Next, add all of the operands to the PHI. 1127 if (SelectInst *SI = dyn_cast<SelectInst>(&I)) { 1128 // We only currently try to fold the condition of a select when it is a phi, 1129 // not the true/false values. 1130 Value *TrueV = SI->getTrueValue(); 1131 Value *FalseV = SI->getFalseValue(); 1132 BasicBlock *PhiTransBB = PN->getParent(); 1133 for (unsigned i = 0; i != NumPHIValues; ++i) { 1134 BasicBlock *ThisBB = PN->getIncomingBlock(i); 1135 Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB); 1136 Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB); 1137 Value *InV = nullptr; 1138 // Beware of ConstantExpr: it may eventually evaluate to getNullValue, 1139 // even if currently isNullValue gives false. 1140 Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)); 1141 // For vector constants, we cannot use isNullValue to fold into 1142 // FalseVInPred versus TrueVInPred. When we have individual nonzero 1143 // elements in the vector, we will incorrectly fold InC to 1144 // `TrueVInPred`. 1145 if (InC && isa<ConstantInt>(InC)) 1146 InV = InC->isNullValue() ? FalseVInPred : TrueVInPred; 1147 else { 1148 // Generate the select in the same block as PN's current incoming block. 1149 // Note: ThisBB need not be the NonConstBB because vector constants 1150 // which are constants by definition are handled here. 1151 // FIXME: This can lead to an increase in IR generation because we might 1152 // generate selects for vector constant phi operand, that could not be 1153 // folded to TrueVInPred or FalseVInPred as done for ConstantInt. 
For 1154 // non-vector phis, this transformation was always profitable because 1155 // the select would be generated exactly once in the NonConstBB. 1156 Builder.SetInsertPoint(ThisBB->getTerminator()); 1157 InV = Builder.CreateSelect(PN->getIncomingValue(i), TrueVInPred, 1158 FalseVInPred, "phi.sel"); 1159 } 1160 NewPN->addIncoming(InV, ThisBB); 1161 } 1162 } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) { 1163 Constant *C = cast<Constant>(I.getOperand(1)); 1164 for (unsigned i = 0; i != NumPHIValues; ++i) { 1165 Value *InV = nullptr; 1166 if (auto *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 1167 InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C); 1168 else 1169 InV = Builder.CreateCmp(CI->getPredicate(), PN->getIncomingValue(i), 1170 C, "phi.cmp"); 1171 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 1172 } 1173 } else if (auto *BO = dyn_cast<BinaryOperator>(&I)) { 1174 for (unsigned i = 0; i != NumPHIValues; ++i) { 1175 Value *InV = foldOperationIntoPhiValue(BO, PN->getIncomingValue(i), 1176 Builder); 1177 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 1178 } 1179 } else if (isa<FreezeInst>(&I)) { 1180 for (unsigned i = 0; i != NumPHIValues; ++i) { 1181 Value *InV; 1182 if (NonConstBB == PN->getIncomingBlock(i)) 1183 InV = Builder.CreateFreeze(PN->getIncomingValue(i), "phi.fr"); 1184 else 1185 InV = PN->getIncomingValue(i); 1186 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 1187 } 1188 } else { 1189 CastInst *CI = cast<CastInst>(&I); 1190 Type *RetTy = CI->getType(); 1191 for (unsigned i = 0; i != NumPHIValues; ++i) { 1192 Value *InV; 1193 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 1194 InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy); 1195 else 1196 InV = Builder.CreateCast(CI->getOpcode(), PN->getIncomingValue(i), 1197 I.getType(), "phi.cast"); 1198 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 1199 } 1200 } 1201 1202 for (User *U : make_early_inc_range(PN->users())) { 1203 Instruction *User = cast<Instruction>(U); 1204 if (User == &I) continue; 1205 replaceInstUsesWith(*User, NewPN); 1206 eraseInstFromFunction(*User); 1207 } 1208 return replaceInstUsesWith(I, NewPN); 1209 } 1210 1211 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) { 1212 if (!isa<Constant>(I.getOperand(1))) 1213 return nullptr; 1214 1215 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) { 1216 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel)) 1217 return NewSel; 1218 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) { 1219 if (Instruction *NewPhi = foldOpIntoPhi(I, PN)) 1220 return NewPhi; 1221 } 1222 return nullptr; 1223 } 1224 1225 /// Given a pointer type and a constant offset, determine whether or not there 1226 /// is a sequence of GEP indices into the pointed type that will land us at the 1227 /// specified offset. If so, fill them into NewIndices and return the resultant 1228 /// element type, otherwise return null. 1229 Type * 1230 InstCombinerImpl::FindElementAtOffset(PointerType *PtrTy, int64_t Offset, 1231 SmallVectorImpl<Value *> &NewIndices) { 1232 Type *Ty = PtrTy->getElementType(); 1233 if (!Ty->isSized()) 1234 return nullptr; 1235 1236 // Start with the index over the outer type. 
Note that the type size 1237 // might be zero (even if the offset isn't zero) if the indexed type 1238 // is something like [0 x {int, int}] 1239 Type *IndexTy = DL.getIndexType(PtrTy); 1240 int64_t FirstIdx = 0; 1241 if (int64_t TySize = DL.getTypeAllocSize(Ty)) { 1242 FirstIdx = Offset/TySize; 1243 Offset -= FirstIdx*TySize; 1244 1245 // Handle hosts where % returns negative instead of values [0..TySize). 1246 if (Offset < 0) { 1247 --FirstIdx; 1248 Offset += TySize; 1249 assert(Offset >= 0); 1250 } 1251 assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset"); 1252 } 1253 1254 NewIndices.push_back(ConstantInt::get(IndexTy, FirstIdx)); 1255 1256 // Index into the types. If we fail, set OrigBase to null. 1257 while (Offset) { 1258 // Indexing into tail padding between struct/array elements. 1259 if (uint64_t(Offset * 8) >= DL.getTypeSizeInBits(Ty)) 1260 return nullptr; 1261 1262 if (StructType *STy = dyn_cast<StructType>(Ty)) { 1263 const StructLayout *SL = DL.getStructLayout(STy); 1264 assert(Offset < (int64_t)SL->getSizeInBytes() && 1265 "Offset must stay within the indexed type"); 1266 1267 unsigned Elt = SL->getElementContainingOffset(Offset); 1268 NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1269 Elt)); 1270 1271 Offset -= SL->getElementOffset(Elt); 1272 Ty = STy->getElementType(Elt); 1273 } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) { 1274 uint64_t EltSize = DL.getTypeAllocSize(AT->getElementType()); 1275 assert(EltSize && "Cannot index into a zero-sized array"); 1276 NewIndices.push_back(ConstantInt::get(IndexTy,Offset/EltSize)); 1277 Offset %= EltSize; 1278 Ty = AT->getElementType(); 1279 } else { 1280 // Otherwise, we can't index into the middle of this atomic type, bail. 1281 return nullptr; 1282 } 1283 } 1284 1285 return Ty; 1286 } 1287 1288 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) { 1289 // If this GEP has only 0 indices, it is the same pointer as 1290 // Src. If Src is not a trivial GEP too, don't combine 1291 // the indices. 1292 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() && 1293 !Src.hasOneUse()) 1294 return false; 1295 return true; 1296 } 1297 1298 /// Return a value X such that Val = X * Scale, or null if none. 1299 /// If the multiplication is known not to overflow, then NoSignedWrap is set. 1300 Value *InstCombinerImpl::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) { 1301 assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!"); 1302 assert(cast<IntegerType>(Val->getType())->getBitWidth() == 1303 Scale.getBitWidth() && "Scale not compatible with value!"); 1304 1305 // If Val is zero or Scale is one then Val = Val * Scale. 1306 if (match(Val, m_Zero()) || Scale == 1) { 1307 NoSignedWrap = true; 1308 return Val; 1309 } 1310 1311 // If Scale is zero then it does not divide Val. 1312 if (Scale.isMinValue()) 1313 return nullptr; 1314 1315 // Look through chains of multiplications, searching for a constant that is 1316 // divisible by Scale. For example, descaling X*(Y*(Z*4)) by a factor of 4 1317 // will find the constant factor 4 and produce X*(Y*Z). Descaling X*(Y*8) by 1318 // a factor of 4 will produce X*(Y*2). 
The principle of operation is to bore 1319 // down from Val: 1320 // 1321 // Val = M1 * X || Analysis starts here and works down 1322 // M1 = M2 * Y || Doesn't descend into terms with more 1323 // M2 = Z * 4 \/ than one use 1324 // 1325 // Then to modify a term at the bottom: 1326 // 1327 // Val = M1 * X 1328 // M1 = Z * Y || Replaced M2 with Z 1329 // 1330 // Then to work back up correcting nsw flags. 1331 1332 // Op - the term we are currently analyzing. Starts at Val then drills down. 1333 // Replaced with its descaled value before exiting from the drill down loop. 1334 Value *Op = Val; 1335 1336 // Parent - initially null, but after drilling down notes where Op came from. 1337 // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the 1338 // 0'th operand of Val. 1339 std::pair<Instruction *, unsigned> Parent; 1340 1341 // Set if the transform requires a descaling at deeper levels that doesn't 1342 // overflow. 1343 bool RequireNoSignedWrap = false; 1344 1345 // Log base 2 of the scale. Negative if not a power of 2. 1346 int32_t logScale = Scale.exactLogBase2(); 1347 1348 for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down 1349 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { 1350 // If Op is a constant divisible by Scale then descale to the quotient. 1351 APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth. 1352 APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder); 1353 if (!Remainder.isMinValue()) 1354 // Not divisible by Scale. 1355 return nullptr; 1356 // Replace with the quotient in the parent. 1357 Op = ConstantInt::get(CI->getType(), Quotient); 1358 NoSignedWrap = true; 1359 break; 1360 } 1361 1362 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) { 1363 if (BO->getOpcode() == Instruction::Mul) { 1364 // Multiplication. 1365 NoSignedWrap = BO->hasNoSignedWrap(); 1366 if (RequireNoSignedWrap && !NoSignedWrap) 1367 return nullptr; 1368 1369 // There are three cases for multiplication: multiplication by exactly 1370 // the scale, multiplication by a constant different to the scale, and 1371 // multiplication by something else. 1372 Value *LHS = BO->getOperand(0); 1373 Value *RHS = BO->getOperand(1); 1374 1375 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { 1376 // Multiplication by a constant. 1377 if (CI->getValue() == Scale) { 1378 // Multiplication by exactly the scale, replace the multiplication 1379 // by its left-hand side in the parent. 1380 Op = LHS; 1381 break; 1382 } 1383 1384 // Otherwise drill down into the constant. 1385 if (!Op->hasOneUse()) 1386 return nullptr; 1387 1388 Parent = std::make_pair(BO, 1); 1389 continue; 1390 } 1391 1392 // Multiplication by something else. Drill down into the left-hand side 1393 // since that's where the reassociate pass puts the good stuff. 1394 if (!Op->hasOneUse()) 1395 return nullptr; 1396 1397 Parent = std::make_pair(BO, 0); 1398 continue; 1399 } 1400 1401 if (logScale > 0 && BO->getOpcode() == Instruction::Shl && 1402 isa<ConstantInt>(BO->getOperand(1))) { 1403 // Multiplication by a power of 2. 1404 NoSignedWrap = BO->hasNoSignedWrap(); 1405 if (RequireNoSignedWrap && !NoSignedWrap) 1406 return nullptr; 1407 1408 Value *LHS = BO->getOperand(0); 1409 int32_t Amt = cast<ConstantInt>(BO->getOperand(1))-> 1410 getLimitedValue(Scale.getBitWidth()); 1411 // Op = LHS << Amt. 1412 1413 if (Amt == logScale) { 1414 // Multiplication by exactly the scale, replace the multiplication 1415 // by its left-hand side in the parent. 
1416 Op = LHS; 1417 break; 1418 } 1419 if (Amt < logScale || !Op->hasOneUse()) 1420 return nullptr; 1421 1422 // Multiplication by more than the scale. Reduce the multiplying amount 1423 // by the scale in the parent. 1424 Parent = std::make_pair(BO, 1); 1425 Op = ConstantInt::get(BO->getType(), Amt - logScale); 1426 break; 1427 } 1428 } 1429 1430 if (!Op->hasOneUse()) 1431 return nullptr; 1432 1433 if (CastInst *Cast = dyn_cast<CastInst>(Op)) { 1434 if (Cast->getOpcode() == Instruction::SExt) { 1435 // Op is sign-extended from a smaller type, descale in the smaller type. 1436 unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits(); 1437 APInt SmallScale = Scale.trunc(SmallSize); 1438 // Suppose Op = sext X, and we descale X as Y * SmallScale. We want to 1439 // descale Op as (sext Y) * Scale. In order to have 1440 // sext (Y * SmallScale) = (sext Y) * Scale 1441 // some conditions need to hold however: SmallScale must sign-extend to 1442 // Scale and the multiplication Y * SmallScale should not overflow. 1443 if (SmallScale.sext(Scale.getBitWidth()) != Scale) 1444 // SmallScale does not sign-extend to Scale. 1445 return nullptr; 1446 assert(SmallScale.exactLogBase2() == logScale); 1447 // Require that Y * SmallScale must not overflow. 1448 RequireNoSignedWrap = true; 1449 1450 // Drill down through the cast. 1451 Parent = std::make_pair(Cast, 0); 1452 Scale = SmallScale; 1453 continue; 1454 } 1455 1456 if (Cast->getOpcode() == Instruction::Trunc) { 1457 // Op is truncated from a larger type, descale in the larger type. 1458 // Suppose Op = trunc X, and we descale X as Y * sext Scale. Then 1459 // trunc (Y * sext Scale) = (trunc Y) * Scale 1460 // always holds. However (trunc Y) * Scale may overflow even if 1461 // trunc (Y * sext Scale) does not, so nsw flags need to be cleared 1462 // from this point up in the expression (see later). 1463 if (RequireNoSignedWrap) 1464 return nullptr; 1465 1466 // Drill down through the cast. 1467 unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits(); 1468 Parent = std::make_pair(Cast, 0); 1469 Scale = Scale.sext(LargeSize); 1470 if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits()) 1471 logScale = -1; 1472 assert(Scale.exactLogBase2() == logScale); 1473 continue; 1474 } 1475 } 1476 1477 // Unsupported expression, bail out. 1478 return nullptr; 1479 } 1480 1481 // If Op is zero then Val = Op * Scale. 1482 if (match(Op, m_Zero())) { 1483 NoSignedWrap = true; 1484 return Op; 1485 } 1486 1487 // We know that we can successfully descale, so from here on we can safely 1488 // modify the IR. Op holds the descaled version of the deepest term in the 1489 // expression. NoSignedWrap is 'true' if multiplying Op by Scale is known 1490 // not to overflow. 1491 1492 if (!Parent.first) 1493 // The expression only had one term. 1494 return Op; 1495 1496 // Rewrite the parent using the descaled version of its operand. 1497 assert(Parent.first->hasOneUse() && "Drilled down when more than one use!"); 1498 assert(Op != Parent.first->getOperand(Parent.second) && 1499 "Descaling was a no-op?"); 1500 replaceOperand(*Parent.first, Parent.second, Op); 1501 Worklist.push(Parent.first); 1502 1503 // Now work back up the expression correcting nsw flags. The logic is based 1504 // on the following observation: if X * Y is known not to overflow as a signed 1505 // multiplication, and Y is replaced by a value Z with smaller absolute value, 1506 // then X * Z will not overflow as a signed multiplication either. 
As we work 1507 // our way up, having NoSignedWrap 'true' means that the descaled value at the 1508 // current level has strictly smaller absolute value than the original. 1509 Instruction *Ancestor = Parent.first; 1510 do { 1511 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) { 1512 // If the multiplication wasn't nsw then we can't say anything about the 1513 // value of the descaled multiplication, and we have to clear nsw flags 1514 // from this point on up. 1515 bool OpNoSignedWrap = BO->hasNoSignedWrap(); 1516 NoSignedWrap &= OpNoSignedWrap; 1517 if (NoSignedWrap != OpNoSignedWrap) { 1518 BO->setHasNoSignedWrap(NoSignedWrap); 1519 Worklist.push(Ancestor); 1520 } 1521 } else if (Ancestor->getOpcode() == Instruction::Trunc) { 1522 // The fact that the descaled input to the trunc has smaller absolute 1523 // value than the original input doesn't tell us anything useful about 1524 // the absolute values of the truncations. 1525 NoSignedWrap = false; 1526 } 1527 assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) && 1528 "Failed to keep proper track of nsw flags while drilling down?"); 1529 1530 if (Ancestor == Val) 1531 // Got to the top, all done! 1532 return Val; 1533 1534 // Move up one level in the expression. 1535 assert(Ancestor->hasOneUse() && "Drilled down when more than one use!"); 1536 Ancestor = Ancestor->user_back(); 1537 } while (true); 1538 } 1539 1540 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) { 1541 if (!isa<VectorType>(Inst.getType())) 1542 return nullptr; 1543 1544 BinaryOperator::BinaryOps Opcode = Inst.getOpcode(); 1545 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1); 1546 assert(cast<VectorType>(LHS->getType())->getElementCount() == 1547 cast<VectorType>(Inst.getType())->getElementCount()); 1548 assert(cast<VectorType>(RHS->getType())->getElementCount() == 1549 cast<VectorType>(Inst.getType())->getElementCount()); 1550 1551 // If both operands of the binop are vector concatenations, then perform the 1552 // narrow binop on each pair of the source operands followed by concatenation 1553 // of the results. 1554 Value *L0, *L1, *R0, *R1; 1555 ArrayRef<int> Mask; 1556 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) && 1557 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) && 1558 LHS->hasOneUse() && RHS->hasOneUse() && 1559 cast<ShuffleVectorInst>(LHS)->isConcat() && 1560 cast<ShuffleVectorInst>(RHS)->isConcat()) { 1561 // This transform does not have the speculative execution constraint as 1562 // below because the shuffle is a concatenation. The new binops are 1563 // operating on exactly the same elements as the existing binop. 1564 // TODO: We could ease the mask requirement to allow different undef lanes, 1565 // but that requires an analysis of the binop-with-undef output value. 1566 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0); 1567 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0)) 1568 BO->copyIRFlags(&Inst); 1569 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1); 1570 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1)) 1571 BO->copyIRFlags(&Inst); 1572 return new ShuffleVectorInst(NewBO0, NewBO1, Mask); 1573 } 1574 1575 // It may not be safe to reorder shuffles and things like div, urem, etc. 1576 // because we may trap when executing those ops on unknown vector elements. 1577 // See PR20059. 
1578 if (!isSafeToSpeculativelyExecute(&Inst)) 1579 return nullptr; 1580 1581 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) { 1582 Value *XY = Builder.CreateBinOp(Opcode, X, Y); 1583 if (auto *BO = dyn_cast<BinaryOperator>(XY)) 1584 BO->copyIRFlags(&Inst); 1585 return new ShuffleVectorInst(XY, UndefValue::get(XY->getType()), M); 1586 }; 1587 1588 // If both arguments of the binary operation are shuffles that use the same 1589 // mask and shuffle within a single vector, move the shuffle after the binop. 1590 Value *V1, *V2; 1591 if (match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))) && 1592 match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(Mask))) && 1593 V1->getType() == V2->getType() && 1594 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) { 1595 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask) 1596 return createBinOpShuffle(V1, V2, Mask); 1597 } 1598 1599 // If both arguments of a commutative binop are select-shuffles that use the 1600 // same mask with commuted operands, the shuffles are unnecessary. 1601 if (Inst.isCommutative() && 1602 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) && 1603 match(RHS, 1604 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) { 1605 auto *LShuf = cast<ShuffleVectorInst>(LHS); 1606 auto *RShuf = cast<ShuffleVectorInst>(RHS); 1607 // TODO: Allow shuffles that contain undefs in the mask? 1608 // That is legal, but it reduces undef knowledge. 1609 // TODO: Allow arbitrary shuffles by shuffling after binop? 1610 // That might be legal, but we have to deal with poison. 1611 if (LShuf->isSelect() && 1612 !is_contained(LShuf->getShuffleMask(), UndefMaskElem) && 1613 RShuf->isSelect() && 1614 !is_contained(RShuf->getShuffleMask(), UndefMaskElem)) { 1615 // Example: 1616 // LHS = shuffle V1, V2, <0, 5, 6, 3> 1617 // RHS = shuffle V2, V1, <0, 5, 6, 3> 1618 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2 1619 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2); 1620 NewBO->copyIRFlags(&Inst); 1621 return NewBO; 1622 } 1623 } 1624 1625 // If one argument is a shuffle within one vector and the other is a constant, 1626 // try moving the shuffle after the binary operation. This canonicalization 1627 // intends to move shuffles closer to other shuffles and binops closer to 1628 // other binops, so they can be folded. It may also enable demanded elements 1629 // transforms. 1630 Constant *C; 1631 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType()); 1632 if (InstVTy && 1633 match(&Inst, 1634 m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))), 1635 m_ImmConstant(C))) && 1636 cast<FixedVectorType>(V1->getType())->getNumElements() <= 1637 InstVTy->getNumElements()) { 1638 assert(InstVTy->getScalarType() == V1->getType()->getScalarType() && 1639 "Shuffle should not change scalar type"); 1640 1641 // Find constant NewC that has property: 1642 // shuffle(NewC, ShMask) = C 1643 // If such constant does not exist (example: ShMask=<0,0> and C=<1,2>) 1644 // reorder is not possible. A 1-to-1 mapping is not required. 
Example: 1645 // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef> 1646 bool ConstOp1 = isa<Constant>(RHS); 1647 ArrayRef<int> ShMask = Mask; 1648 unsigned SrcVecNumElts = 1649 cast<FixedVectorType>(V1->getType())->getNumElements(); 1650 UndefValue *UndefScalar = UndefValue::get(C->getType()->getScalarType()); 1651 SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, UndefScalar); 1652 bool MayChange = true; 1653 unsigned NumElts = InstVTy->getNumElements(); 1654 for (unsigned I = 0; I < NumElts; ++I) { 1655 Constant *CElt = C->getAggregateElement(I); 1656 if (ShMask[I] >= 0) { 1657 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle"); 1658 Constant *NewCElt = NewVecC[ShMask[I]]; 1659 // Bail out if: 1660 // 1. The constant vector contains a constant expression. 1661 // 2. The shuffle needs an element of the constant vector that can't 1662 // be mapped to a new constant vector. 1663 // 3. This is a widening shuffle that copies elements of V1 into the 1664 // extended elements (extending with undef is allowed). 1665 if (!CElt || (!isa<UndefValue>(NewCElt) && NewCElt != CElt) || 1666 I >= SrcVecNumElts) { 1667 MayChange = false; 1668 break; 1669 } 1670 NewVecC[ShMask[I]] = CElt; 1671 } 1672 // If this is a widening shuffle, we must be able to extend with undef 1673 // elements. If the original binop does not produce an undef in the high 1674 // lanes, then this transform is not safe. 1675 // Similarly for undef lanes due to the shuffle mask, we can only 1676 // transform binops that preserve undef. 1677 // TODO: We could shuffle those non-undef constant values into the 1678 // result by using a constant vector (rather than an undef vector) 1679 // as operand 1 of the new binop, but that might be too aggressive 1680 // for target-independent shuffle creation. 1681 if (I >= SrcVecNumElts || ShMask[I] < 0) { 1682 Constant *MaybeUndef = 1683 ConstOp1 ? ConstantExpr::get(Opcode, UndefScalar, CElt) 1684 : ConstantExpr::get(Opcode, CElt, UndefScalar); 1685 if (!isa<UndefValue>(MaybeUndef)) { 1686 MayChange = false; 1687 break; 1688 } 1689 } 1690 } 1691 if (MayChange) { 1692 Constant *NewC = ConstantVector::get(NewVecC); 1693 // It may not be safe to execute a binop on a vector with undef elements 1694 // because the entire instruction can be folded to undef or create poison 1695 // that did not exist in the original code. 1696 if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1)) 1697 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1); 1698 1699 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask) 1700 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask) 1701 Value *NewLHS = ConstOp1 ? V1 : NewC; 1702 Value *NewRHS = ConstOp1 ? NewC : V1; 1703 return createBinOpShuffle(NewLHS, NewRHS, Mask); 1704 } 1705 } 1706 1707 // Try to reassociate to sink a splat shuffle after a binary operation. 1708 if (Inst.isAssociative() && Inst.isCommutative()) { 1709 // Canonicalize shuffle operand as LHS. 1710 if (isa<ShuffleVectorInst>(RHS)) 1711 std::swap(LHS, RHS); 1712 1713 Value *X; 1714 ArrayRef<int> MaskC; 1715 int SplatIndex; 1716 BinaryOperator *BO; 1717 if (!match(LHS, 1718 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) || 1719 !match(MaskC, m_SplatOrUndefMask(SplatIndex)) || 1720 X->getType() != Inst.getType() || !match(RHS, m_OneUse(m_BinOp(BO))) || 1721 BO->getOpcode() != Opcode) 1722 return nullptr; 1723 1724 // FIXME: This may not be safe if the analysis allows undef elements. 
By 1725 // moving 'Y' before the splat shuffle, we are implicitly assuming 1726 // that it is not undef/poison at the splat index. 1727 Value *Y, *OtherOp; 1728 if (isSplatValue(BO->getOperand(0), SplatIndex)) { 1729 Y = BO->getOperand(0); 1730 OtherOp = BO->getOperand(1); 1731 } else if (isSplatValue(BO->getOperand(1), SplatIndex)) { 1732 Y = BO->getOperand(1); 1733 OtherOp = BO->getOperand(0); 1734 } else { 1735 return nullptr; 1736 } 1737 1738 // X and Y are splatted values, so perform the binary operation on those 1739 // values followed by a splat followed by the 2nd binary operation: 1740 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp 1741 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y); 1742 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex); 1743 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask); 1744 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp); 1745 1746 // Intersect FMF on both new binops. Other (poison-generating) flags are 1747 // dropped to be safe. 1748 if (isa<FPMathOperator>(R)) { 1749 R->copyFastMathFlags(&Inst); 1750 R->andIRFlags(BO); 1751 } 1752 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO)) 1753 NewInstBO->copyIRFlags(R); 1754 return R; 1755 } 1756 1757 return nullptr; 1758 } 1759 1760 /// Try to narrow the width of a binop if at least 1 operand is an extend of 1761 /// of a value. This requires a potentially expensive known bits check to make 1762 /// sure the narrow op does not overflow. 1763 Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) { 1764 // We need at least one extended operand. 1765 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1); 1766 1767 // If this is a sub, we swap the operands since we always want an extension 1768 // on the RHS. The LHS can be an extension or a constant. 1769 if (BO.getOpcode() == Instruction::Sub) 1770 std::swap(Op0, Op1); 1771 1772 Value *X; 1773 bool IsSext = match(Op0, m_SExt(m_Value(X))); 1774 if (!IsSext && !match(Op0, m_ZExt(m_Value(X)))) 1775 return nullptr; 1776 1777 // If both operands are the same extension from the same source type and we 1778 // can eliminate at least one (hasOneUse), this might work. 1779 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt; 1780 Value *Y; 1781 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() && 1782 cast<Operator>(Op1)->getOpcode() == CastOpc && 1783 (Op0->hasOneUse() || Op1->hasOneUse()))) { 1784 // If that did not match, see if we have a suitable constant operand. 1785 // Truncating and extending must produce the same constant. 1786 Constant *WideC; 1787 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC))) 1788 return nullptr; 1789 Constant *NarrowC = ConstantExpr::getTrunc(WideC, X->getType()); 1790 if (ConstantExpr::getCast(CastOpc, NarrowC, BO.getType()) != WideC) 1791 return nullptr; 1792 Y = NarrowC; 1793 } 1794 1795 // Swap back now that we found our operands. 1796 if (BO.getOpcode() == Instruction::Sub) 1797 std::swap(X, Y); 1798 1799 // Both operands have narrow versions. Last step: the math must not overflow 1800 // in the narrow width. 
1801 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext)) 1802 return nullptr; 1803 1804 // bo (ext X), (ext Y) --> ext (bo X, Y) 1805 // bo (ext X), C --> ext (bo X, C') 1806 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow"); 1807 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) { 1808 if (IsSext) 1809 NewBinOp->setHasNoSignedWrap(); 1810 else 1811 NewBinOp->setHasNoUnsignedWrap(); 1812 } 1813 return CastInst::Create(CastOpc, NarrowBO, BO.getType()); 1814 } 1815 1816 static bool isMergedGEPInBounds(GEPOperator &GEP1, GEPOperator &GEP2) { 1817 // At least one GEP must be inbounds. 1818 if (!GEP1.isInBounds() && !GEP2.isInBounds()) 1819 return false; 1820 1821 return (GEP1.isInBounds() || GEP1.hasAllZeroIndices()) && 1822 (GEP2.isInBounds() || GEP2.hasAllZeroIndices()); 1823 } 1824 1825 /// Thread a GEP operation with constant indices through the constant true/false 1826 /// arms of a select. 1827 static Instruction *foldSelectGEP(GetElementPtrInst &GEP, 1828 InstCombiner::BuilderTy &Builder) { 1829 if (!GEP.hasAllConstantIndices()) 1830 return nullptr; 1831 1832 Instruction *Sel; 1833 Value *Cond; 1834 Constant *TrueC, *FalseC; 1835 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) || 1836 !match(Sel, 1837 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC)))) 1838 return nullptr; 1839 1840 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC' 1841 // Propagate 'inbounds' and metadata from existing instructions. 1842 // Note: using IRBuilder to create the constants for efficiency. 1843 SmallVector<Value *, 4> IndexC(GEP.indices()); 1844 bool IsInBounds = GEP.isInBounds(); 1845 Value *NewTrueC = IsInBounds ? Builder.CreateInBoundsGEP(TrueC, IndexC) 1846 : Builder.CreateGEP(TrueC, IndexC); 1847 Value *NewFalseC = IsInBounds ? Builder.CreateInBoundsGEP(FalseC, IndexC) 1848 : Builder.CreateGEP(FalseC, IndexC); 1849 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel); 1850 } 1851 1852 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) { 1853 SmallVector<Value *, 8> Ops(GEP.operands()); 1854 Type *GEPType = GEP.getType(); 1855 Type *GEPEltType = GEP.getSourceElementType(); 1856 bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType); 1857 if (Value *V = SimplifyGEPInst(GEPEltType, Ops, SQ.getWithInstruction(&GEP))) 1858 return replaceInstUsesWith(GEP, V); 1859 1860 // For vector geps, use the generic demanded vector support. 1861 // Skip if GEP return type is scalable. The number of elements is unknown at 1862 // compile-time. 1863 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) { 1864 auto VWidth = GEPFVTy->getNumElements(); 1865 APInt UndefElts(VWidth, 0); 1866 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth)); 1867 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask, 1868 UndefElts)) { 1869 if (V != &GEP) 1870 return replaceInstUsesWith(GEP, V); 1871 return &GEP; 1872 } 1873 1874 // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if 1875 // possible (decide on canonical form for pointer broadcast), 3) exploit 1876 // undef elements to decrease demanded bits 1877 } 1878 1879 Value *PtrOp = GEP.getOperand(0); 1880 1881 // Eliminate unneeded casts for indices, and replace indices which displace 1882 // by multiples of a zero size type with zero. 1883 bool MadeChange = false; 1884 1885 // Index width may not be the same width as pointer width. 1886 // Data layout chooses the right type based on supported integer types. 
1887 Type *NewScalarIndexTy = 1888 DL.getIndexType(GEP.getPointerOperandType()->getScalarType()); 1889 1890 gep_type_iterator GTI = gep_type_begin(GEP); 1891 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E; 1892 ++I, ++GTI) { 1893 // Skip indices into struct types. 1894 if (GTI.isStruct()) 1895 continue; 1896 1897 Type *IndexTy = (*I)->getType(); 1898 Type *NewIndexType = 1899 IndexTy->isVectorTy() 1900 ? VectorType::get(NewScalarIndexTy, 1901 cast<VectorType>(IndexTy)->getElementCount()) 1902 : NewScalarIndexTy; 1903 1904 // If the element type has zero size then any index over it is equivalent 1905 // to an index of zero, so replace it with zero if it is not zero already. 1906 Type *EltTy = GTI.getIndexedType(); 1907 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero()) 1908 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) { 1909 *I = Constant::getNullValue(NewIndexType); 1910 MadeChange = true; 1911 } 1912 1913 if (IndexTy != NewIndexType) { 1914 // If we are using a wider index than needed for this platform, shrink 1915 // it to what we need. If narrower, sign-extend it to what we need. 1916 // This explicit cast can make subsequent optimizations more obvious. 1917 *I = Builder.CreateIntCast(*I, NewIndexType, true); 1918 MadeChange = true; 1919 } 1920 } 1921 if (MadeChange) 1922 return &GEP; 1923 1924 // Check to see if the inputs to the PHI node are getelementptr instructions. 1925 if (auto *PN = dyn_cast<PHINode>(PtrOp)) { 1926 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0)); 1927 if (!Op1) 1928 return nullptr; 1929 1930 // Don't fold a GEP into itself through a PHI node. This can only happen 1931 // through the back-edge of a loop. Folding a GEP into itself means that 1932 // the value of the previous iteration needs to be stored in the meantime, 1933 // thus requiring an additional register variable to be live, but not 1934 // actually achieving anything (the GEP still needs to be executed once per 1935 // loop iteration). 1936 if (Op1 == &GEP) 1937 return nullptr; 1938 1939 int DI = -1; 1940 1941 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) { 1942 auto *Op2 = dyn_cast<GetElementPtrInst>(*I); 1943 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands()) 1944 return nullptr; 1945 1946 // As for Op1 above, don't try to fold a GEP into itself. 1947 if (Op2 == &GEP) 1948 return nullptr; 1949 1950 // Keep track of the type as we walk the GEP. 1951 Type *CurTy = nullptr; 1952 1953 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) { 1954 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType()) 1955 return nullptr; 1956 1957 if (Op1->getOperand(J) != Op2->getOperand(J)) { 1958 if (DI == -1) { 1959 // We have not seen any differences yet in the GEPs feeding the 1960 // PHI yet, so we record this one if it is allowed to be a 1961 // variable. 1962 1963 // The first two arguments can vary for any GEP, the rest have to be 1964 // static for struct slots 1965 if (J > 1) { 1966 assert(CurTy && "No current type?"); 1967 if (CurTy->isStructTy()) 1968 return nullptr; 1969 } 1970 1971 DI = J; 1972 } else { 1973 // The GEP is different by more than one input. While this could be 1974 // extended to support GEPs that vary by more than one variable it 1975 // doesn't make sense since it greatly increases the complexity and 1976 // would result in an R+R+R addressing mode which no backend 1977 // directly supports and would need to be broken into several 1978 // simpler instructions anyway. 
1979 return nullptr; 1980 } 1981 } 1982 1983 // Sink down a layer of the type for the next iteration. 1984 if (J > 0) { 1985 if (J == 1) { 1986 CurTy = Op1->getSourceElementType(); 1987 } else { 1988 CurTy = 1989 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J)); 1990 } 1991 } 1992 } 1993 } 1994 1995 // If not all GEPs are identical we'll have to create a new PHI node. 1996 // Check that the old PHI node has only one use so that it will get 1997 // removed. 1998 if (DI != -1 && !PN->hasOneUse()) 1999 return nullptr; 2000 2001 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone()); 2002 if (DI == -1) { 2003 // All the GEPs feeding the PHI are identical. Clone one down into our 2004 // BB so that it can be merged with the current GEP. 2005 } else { 2006 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP 2007 // into the current block so it can be merged, and create a new PHI to 2008 // set that index. 2009 PHINode *NewPN; 2010 { 2011 IRBuilderBase::InsertPointGuard Guard(Builder); 2012 Builder.SetInsertPoint(PN); 2013 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(), 2014 PN->getNumOperands()); 2015 } 2016 2017 for (auto &I : PN->operands()) 2018 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI), 2019 PN->getIncomingBlock(I)); 2020 2021 NewGEP->setOperand(DI, NewPN); 2022 } 2023 2024 GEP.getParent()->getInstList().insert( 2025 GEP.getParent()->getFirstInsertionPt(), NewGEP); 2026 replaceOperand(GEP, 0, NewGEP); 2027 PtrOp = NewGEP; 2028 } 2029 2030 // Combine Indices - If the source pointer to this getelementptr instruction 2031 // is a getelementptr instruction, combine the indices of the two 2032 // getelementptr instructions into a single instruction. 2033 if (auto *Src = dyn_cast<GEPOperator>(PtrOp)) { 2034 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src)) 2035 return nullptr; 2036 2037 // Try to reassociate loop invariant GEP chains to enable LICM. 2038 if (LI && Src->getNumOperands() == 2 && GEP.getNumOperands() == 2 && 2039 Src->hasOneUse()) { 2040 if (Loop *L = LI->getLoopFor(GEP.getParent())) { 2041 Value *GO1 = GEP.getOperand(1); 2042 Value *SO1 = Src->getOperand(1); 2043 // Reassociate the two GEPs if SO1 is variant in the loop and GO1 is 2044 // invariant: this breaks the dependence between GEPs and allows LICM 2045 // to hoist the invariant part out of the loop. 2046 if (L->isLoopInvariant(GO1) && !L->isLoopInvariant(SO1)) { 2047 // We have to be careful here. 2048 // We have something like: 2049 // %src = getelementptr <ty>, <ty>* %base, <ty> %idx 2050 // %gep = getelementptr <ty>, <ty>* %src, <ty> %idx2 2051 // If we just swap idx & idx2 then we could inadvertantly 2052 // change %src from a vector to a scalar, or vice versa. 2053 // Cases: 2054 // 1) %base a scalar & idx a scalar & idx2 a vector 2055 // => Swapping idx & idx2 turns %src into a vector type. 2056 // 2) %base a scalar & idx a vector & idx2 a scalar 2057 // => Swapping idx & idx2 turns %src in a scalar type 2058 // 3) %base, %idx, and %idx2 are scalars 2059 // => %src & %gep are scalars 2060 // => swapping idx & idx2 is safe 2061 // 4) %base a vector 2062 // => %src is a vector 2063 // => swapping idx & idx2 is safe. 
2064 auto *SO0 = Src->getOperand(0); 2065 auto *SO0Ty = SO0->getType(); 2066 if (!isa<VectorType>(GEPType) || // case 3 2067 isa<VectorType>(SO0Ty)) { // case 4 2068 Src->setOperand(1, GO1); 2069 GEP.setOperand(1, SO1); 2070 return &GEP; 2071 } else { 2072 // Case 1 or 2 2073 // -- have to recreate %src & %gep 2074 // put NewSrc at same location as %src 2075 Builder.SetInsertPoint(cast<Instruction>(PtrOp)); 2076 auto *NewSrc = cast<GetElementPtrInst>( 2077 Builder.CreateGEP(GEPEltType, SO0, GO1, Src->getName())); 2078 NewSrc->setIsInBounds(Src->isInBounds()); 2079 auto *NewGEP = GetElementPtrInst::Create(GEPEltType, NewSrc, {SO1}); 2080 NewGEP->setIsInBounds(GEP.isInBounds()); 2081 return NewGEP; 2082 } 2083 } 2084 } 2085 } 2086 2087 // Note that if our source is a gep chain itself then we wait for that 2088 // chain to be resolved before we perform this transformation. This 2089 // avoids us creating a TON of code in some cases. 2090 if (auto *SrcGEP = dyn_cast<GEPOperator>(Src->getOperand(0))) 2091 if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP)) 2092 return nullptr; // Wait until our source is folded to completion. 2093 2094 SmallVector<Value*, 8> Indices; 2095 2096 // Find out whether the last index in the source GEP is a sequential idx. 2097 bool EndsWithSequential = false; 2098 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src); 2099 I != E; ++I) 2100 EndsWithSequential = I.isSequential(); 2101 2102 // Can we combine the two pointer arithmetics offsets? 2103 if (EndsWithSequential) { 2104 // Replace: gep (gep %P, long B), long A, ... 2105 // With: T = long A+B; gep %P, T, ... 2106 Value *SO1 = Src->getOperand(Src->getNumOperands()-1); 2107 Value *GO1 = GEP.getOperand(1); 2108 2109 // If they aren't the same type, then the input hasn't been processed 2110 // by the loop above yet (which canonicalizes sequential index types to 2111 // intptr_t). Just avoid transforming this until the input has been 2112 // normalized. 2113 if (SO1->getType() != GO1->getType()) 2114 return nullptr; 2115 2116 Value *Sum = 2117 SimplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP)); 2118 // Only do the combine when we are sure the cost after the 2119 // merge is never more than that before the merge. 2120 if (Sum == nullptr) 2121 return nullptr; 2122 2123 // Update the GEP in place if possible. 2124 if (Src->getNumOperands() == 2) { 2125 GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))); 2126 replaceOperand(GEP, 0, Src->getOperand(0)); 2127 replaceOperand(GEP, 1, Sum); 2128 return &GEP; 2129 } 2130 Indices.append(Src->op_begin()+1, Src->op_end()-1); 2131 Indices.push_back(Sum); 2132 Indices.append(GEP.op_begin()+2, GEP.op_end()); 2133 } else if (isa<Constant>(*GEP.idx_begin()) && 2134 cast<Constant>(*GEP.idx_begin())->isNullValue() && 2135 Src->getNumOperands() != 1) { 2136 // Otherwise we can do the fold if the first index of the GEP is a zero 2137 Indices.append(Src->op_begin()+1, Src->op_end()); 2138 Indices.append(GEP.idx_begin()+1, GEP.idx_end()); 2139 } 2140 2141 if (!Indices.empty()) 2142 return isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)) 2143 ? GetElementPtrInst::CreateInBounds( 2144 Src->getSourceElementType(), Src->getOperand(0), Indices, 2145 GEP.getName()) 2146 : GetElementPtrInst::Create(Src->getSourceElementType(), 2147 Src->getOperand(0), Indices, 2148 GEP.getName()); 2149 } 2150 2151 // Skip if GEP source element type is scalable. The type alloc size is unknown 2152 // at compile-time. 
2153 if (GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) { 2154 unsigned AS = GEP.getPointerAddressSpace(); 2155 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() == 2156 DL.getIndexSizeInBits(AS)) { 2157 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedSize(); 2158 2159 bool Matched = false; 2160 uint64_t C; 2161 Value *V = nullptr; 2162 if (TyAllocSize == 1) { 2163 V = GEP.getOperand(1); 2164 Matched = true; 2165 } else if (match(GEP.getOperand(1), 2166 m_AShr(m_Value(V), m_ConstantInt(C)))) { 2167 if (TyAllocSize == 1ULL << C) 2168 Matched = true; 2169 } else if (match(GEP.getOperand(1), 2170 m_SDiv(m_Value(V), m_ConstantInt(C)))) { 2171 if (TyAllocSize == C) 2172 Matched = true; 2173 } 2174 2175 if (Matched) { 2176 // Canonicalize (gep i8* X, -(ptrtoint Y)) 2177 // to (inttoptr (sub (ptrtoint X), (ptrtoint Y))) 2178 // The GEP pattern is emitted by the SCEV expander for certain kinds of 2179 // pointer arithmetic. 2180 if (match(V, m_Neg(m_PtrToInt(m_Value())))) { 2181 Operator *Index = cast<Operator>(V); 2182 Value *PtrToInt = Builder.CreatePtrToInt(PtrOp, Index->getType()); 2183 Value *NewSub = Builder.CreateSub(PtrToInt, Index->getOperand(1)); 2184 return CastInst::Create(Instruction::IntToPtr, NewSub, GEPType); 2185 } 2186 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) 2187 // to (bitcast Y) 2188 Value *Y; 2189 if (match(V, m_Sub(m_PtrToInt(m_Value(Y)), 2190 m_PtrToInt(m_Specific(GEP.getOperand(0)))))) 2191 return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y, GEPType); 2192 } 2193 } 2194 } 2195 2196 // We do not handle pointer-vector geps here. 2197 if (GEPType->isVectorTy()) 2198 return nullptr; 2199 2200 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0). 2201 Value *StrippedPtr = PtrOp->stripPointerCasts(); 2202 PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType()); 2203 2204 if (StrippedPtr != PtrOp) { 2205 bool HasZeroPointerIndex = false; 2206 Type *StrippedPtrEltTy = StrippedPtrTy->getElementType(); 2207 2208 if (auto *C = dyn_cast<ConstantInt>(GEP.getOperand(1))) 2209 HasZeroPointerIndex = C->isZero(); 2210 2211 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... 2212 // into : GEP [10 x i8]* X, i32 0, ... 2213 // 2214 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ... 2215 // into : GEP i8* X, ... 2216 // 2217 // This occurs when the program declares an array extern like "int X[];" 2218 if (HasZeroPointerIndex) { 2219 if (auto *CATy = dyn_cast<ArrayType>(GEPEltType)) { 2220 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ? 2221 if (CATy->getElementType() == StrippedPtrEltTy) { 2222 // -> GEP i8* X, ... 2223 SmallVector<Value *, 8> Idx(drop_begin(GEP.indices())); 2224 GetElementPtrInst *Res = GetElementPtrInst::Create( 2225 StrippedPtrEltTy, StrippedPtr, Idx, GEP.getName()); 2226 Res->setIsInBounds(GEP.isInBounds()); 2227 if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) 2228 return Res; 2229 // Insert Res, and create an addrspacecast. 2230 // e.g., 2231 // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ... 2232 // -> 2233 // %0 = GEP i8 addrspace(1)* X, ... 2234 // addrspacecast i8 addrspace(1)* %0 to i8* 2235 return new AddrSpaceCastInst(Builder.Insert(Res), GEPType); 2236 } 2237 2238 if (auto *XATy = dyn_cast<ArrayType>(StrippedPtrEltTy)) { 2239 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ? 2240 if (CATy->getElementType() == XATy->getElementType()) { 2241 // -> GEP [10 x i8]* X, i32 0, ... 
2242 // At this point, we know that the cast source type is a pointer 2243 // to an array of the same type as the destination pointer 2244 // array. Because the array type is never stepped over (there 2245 // is a leading zero) we can fold the cast into this GEP. 2246 if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) { 2247 GEP.setSourceElementType(XATy); 2248 return replaceOperand(GEP, 0, StrippedPtr); 2249 } 2250 // Cannot replace the base pointer directly because StrippedPtr's 2251 // address space is different. Instead, create a new GEP followed by 2252 // an addrspacecast. 2253 // e.g., 2254 // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*), 2255 // i32 0, ... 2256 // -> 2257 // %0 = GEP [10 x i8] addrspace(1)* X, ... 2258 // addrspacecast i8 addrspace(1)* %0 to i8* 2259 SmallVector<Value *, 8> Idx(GEP.indices()); 2260 Value *NewGEP = 2261 GEP.isInBounds() 2262 ? Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr, 2263 Idx, GEP.getName()) 2264 : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Idx, 2265 GEP.getName()); 2266 return new AddrSpaceCastInst(NewGEP, GEPType); 2267 } 2268 } 2269 } 2270 } else if (GEP.getNumOperands() == 2 && !IsGEPSrcEleScalable) { 2271 // Skip if GEP source element type is scalable. The type alloc size is 2272 // unknown at compile-time. 2273 // Transform things like: %t = getelementptr i32* 2274 // bitcast ([2 x i32]* %str to i32*), i32 %V into: %t1 = getelementptr [2 2275 // x i32]* %str, i32 0, i32 %V; bitcast 2276 if (StrippedPtrEltTy->isArrayTy() && 2277 DL.getTypeAllocSize(StrippedPtrEltTy->getArrayElementType()) == 2278 DL.getTypeAllocSize(GEPEltType)) { 2279 Type *IdxType = DL.getIndexType(GEPType); 2280 Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) }; 2281 Value *NewGEP = 2282 GEP.isInBounds() 2283 ? Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr, Idx, 2284 GEP.getName()) 2285 : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Idx, 2286 GEP.getName()); 2287 2288 // V and GEP are both pointer types --> BitCast 2289 return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, GEPType); 2290 } 2291 2292 // Transform things like: 2293 // %V = mul i64 %N, 4 2294 // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V 2295 // into: %t1 = getelementptr i32* %arr, i32 %N; bitcast 2296 if (GEPEltType->isSized() && StrippedPtrEltTy->isSized()) { 2297 // Check that changing the type amounts to dividing the index by a scale 2298 // factor. 2299 uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedSize(); 2300 uint64_t SrcSize = DL.getTypeAllocSize(StrippedPtrEltTy).getFixedSize(); 2301 if (ResSize && SrcSize % ResSize == 0) { 2302 Value *Idx = GEP.getOperand(1); 2303 unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); 2304 uint64_t Scale = SrcSize / ResSize; 2305 2306 // Earlier transforms ensure that the index has the right type 2307 // according to Data Layout, which considerably simplifies the 2308 // logic by eliminating implicit casts. 2309 assert(Idx->getType() == DL.getIndexType(GEPType) && 2310 "Index type does not match the Data Layout preferences"); 2311 2312 bool NSW; 2313 if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) { 2314 // Successfully decomposed Idx as NewIdx * Scale, form a new GEP. 2315 // If the multiplication NewIdx * Scale may overflow then the new 2316 // GEP may not be "inbounds". 2317 Value *NewGEP = 2318 GEP.isInBounds() && NSW 2319 ? 
Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr, 2320 NewIdx, GEP.getName()) 2321 : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, NewIdx, 2322 GEP.getName()); 2323 2324 // The NewGEP must be pointer typed, so must the old one -> BitCast 2325 return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, 2326 GEPType); 2327 } 2328 } 2329 } 2330 2331 // Similarly, transform things like: 2332 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp 2333 // (where tmp = 8*tmp2) into: 2334 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast 2335 if (GEPEltType->isSized() && StrippedPtrEltTy->isSized() && 2336 StrippedPtrEltTy->isArrayTy()) { 2337 // Check that changing to the array element type amounts to dividing the 2338 // index by a scale factor. 2339 uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedSize(); 2340 uint64_t ArrayEltSize = 2341 DL.getTypeAllocSize(StrippedPtrEltTy->getArrayElementType()) 2342 .getFixedSize(); 2343 if (ResSize && ArrayEltSize % ResSize == 0) { 2344 Value *Idx = GEP.getOperand(1); 2345 unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); 2346 uint64_t Scale = ArrayEltSize / ResSize; 2347 2348 // Earlier transforms ensure that the index has the right type 2349 // according to the Data Layout, which considerably simplifies 2350 // the logic by eliminating implicit casts. 2351 assert(Idx->getType() == DL.getIndexType(GEPType) && 2352 "Index type does not match the Data Layout preferences"); 2353 2354 bool NSW; 2355 if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) { 2356 // Successfully decomposed Idx as NewIdx * Scale, form a new GEP. 2357 // If the multiplication NewIdx * Scale may overflow then the new 2358 // GEP may not be "inbounds". 2359 Type *IndTy = DL.getIndexType(GEPType); 2360 Value *Off[2] = {Constant::getNullValue(IndTy), NewIdx}; 2361 2362 Value *NewGEP = 2363 GEP.isInBounds() && NSW 2364 ? Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr, 2365 Off, GEP.getName()) 2366 : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Off, 2367 GEP.getName()); 2368 // The NewGEP must be pointer typed, so must the old one -> BitCast 2369 return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, 2370 GEPType); 2371 } 2372 } 2373 } 2374 } 2375 } 2376 2377 // addrspacecast between types is canonicalized as a bitcast, then an 2378 // addrspacecast. To take advantage of the below bitcast + struct GEP, look 2379 // through the addrspacecast. 2380 Value *ASCStrippedPtrOp = PtrOp; 2381 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(PtrOp)) { 2382 // X = bitcast A addrspace(1)* to B addrspace(1)* 2383 // Y = addrspacecast A addrspace(1)* to B addrspace(2)* 2384 // Z = gep Y, <...constant indices...> 2385 // Into an addrspacecasted GEP of the struct. 
2386 if (auto *BC = dyn_cast<BitCastInst>(ASC->getOperand(0))) 2387 ASCStrippedPtrOp = BC; 2388 } 2389 2390 if (auto *BCI = dyn_cast<BitCastInst>(ASCStrippedPtrOp)) { 2391 Value *SrcOp = BCI->getOperand(0); 2392 PointerType *SrcType = cast<PointerType>(BCI->getSrcTy()); 2393 Type *SrcEltType = SrcType->getElementType(); 2394 2395 // GEP directly using the source operand if this GEP is accessing an element 2396 // of a bitcasted pointer to vector or array of the same dimensions: 2397 // gep (bitcast <c x ty>* X to [c x ty]*), Y, Z --> gep X, Y, Z 2398 // gep (bitcast [c x ty]* X to <c x ty>*), Y, Z --> gep X, Y, Z 2399 auto areMatchingArrayAndVecTypes = [](Type *ArrTy, Type *VecTy, 2400 const DataLayout &DL) { 2401 auto *VecVTy = cast<FixedVectorType>(VecTy); 2402 return ArrTy->getArrayElementType() == VecVTy->getElementType() && 2403 ArrTy->getArrayNumElements() == VecVTy->getNumElements() && 2404 DL.getTypeAllocSize(ArrTy) == DL.getTypeAllocSize(VecTy); 2405 }; 2406 if (GEP.getNumOperands() == 3 && 2407 ((GEPEltType->isArrayTy() && isa<FixedVectorType>(SrcEltType) && 2408 areMatchingArrayAndVecTypes(GEPEltType, SrcEltType, DL)) || 2409 (isa<FixedVectorType>(GEPEltType) && SrcEltType->isArrayTy() && 2410 areMatchingArrayAndVecTypes(SrcEltType, GEPEltType, DL)))) { 2411 2412 // Create a new GEP here, as using `setOperand()` followed by 2413 // `setSourceElementType()` won't actually update the type of the 2414 // existing GEP Value. Causing issues if this Value is accessed when 2415 // constructing an AddrSpaceCastInst 2416 Value *NGEP = 2417 GEP.isInBounds() 2418 ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, {Ops[1], Ops[2]}) 2419 : Builder.CreateGEP(SrcEltType, SrcOp, {Ops[1], Ops[2]}); 2420 NGEP->takeName(&GEP); 2421 2422 // Preserve GEP address space to satisfy users 2423 if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace()) 2424 return new AddrSpaceCastInst(NGEP, GEPType); 2425 2426 return replaceInstUsesWith(GEP, NGEP); 2427 } 2428 2429 // See if we can simplify: 2430 // X = bitcast A* to B* 2431 // Y = gep X, <...constant indices...> 2432 // into a gep of the original struct. This is important for SROA and alias 2433 // analysis of unions. If "A" is also a bitcast, wait for A/X to be merged. 2434 unsigned OffsetBits = DL.getIndexTypeSizeInBits(GEPType); 2435 APInt Offset(OffsetBits, 0); 2436 if (!isa<BitCastInst>(SrcOp) && GEP.accumulateConstantOffset(DL, Offset)) { 2437 // If this GEP instruction doesn't move the pointer, just replace the GEP 2438 // with a bitcast of the real input to the dest type. 2439 if (!Offset) { 2440 // If the bitcast is of an allocation, and the allocation will be 2441 // converted to match the type of the cast, don't touch this. 2442 if (isa<AllocaInst>(SrcOp) || isAllocationFn(SrcOp, &TLI)) { 2443 // See if the bitcast simplifies, if so, don't nuke this GEP yet. 2444 if (Instruction *I = visitBitCast(*BCI)) { 2445 if (I != BCI) { 2446 I->takeName(BCI); 2447 BCI->getParent()->getInstList().insert(BCI->getIterator(), I); 2448 replaceInstUsesWith(*BCI, I); 2449 } 2450 return &GEP; 2451 } 2452 } 2453 2454 if (SrcType->getPointerAddressSpace() != GEP.getAddressSpace()) 2455 return new AddrSpaceCastInst(SrcOp, GEPType); 2456 return new BitCastInst(SrcOp, GEPType); 2457 } 2458 2459 // Otherwise, if the offset is non-zero, we need to find out if there is a 2460 // field at Offset in 'A's type. If so, we can pull the cast through the 2461 // GEP. 
2462 SmallVector<Value*, 8> NewIndices; 2463 if (FindElementAtOffset(SrcType, Offset.getSExtValue(), NewIndices)) { 2464 Value *NGEP = 2465 GEP.isInBounds() 2466 ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, NewIndices) 2467 : Builder.CreateGEP(SrcEltType, SrcOp, NewIndices); 2468 2469 if (NGEP->getType() == GEPType) 2470 return replaceInstUsesWith(GEP, NGEP); 2471 NGEP->takeName(&GEP); 2472 2473 if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace()) 2474 return new AddrSpaceCastInst(NGEP, GEPType); 2475 return new BitCastInst(NGEP, GEPType); 2476 } 2477 } 2478 } 2479 2480 if (!GEP.isInBounds()) { 2481 unsigned IdxWidth = 2482 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace()); 2483 APInt BasePtrOffset(IdxWidth, 0); 2484 Value *UnderlyingPtrOp = 2485 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, 2486 BasePtrOffset); 2487 if (auto *AI = dyn_cast<AllocaInst>(UnderlyingPtrOp)) { 2488 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) && 2489 BasePtrOffset.isNonNegative()) { 2490 APInt AllocSize( 2491 IdxWidth, 2492 DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinSize()); 2493 if (BasePtrOffset.ule(AllocSize)) { 2494 return GetElementPtrInst::CreateInBounds( 2495 GEP.getSourceElementType(), PtrOp, makeArrayRef(Ops).slice(1), 2496 GEP.getName()); 2497 } 2498 } 2499 } 2500 } 2501 2502 if (Instruction *R = foldSelectGEP(GEP, Builder)) 2503 return R; 2504 2505 return nullptr; 2506 } 2507 2508 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo *TLI, 2509 Instruction *AI) { 2510 if (isa<ConstantPointerNull>(V)) 2511 return true; 2512 if (auto *LI = dyn_cast<LoadInst>(V)) 2513 return isa<GlobalVariable>(LI->getPointerOperand()); 2514 // Two distinct allocations will never be equal. 2515 // We rely on LookThroughBitCast in isAllocLikeFn being false, since looking 2516 // through bitcasts of V can cause 2517 // the result statement below to be true, even when AI and V (ex: 2518 // i8* ->i32* ->i8* of AI) are the same allocations. 2519 return isAllocLikeFn(V, TLI) && V != AI; 2520 } 2521 2522 static bool isAllocSiteRemovable(Instruction *AI, 2523 SmallVectorImpl<WeakTrackingVH> &Users, 2524 const TargetLibraryInfo *TLI) { 2525 SmallVector<Instruction*, 4> Worklist; 2526 Worklist.push_back(AI); 2527 2528 do { 2529 Instruction *PI = Worklist.pop_back_val(); 2530 for (User *U : PI->users()) { 2531 Instruction *I = cast<Instruction>(U); 2532 switch (I->getOpcode()) { 2533 default: 2534 // Give up the moment we see something we can't handle. 2535 return false; 2536 2537 case Instruction::AddrSpaceCast: 2538 case Instruction::BitCast: 2539 case Instruction::GetElementPtr: 2540 Users.emplace_back(I); 2541 Worklist.push_back(I); 2542 continue; 2543 2544 case Instruction::ICmp: { 2545 ICmpInst *ICI = cast<ICmpInst>(I); 2546 // We can fold eq/ne comparisons with null to false/true, respectively. 2547 // We also fold comparisons in some conditions provided the alloc has 2548 // not escaped (see isNeverEqualToUnescapedAlloc). 2549 if (!ICI->isEquality()) 2550 return false; 2551 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0; 2552 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI)) 2553 return false; 2554 Users.emplace_back(I); 2555 continue; 2556 } 2557 2558 case Instruction::Call: 2559 // Ignore no-op and store intrinsics. 
2560 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 2561 switch (II->getIntrinsicID()) { 2562 default: 2563 return false; 2564 2565 case Intrinsic::memmove: 2566 case Intrinsic::memcpy: 2567 case Intrinsic::memset: { 2568 MemIntrinsic *MI = cast<MemIntrinsic>(II); 2569 if (MI->isVolatile() || MI->getRawDest() != PI) 2570 return false; 2571 LLVM_FALLTHROUGH; 2572 } 2573 case Intrinsic::assume: 2574 case Intrinsic::invariant_start: 2575 case Intrinsic::invariant_end: 2576 case Intrinsic::lifetime_start: 2577 case Intrinsic::lifetime_end: 2578 case Intrinsic::objectsize: 2579 Users.emplace_back(I); 2580 continue; 2581 } 2582 } 2583 2584 if (isFreeCall(I, TLI)) { 2585 Users.emplace_back(I); 2586 continue; 2587 } 2588 return false; 2589 2590 case Instruction::Store: { 2591 StoreInst *SI = cast<StoreInst>(I); 2592 if (SI->isVolatile() || SI->getPointerOperand() != PI) 2593 return false; 2594 Users.emplace_back(I); 2595 continue; 2596 } 2597 } 2598 llvm_unreachable("missing a return?"); 2599 } 2600 } while (!Worklist.empty()); 2601 return true; 2602 } 2603 2604 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) { 2605 // If we have a malloc call which is only used in any amount of comparisons to 2606 // null and free calls, delete the calls and replace the comparisons with true 2607 // or false as appropriate. 2608 2609 // This is based on the principle that we can substitute our own allocation 2610 // function (which will never return null) rather than knowledge of the 2611 // specific function being called. In some sense this can change the permitted 2612 // outputs of a program (when we convert a malloc to an alloca, the fact that 2613 // the allocation is now on the stack is potentially visible, for example), 2614 // but we believe in a permissible manner. 2615 SmallVector<WeakTrackingVH, 64> Users; 2616 2617 // If we are removing an alloca with a dbg.declare, insert dbg.value calls 2618 // before each store. 2619 SmallVector<DbgVariableIntrinsic *, 8> DVIs; 2620 std::unique_ptr<DIBuilder> DIB; 2621 if (isa<AllocaInst>(MI)) { 2622 findDbgUsers(DVIs, &MI); 2623 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false)); 2624 } 2625 2626 if (isAllocSiteRemovable(&MI, Users, &TLI)) { 2627 for (unsigned i = 0, e = Users.size(); i != e; ++i) { 2628 // Lowering all @llvm.objectsize calls first because they may 2629 // use a bitcast/GEP of the alloca we are removing. 2630 if (!Users[i]) 2631 continue; 2632 2633 Instruction *I = cast<Instruction>(&*Users[i]); 2634 2635 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 2636 if (II->getIntrinsicID() == Intrinsic::objectsize) { 2637 Value *Result = 2638 lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/true); 2639 replaceInstUsesWith(*I, Result); 2640 eraseInstFromFunction(*I); 2641 Users[i] = nullptr; // Skip examining in the next loop. 2642 } 2643 } 2644 } 2645 for (unsigned i = 0, e = Users.size(); i != e; ++i) { 2646 if (!Users[i]) 2647 continue; 2648 2649 Instruction *I = cast<Instruction>(&*Users[i]); 2650 2651 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) { 2652 replaceInstUsesWith(*C, 2653 ConstantInt::get(Type::getInt1Ty(C->getContext()), 2654 C->isFalseWhenEqual())); 2655 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 2656 for (auto *DVI : DVIs) 2657 if (DVI->isAddressOfVariable()) 2658 ConvertDebugDeclareToDebugValue(DVI, SI, *DIB); 2659 } else { 2660 // Casts, GEP, or anything else: we're about to delete this instruction, 2661 // so it can not have any valid uses. 
2662 replaceInstUsesWith(*I, UndefValue::get(I->getType())); 2663 } 2664 eraseInstFromFunction(*I); 2665 } 2666 2667 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) { 2668 // Replace invoke with a NOP intrinsic to maintain the original CFG 2669 Module *M = II->getModule(); 2670 Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing); 2671 InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(), 2672 None, "", II->getParent()); 2673 } 2674 2675 // Remove debug intrinsics which describe the value contained within the 2676 // alloca. In addition to removing dbg.{declare,addr} which simply point to 2677 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.: 2678 // 2679 // ``` 2680 // define void @foo(i32 %0) { 2681 // %a = alloca i32 ; Deleted. 2682 // store i32 %0, i32* %a 2683 // dbg.value(i32 %0, "arg0") ; Not deleted. 2684 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted. 2685 // call void @trivially_inlinable_no_op(i32* %a) 2686 // ret void 2687 // } 2688 // ``` 2689 // 2690 // This may not be required if we stop describing the contents of allocas 2691 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in 2692 // the LowerDbgDeclare utility. 2693 // 2694 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the 2695 // "arg0" dbg.value may be stale after the call. However, failing to remove 2696 // the DW_OP_deref dbg.value causes large gaps in location coverage. 2697 for (auto *DVI : DVIs) 2698 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref()) 2699 DVI->eraseFromParent(); 2700 2701 return eraseInstFromFunction(MI); 2702 } 2703 return nullptr; 2704 } 2705 2706 /// Move the call to free before a NULL test. 2707 /// 2708 /// Check if this free is accessed after its argument has been test 2709 /// against NULL (property 0). 2710 /// If yes, it is legal to move this call in its predecessor block. 2711 /// 2712 /// The move is performed only if the block containing the call to free 2713 /// will be removed, i.e.: 2714 /// 1. it has only one predecessor P, and P has two successors 2715 /// 2. it contains the call, noops, and an unconditional branch 2716 /// 3. its successor is the same as its predecessor's successor 2717 /// 2718 /// The profitability is out-of concern here and this function should 2719 /// be called only if the caller knows this transformation would be 2720 /// profitable (e.g., for code size). 2721 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI, 2722 const DataLayout &DL) { 2723 Value *Op = FI.getArgOperand(0); 2724 BasicBlock *FreeInstrBB = FI.getParent(); 2725 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor(); 2726 2727 // Validate part of constraint #1: Only one predecessor 2728 // FIXME: We can extend the number of predecessor, but in that case, we 2729 // would duplicate the call to free in each predecessor and it may 2730 // not be profitable even for code size. 2731 if (!PredBB) 2732 return nullptr; 2733 2734 // Validate constraint #2: Does this block contains only the call to 2735 // free, noops, and an unconditional branch? 2736 BasicBlock *SuccBB; 2737 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator(); 2738 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB))) 2739 return nullptr; 2740 2741 // If there are only 2 instructions in the block, at this point, 2742 // this is the call to free and unconditional. 
2743 // If there are more than 2 instructions, check that they are noops 2744 // i.e., they won't hurt the performance of the generated code. 2745 if (FreeInstrBB->size() != 2) { 2746 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) { 2747 if (&Inst == &FI || &Inst == FreeInstrBBTerminator) 2748 continue; 2749 auto *Cast = dyn_cast<CastInst>(&Inst); 2750 if (!Cast || !Cast->isNoopCast(DL)) 2751 return nullptr; 2752 } 2753 } 2754 // Validate the rest of constraint #1 by matching on the pred branch. 2755 Instruction *TI = PredBB->getTerminator(); 2756 BasicBlock *TrueBB, *FalseBB; 2757 ICmpInst::Predicate Pred; 2758 if (!match(TI, m_Br(m_ICmp(Pred, 2759 m_CombineOr(m_Specific(Op), 2760 m_Specific(Op->stripPointerCasts())), 2761 m_Zero()), 2762 TrueBB, FalseBB))) 2763 return nullptr; 2764 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE) 2765 return nullptr; 2766 2767 // Validate constraint #3: Ensure the null case just falls through. 2768 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB)) 2769 return nullptr; 2770 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) && 2771 "Broken CFG: missing edge from predecessor to successor"); 2772 2773 // At this point, we know that everything in FreeInstrBB can be moved 2774 // before TI. 2775 for (BasicBlock::iterator It = FreeInstrBB->begin(), End = FreeInstrBB->end(); 2776 It != End;) { 2777 Instruction &Instr = *It++; 2778 if (&Instr == FreeInstrBBTerminator) 2779 break; 2780 Instr.moveBefore(TI); 2781 } 2782 assert(FreeInstrBB->size() == 1 && 2783 "Only the branch instruction should remain"); 2784 return &FI; 2785 } 2786 2787 Instruction *InstCombinerImpl::visitFree(CallInst &FI) { 2788 Value *Op = FI.getArgOperand(0); 2789 2790 // free undef -> unreachable. 2791 if (isa<UndefValue>(Op)) { 2792 // Leave a marker since we can't modify the CFG here. 2793 CreateNonTerminatorUnreachable(&FI); 2794 return eraseInstFromFunction(FI); 2795 } 2796 2797 // If we have 'free null' delete the instruction. This can happen in stl code 2798 // when lots of inlining happens. 2799 if (isa<ConstantPointerNull>(Op)) 2800 return eraseInstFromFunction(FI); 2801 2802 // If we optimize for code size, try to move the call to free before the null 2803 // test so that simplify cfg can remove the empty block and dead code 2804 // elimination the branch. I.e., helps to turn something like: 2805 // if (foo) free(foo); 2806 // into 2807 // free(foo); 2808 // 2809 // Note that we can only do this for 'free' and not for any flavor of 2810 // 'operator delete'; there is no 'operator delete' symbol for which we are 2811 // permitted to invent a call, even if we're passing in a null pointer. 2812 if (MinimizeSize) { 2813 LibFunc Func; 2814 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free) 2815 if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL)) 2816 return I; 2817 } 2818 2819 return nullptr; 2820 } 2821 2822 static bool isMustTailCall(Value *V) { 2823 if (auto *CI = dyn_cast<CallInst>(V)) 2824 return CI->isMustTailCall(); 2825 return false; 2826 } 2827 2828 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) { 2829 if (RI.getNumOperands() == 0) // ret void 2830 return nullptr; 2831 2832 Value *ResultOp = RI.getOperand(0); 2833 Type *VTy = ResultOp->getType(); 2834 if (!VTy->isIntegerTy() || isa<Constant>(ResultOp)) 2835 return nullptr; 2836 2837 // Don't replace result of musttail calls. 
2838 if (isMustTailCall(ResultOp)) 2839 return nullptr; 2840 2841 // There might be assume intrinsics dominating this return that completely 2842 // determine the value. If so, constant fold it. 2843 KnownBits Known = computeKnownBits(ResultOp, 0, &RI); 2844 if (Known.isConstant()) 2845 return replaceOperand(RI, 0, 2846 Constant::getIntegerValue(VTy, Known.getConstant())); 2847 2848 return nullptr; 2849 } 2850 2851 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) { 2852 // Try to remove the previous instruction if it must lead to unreachable. 2853 // This includes instructions like stores and "llvm.assume" that may not get 2854 // removed by simple dead code elimination. 2855 Instruction *Prev = I.getPrevNonDebugInstruction(); 2856 if (Prev && !Prev->isEHPad() && 2857 isGuaranteedToTransferExecutionToSuccessor(Prev)) { 2858 // Temporarily disable removal of volatile stores preceding unreachable, 2859 // pending a potential LangRef change permitting volatile stores to trap. 2860 // TODO: Either remove this code, or properly integrate the check into 2861 // isGuaranteedToTransferExecutionToSuccessor(). 2862 if (auto *SI = dyn_cast<StoreInst>(Prev)) 2863 if (SI->isVolatile()) 2864 return nullptr; 2865 2866 // A value may still have uses before we process it here (for example, in 2867 // another unreachable block), so convert those to undef. 2868 replaceInstUsesWith(*Prev, UndefValue::get(Prev->getType())); 2869 eraseInstFromFunction(*Prev); 2870 return &I; 2871 } 2872 return nullptr; 2873 } 2874 2875 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) { 2876 assert(BI.isUnconditional() && "Only for unconditional branches."); 2877 2878 // If this store is the second-to-last instruction in the basic block 2879 // (excluding debug info and bitcasts of pointers) and if the block ends with 2880 // an unconditional branch, try to move the store to the successor block. 2881 2882 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) { 2883 auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) { 2884 return isa<DbgInfoIntrinsic>(BBI) || 2885 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()); 2886 }; 2887 2888 BasicBlock::iterator FirstInstr = BBI->getParent()->begin(); 2889 do { 2890 if (BBI != FirstInstr) 2891 --BBI; 2892 } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI)); 2893 2894 return dyn_cast<StoreInst>(BBI); 2895 }; 2896 2897 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI))) 2898 if (mergeStoreIntoSuccessor(*SI)) 2899 return &BI; 2900 2901 return nullptr; 2902 } 2903 2904 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) { 2905 if (BI.isUnconditional()) 2906 return visitUnconditionalBranchInst(BI); 2907 2908 // Change br (not X), label True, label False to: br X, label False, True 2909 Value *X = nullptr; 2910 if (match(&BI, m_Br(m_Not(m_Value(X)), m_BasicBlock(), m_BasicBlock())) && 2911 !isa<Constant>(X)) { 2912 // Swap Destinations and condition... 2913 BI.swapSuccessors(); 2914 return replaceOperand(BI, 0, X); 2915 } 2916 2917 // If the condition is irrelevant, remove the use so that other 2918 // transforms on the condition become more effective. 2919 if (!isa<ConstantInt>(BI.getCondition()) && 2920 BI.getSuccessor(0) == BI.getSuccessor(1)) 2921 return replaceOperand( 2922 BI, 0, ConstantInt::getFalse(BI.getCondition()->getType())); 2923 2924 // Canonicalize, for example, fcmp_one -> fcmp_oeq. 
2925 CmpInst::Predicate Pred; 2926 if (match(&BI, m_Br(m_OneUse(m_FCmp(Pred, m_Value(), m_Value())), 2927 m_BasicBlock(), m_BasicBlock())) && 2928 !isCanonicalPredicate(Pred)) { 2929 // Swap destinations and condition. 2930 CmpInst *Cond = cast<CmpInst>(BI.getCondition()); 2931 Cond->setPredicate(CmpInst::getInversePredicate(Pred)); 2932 BI.swapSuccessors(); 2933 Worklist.push(Cond); 2934 return &BI; 2935 } 2936 2937 return nullptr; 2938 } 2939 2940 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) { 2941 Value *Cond = SI.getCondition(); 2942 Value *Op0; 2943 ConstantInt *AddRHS; 2944 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) { 2945 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'. 2946 for (auto Case : SI.cases()) { 2947 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS); 2948 assert(isa<ConstantInt>(NewCase) && 2949 "Result of expression should be constant"); 2950 Case.setValue(cast<ConstantInt>(NewCase)); 2951 } 2952 return replaceOperand(SI, 0, Op0); 2953 } 2954 2955 KnownBits Known = computeKnownBits(Cond, 0, &SI); 2956 unsigned LeadingKnownZeros = Known.countMinLeadingZeros(); 2957 unsigned LeadingKnownOnes = Known.countMinLeadingOnes(); 2958 2959 // Compute the number of leading bits we can ignore. 2960 // TODO: A better way to determine this would use ComputeNumSignBits(). 2961 for (auto &C : SI.cases()) { 2962 LeadingKnownZeros = std::min( 2963 LeadingKnownZeros, C.getCaseValue()->getValue().countLeadingZeros()); 2964 LeadingKnownOnes = std::min( 2965 LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes()); 2966 } 2967 2968 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes); 2969 2970 // Shrink the condition operand if the new type is smaller than the old type. 2971 // But do not shrink to a non-standard type, because backend can't generate 2972 // good code for that yet. 2973 // TODO: We can make it aggressive again after fixing PR39569. 2974 if (NewWidth > 0 && NewWidth < Known.getBitWidth() && 2975 shouldChangeType(Known.getBitWidth(), NewWidth)) { 2976 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth); 2977 Builder.SetInsertPoint(&SI); 2978 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc"); 2979 2980 for (auto Case : SI.cases()) { 2981 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth); 2982 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase)); 2983 } 2984 return replaceOperand(SI, 0, NewCond); 2985 } 2986 2987 return nullptr; 2988 } 2989 2990 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) { 2991 Value *Agg = EV.getAggregateOperand(); 2992 2993 if (!EV.hasIndices()) 2994 return replaceInstUsesWith(EV, Agg); 2995 2996 if (Value *V = SimplifyExtractValueInst(Agg, EV.getIndices(), 2997 SQ.getWithInstruction(&EV))) 2998 return replaceInstUsesWith(EV, V); 2999 3000 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) { 3001 // We're extracting from an insertvalue instruction, compare the indices 3002 const unsigned *exti, *exte, *insi, *inse; 3003 for (exti = EV.idx_begin(), insi = IV->idx_begin(), 3004 exte = EV.idx_end(), inse = IV->idx_end(); 3005 exti != exte && insi != inse; 3006 ++exti, ++insi) { 3007 if (*insi != *exti) 3008 // The insert and extract both reference distinctly different elements. 3009 // This means the extract is not influenced by the insert, and we can 3010 // replace the aggregate operand of the extract with the aggregate 3011 // operand of the insert. 
i.e., replace
3012 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
3013 // %E = extractvalue { i32, { i32 } } %I, 0
3014 // with
3015 // %E = extractvalue { i32, { i32 } } %A, 0
3016 return ExtractValueInst::Create(IV->getAggregateOperand(),
3017 EV.getIndices());
3018 }
3019 if (exti == exte && insi == inse)
3020 // Both iterators are at the end: Index lists are identical. Replace
3021 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
3022 // %C = extractvalue { i32, { i32 } } %B, 1, 0
3023 // with "i32 42"
3024 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
3025 if (exti == exte) {
3026 // The extract list is a prefix of the insert list. i.e. replace
3027 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
3028 // %E = extractvalue { i32, { i32 } } %I, 1
3029 // with
3030 // %X = extractvalue { i32, { i32 } } %A, 1
3031 // %E = insertvalue { i32 } %X, i32 42, 0
3032 // by switching the order of the insert and extract (though the
3033 // insertvalue should be left in, since it may have other uses).
3034 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
3035 EV.getIndices());
3036 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
3037 makeArrayRef(insi, inse));
3038 }
3039 if (insi == inse)
3040 // The insert list is a prefix of the extract list.
3041 // We can simply remove the common indices from the extract and make it
3042 // operate on the inserted value instead of the insertvalue result.
3043 // i.e., replace
3044 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
3045 // %E = extractvalue { i32, { i32 } } %I, 1, 0
3046 // with
3047 // %E = extractvalue { i32 } { i32 42 }, 0
3048 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
3049 makeArrayRef(exti, exte));
3050 }
3051 if (WithOverflowInst *WO = dyn_cast<WithOverflowInst>(Agg)) {
3052 // We're extracting from an overflow intrinsic; see if we're the only user,
3053 // which allows us to simplify multiple result intrinsics to simpler
3054 // things that just get one value.
3055 if (WO->hasOneUse()) {
3056 // Check if we're grabbing only the result of a 'with overflow' intrinsic
3057 // and replace it with a traditional binary instruction.
3058 if (*EV.idx_begin() == 0) {
3059 Instruction::BinaryOps BinOp = WO->getBinaryOp();
3060 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
3061 replaceInstUsesWith(*WO, UndefValue::get(WO->getType()));
3062 eraseInstFromFunction(*WO);
3063 return BinaryOperator::Create(BinOp, LHS, RHS);
3064 }
3065
3066 // If the normal result of the add is dead, and the RHS is a constant,
3067 // we can transform this into a range comparison.
3068 // overflow = uadd a, -4 --> overflow = icmp ugt a, 3
3069 if (WO->getIntrinsicID() == Intrinsic::uadd_with_overflow)
3070 if (ConstantInt *CI = dyn_cast<ConstantInt>(WO->getRHS()))
3071 return new ICmpInst(ICmpInst::ICMP_UGT, WO->getLHS(),
3072 ConstantExpr::getNot(CI));
3073 }
3074 }
3075 if (LoadInst *L = dyn_cast<LoadInst>(Agg))
3076 // If the (non-volatile) load only has one use, we can rewrite this to a
3077 // load from a GEP. This reduces the size of the load. If a load is used
3078 // only by extractvalue instructions then this either must have been
3079 // optimized before, or it is a struct with padding, in which case we
3080 // don't want to do the transformation as it loses padding knowledge.
3081 if (L->isSimple() && L->hasOneUse()) {
3082 // extractvalue has integer indices, getelementptr has Value*s. Convert.
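// For illustration only (the IR names here are made up for exposition), given
//   %L = load { i32, i32 }, { i32, i32 }* %p
//   %E = extractvalue { i32, i32 } %L, 1
// where %L has no other users, the code below instead emits
//   %G = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 1
//   %E = load i32, i32* %G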
3083 SmallVector<Value*, 4> Indices;
3084 // Prefix an i32 0 since we need the first element.
3085 Indices.push_back(Builder.getInt32(0));
3086 for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
3087 I != E; ++I)
3088 Indices.push_back(Builder.getInt32(*I));
3089
3090 // We need to insert these at the location of the old load, not at that of
3091 // the extractvalue.
3092 Builder.SetInsertPoint(L);
3093 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
3094 L->getPointerOperand(), Indices);
3095 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
3096 // Whatever aliasing information we had for the original load must also
3097 // hold for the smaller load, so propagate the annotations.
3098 AAMDNodes Nodes;
3099 L->getAAMetadata(Nodes);
3100 NL->setAAMetadata(Nodes);
3101 // Returning the load directly will cause the main loop to insert it in
3102 // the wrong spot, so use replaceInstUsesWith().
3103 return replaceInstUsesWith(EV, NL);
3104 }
3105 // We could simplify extracts from other values. Note that nested extracts may
3106 // already be simplified implicitly by the above: extract (extract (insert) )
3107 // will be translated into extract ( insert ( extract ) ) first and then just
3108 // the value inserted, if appropriate. Similarly for extracts from single-use
3109 // loads: extract (extract (load)) will be translated to extract (load (gep))
3110 // and if again single-use then via load (gep (gep)) to load (gep).
3111 // However, double extracts from e.g. function arguments or return values
3112 // aren't handled yet.
3113 return nullptr;
3114 }
3115
3116 /// Return 'true' if the given typeinfo will match anything.
3117 static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
3118 switch (Personality) {
3119 case EHPersonality::GNU_C:
3120 case EHPersonality::GNU_C_SjLj:
3121 case EHPersonality::Rust:
3122 // The GCC C EH and Rust personalities only exist to support cleanups, so
3123 // it's not clear what the semantics of catch clauses are.
3124 return false;
3125 case EHPersonality::Unknown:
3126 return false;
3127 case EHPersonality::GNU_Ada:
3128 // While __gnat_all_others_value will match any Ada exception, it doesn't
3129 // match foreign exceptions (or didn't, before gcc-4.7).
3130 return false;
3131 case EHPersonality::GNU_CXX:
3132 case EHPersonality::GNU_CXX_SjLj:
3133 case EHPersonality::GNU_ObjC:
3134 case EHPersonality::MSVC_X86SEH:
3135 case EHPersonality::MSVC_TableSEH:
3136 case EHPersonality::MSVC_CXX:
3137 case EHPersonality::CoreCLR:
3138 case EHPersonality::Wasm_CXX:
3139 case EHPersonality::XL_CXX:
3140 return TypeInfo->isNullValue();
3141 }
3142 llvm_unreachable("invalid enum");
3143 }
3144
3145 static bool shorter_filter(const Value *LHS, const Value *RHS) {
3146 return
3147 cast<ArrayType>(LHS->getType())->getNumElements()
3148 <
3149 cast<ArrayType>(RHS->getType())->getNumElements();
3150 }
3151
3152 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
3153 // The logic here should be correct for any real-world personality function.
3154 // However if that turns out not to be true, the offending logic can always
3155 // be conditioned on the personality function, like the catch-all logic is.
3156 EHPersonality Personality =
3157 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
3158
3159 // Simplify the list of clauses, e.g., by removing repeated catch clauses
3160 // (these are often created by inlining).
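// The walk below drops duplicate catch clauses, stops at a catch-all, uniques
// (and possibly shrinks) filter clauses, and records whether a replacement
// landingpad needs to be built.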
3161 bool MakeNewInstruction = false; // If true, recreate using the following: 3162 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction; 3163 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup. 3164 3165 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already. 3166 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) { 3167 bool isLastClause = i + 1 == e; 3168 if (LI.isCatch(i)) { 3169 // A catch clause. 3170 Constant *CatchClause = LI.getClause(i); 3171 Constant *TypeInfo = CatchClause->stripPointerCasts(); 3172 3173 // If we already saw this clause, there is no point in having a second 3174 // copy of it. 3175 if (AlreadyCaught.insert(TypeInfo).second) { 3176 // This catch clause was not already seen. 3177 NewClauses.push_back(CatchClause); 3178 } else { 3179 // Repeated catch clause - drop the redundant copy. 3180 MakeNewInstruction = true; 3181 } 3182 3183 // If this is a catch-all then there is no point in keeping any following 3184 // clauses or marking the landingpad as having a cleanup. 3185 if (isCatchAll(Personality, TypeInfo)) { 3186 if (!isLastClause) 3187 MakeNewInstruction = true; 3188 CleanupFlag = false; 3189 break; 3190 } 3191 } else { 3192 // A filter clause. If any of the filter elements were already caught 3193 // then they can be dropped from the filter. It is tempting to try to 3194 // exploit the filter further by saying that any typeinfo that does not 3195 // occur in the filter can't be caught later (and thus can be dropped). 3196 // However this would be wrong, since typeinfos can match without being 3197 // equal (for example if one represents a C++ class, and the other some 3198 // class derived from it). 3199 assert(LI.isFilter(i) && "Unsupported landingpad clause!"); 3200 Constant *FilterClause = LI.getClause(i); 3201 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType()); 3202 unsigned NumTypeInfos = FilterType->getNumElements(); 3203 3204 // An empty filter catches everything, so there is no point in keeping any 3205 // following clauses or marking the landingpad as having a cleanup. By 3206 // dealing with this case here the following code is made a bit simpler. 3207 if (!NumTypeInfos) { 3208 NewClauses.push_back(FilterClause); 3209 if (!isLastClause) 3210 MakeNewInstruction = true; 3211 CleanupFlag = false; 3212 break; 3213 } 3214 3215 bool MakeNewFilter = false; // If true, make a new filter. 3216 SmallVector<Constant *, 16> NewFilterElts; // New elements. 3217 if (isa<ConstantAggregateZero>(FilterClause)) { 3218 // Not an empty filter - it contains at least one null typeinfo. 3219 assert(NumTypeInfos > 0 && "Should have handled empty filter already!"); 3220 Constant *TypeInfo = 3221 Constant::getNullValue(FilterType->getElementType()); 3222 // If this typeinfo is a catch-all then the filter can never match. 3223 if (isCatchAll(Personality, TypeInfo)) { 3224 // Throw the filter away. 3225 MakeNewInstruction = true; 3226 continue; 3227 } 3228 3229 // There is no point in having multiple copies of this typeinfo, so 3230 // discard all but the first copy if there is more than one. 3231 NewFilterElts.push_back(TypeInfo); 3232 if (NumTypeInfos > 1) 3233 MakeNewFilter = true; 3234 } else { 3235 ConstantArray *Filter = cast<ConstantArray>(FilterClause); 3236 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements. 3237 NewFilterElts.reserve(NumTypeInfos); 3238 3239 // Remove any filter elements that were already caught or that already 3240 // occurred in the filter. 
While there, see if any of the elements are 3241 // catch-alls. If so, the filter can be discarded. 3242 bool SawCatchAll = false; 3243 for (unsigned j = 0; j != NumTypeInfos; ++j) { 3244 Constant *Elt = Filter->getOperand(j); 3245 Constant *TypeInfo = Elt->stripPointerCasts(); 3246 if (isCatchAll(Personality, TypeInfo)) { 3247 // This element is a catch-all. Bail out, noting this fact. 3248 SawCatchAll = true; 3249 break; 3250 } 3251 3252 // Even if we've seen a type in a catch clause, we don't want to 3253 // remove it from the filter. An unexpected type handler may be 3254 // set up for a call site which throws an exception of the same 3255 // type caught. In order for the exception thrown by the unexpected 3256 // handler to propagate correctly, the filter must be correctly 3257 // described for the call site. 3258 // 3259 // Example: 3260 // 3261 // void unexpected() { throw 1;} 3262 // void foo() throw (int) { 3263 // std::set_unexpected(unexpected); 3264 // try { 3265 // throw 2.0; 3266 // } catch (int i) {} 3267 // } 3268 3269 // There is no point in having multiple copies of the same typeinfo in 3270 // a filter, so only add it if we didn't already. 3271 if (SeenInFilter.insert(TypeInfo).second) 3272 NewFilterElts.push_back(cast<Constant>(Elt)); 3273 } 3274 // A filter containing a catch-all cannot match anything by definition. 3275 if (SawCatchAll) { 3276 // Throw the filter away. 3277 MakeNewInstruction = true; 3278 continue; 3279 } 3280 3281 // If we dropped something from the filter, make a new one. 3282 if (NewFilterElts.size() < NumTypeInfos) 3283 MakeNewFilter = true; 3284 } 3285 if (MakeNewFilter) { 3286 FilterType = ArrayType::get(FilterType->getElementType(), 3287 NewFilterElts.size()); 3288 FilterClause = ConstantArray::get(FilterType, NewFilterElts); 3289 MakeNewInstruction = true; 3290 } 3291 3292 NewClauses.push_back(FilterClause); 3293 3294 // If the new filter is empty then it will catch everything so there is 3295 // no point in keeping any following clauses or marking the landingpad 3296 // as having a cleanup. The case of the original filter being empty was 3297 // already handled above. 3298 if (MakeNewFilter && !NewFilterElts.size()) { 3299 assert(MakeNewInstruction && "New filter but not a new instruction!"); 3300 CleanupFlag = false; 3301 break; 3302 } 3303 } 3304 } 3305 3306 // If several filters occur in a row then reorder them so that the shortest 3307 // filters come first (those with the smallest number of elements). This is 3308 // advantageous because shorter filters are more likely to match, speeding up 3309 // unwinding, but mostly because it increases the effectiveness of the other 3310 // filter optimizations below. 3311 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) { 3312 unsigned j; 3313 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters. 3314 for (j = i; j != e; ++j) 3315 if (!isa<ArrayType>(NewClauses[j]->getType())) 3316 break; 3317 3318 // Check whether the filters are already sorted by length. We need to know 3319 // if sorting them is actually going to do anything so that we only make a 3320 // new landingpad instruction if it does. 3321 for (unsigned k = i; k + 1 < j; ++k) 3322 if (shorter_filter(NewClauses[k+1], NewClauses[k])) { 3323 // Not sorted, so sort the filters now. Doing an unstable sort would be 3324 // correct too but reordering filters pointlessly might confuse users. 
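// A stable sort keeps equal-length filters in their original relative order.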
3325 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j, 3326 shorter_filter); 3327 MakeNewInstruction = true; 3328 break; 3329 } 3330 3331 // Look for the next batch of filters. 3332 i = j + 1; 3333 } 3334 3335 // If typeinfos matched if and only if equal, then the elements of a filter L 3336 // that occurs later than a filter F could be replaced by the intersection of 3337 // the elements of F and L. In reality two typeinfos can match without being 3338 // equal (for example if one represents a C++ class, and the other some class 3339 // derived from it) so it would be wrong to perform this transform in general. 3340 // However the transform is correct and useful if F is a subset of L. In that 3341 // case L can be replaced by F, and thus removed altogether since repeating a 3342 // filter is pointless. So here we look at all pairs of filters F and L where 3343 // L follows F in the list of clauses, and remove L if every element of F is 3344 // an element of L. This can occur when inlining C++ functions with exception 3345 // specifications. 3346 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) { 3347 // Examine each filter in turn. 3348 Value *Filter = NewClauses[i]; 3349 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType()); 3350 if (!FTy) 3351 // Not a filter - skip it. 3352 continue; 3353 unsigned FElts = FTy->getNumElements(); 3354 // Examine each filter following this one. Doing this backwards means that 3355 // we don't have to worry about filters disappearing under us when removed. 3356 for (unsigned j = NewClauses.size() - 1; j != i; --j) { 3357 Value *LFilter = NewClauses[j]; 3358 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType()); 3359 if (!LTy) 3360 // Not a filter - skip it. 3361 continue; 3362 // If Filter is a subset of LFilter, i.e. every element of Filter is also 3363 // an element of LFilter, then discard LFilter. 3364 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j; 3365 // If Filter is empty then it is a subset of LFilter. 3366 if (!FElts) { 3367 // Discard LFilter. 3368 NewClauses.erase(J); 3369 MakeNewInstruction = true; 3370 // Move on to the next filter. 3371 continue; 3372 } 3373 unsigned LElts = LTy->getNumElements(); 3374 // If Filter is longer than LFilter then it cannot be a subset of it. 3375 if (FElts > LElts) 3376 // Move on to the next filter. 3377 continue; 3378 // At this point we know that LFilter has at least one element. 3379 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros. 3380 // Filter is a subset of LFilter iff Filter contains only zeros (as we 3381 // already know that Filter is not longer than LFilter). 3382 if (isa<ConstantAggregateZero>(Filter)) { 3383 assert(FElts <= LElts && "Should have handled this case earlier!"); 3384 // Discard LFilter. 3385 NewClauses.erase(J); 3386 MakeNewInstruction = true; 3387 } 3388 // Move on to the next filter. 3389 continue; 3390 } 3391 ConstantArray *LArray = cast<ConstantArray>(LFilter); 3392 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros. 3393 // Since Filter is non-empty and contains only zeros, it is a subset of 3394 // LFilter iff LFilter contains a zero. 3395 assert(FElts > 0 && "Should have eliminated the empty filter earlier!"); 3396 for (unsigned l = 0; l != LElts; ++l) 3397 if (LArray->getOperand(l)->isNullValue()) { 3398 // LFilter contains a zero - discard it. 3399 NewClauses.erase(J); 3400 MakeNewInstruction = true; 3401 break; 3402 } 3403 // Move on to the next filter. 
3404 continue; 3405 } 3406 // At this point we know that both filters are ConstantArrays. Loop over 3407 // operands to see whether every element of Filter is also an element of 3408 // LFilter. Since filters tend to be short this is probably faster than 3409 // using a method that scales nicely. 3410 ConstantArray *FArray = cast<ConstantArray>(Filter); 3411 bool AllFound = true; 3412 for (unsigned f = 0; f != FElts; ++f) { 3413 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts(); 3414 AllFound = false; 3415 for (unsigned l = 0; l != LElts; ++l) { 3416 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts(); 3417 if (LTypeInfo == FTypeInfo) { 3418 AllFound = true; 3419 break; 3420 } 3421 } 3422 if (!AllFound) 3423 break; 3424 } 3425 if (AllFound) { 3426 // Discard LFilter. 3427 NewClauses.erase(J); 3428 MakeNewInstruction = true; 3429 } 3430 // Move on to the next filter. 3431 } 3432 } 3433 3434 // If we changed any of the clauses, replace the old landingpad instruction 3435 // with a new one. 3436 if (MakeNewInstruction) { 3437 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(), 3438 NewClauses.size()); 3439 for (unsigned i = 0, e = NewClauses.size(); i != e; ++i) 3440 NLI->addClause(NewClauses[i]); 3441 // A landing pad with no clauses must have the cleanup flag set. It is 3442 // theoretically possible, though highly unlikely, that we eliminated all 3443 // clauses. If so, force the cleanup flag to true. 3444 if (NewClauses.empty()) 3445 CleanupFlag = true; 3446 NLI->setCleanup(CleanupFlag); 3447 return NLI; 3448 } 3449 3450 // Even if none of the clauses changed, we may nonetheless have understood 3451 // that the cleanup flag is pointless. Clear it if so. 3452 if (LI.isCleanup() != CleanupFlag) { 3453 assert(!CleanupFlag && "Adding a cleanup, not removing one?!"); 3454 LI.setCleanup(CleanupFlag); 3455 return &LI; 3456 } 3457 3458 return nullptr; 3459 } 3460 3461 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) { 3462 Value *Op0 = I.getOperand(0); 3463 3464 if (Value *V = SimplifyFreezeInst(Op0, SQ.getWithInstruction(&I))) 3465 return replaceInstUsesWith(I, V); 3466 3467 // freeze (phi const, x) --> phi const, (freeze x) 3468 if (auto *PN = dyn_cast<PHINode>(Op0)) { 3469 if (Instruction *NV = foldOpIntoPhi(I, PN)) 3470 return NV; 3471 } 3472 3473 if (match(Op0, m_Undef())) { 3474 // If I is freeze(undef), see its uses and fold it to the best constant. 3475 // - or: pick -1 3476 // - select's condition: pick the value that leads to choosing a constant 3477 // - other ops: pick 0 3478 Constant *BestValue = nullptr; 3479 Constant *NullValue = Constant::getNullValue(I.getType()); 3480 for (const auto *U : I.users()) { 3481 Constant *C = NullValue; 3482 3483 if (match(U, m_Or(m_Value(), m_Value()))) 3484 C = Constant::getAllOnesValue(I.getType()); 3485 else if (const auto *SI = dyn_cast<SelectInst>(U)) { 3486 if (SI->getCondition() == &I) { 3487 APInt CondVal(1, isa<Constant>(SI->getFalseValue()) ? 0 : 1); 3488 C = Constant::getIntegerValue(I.getType(), CondVal); 3489 } 3490 } 3491 3492 if (!BestValue) 3493 BestValue = C; 3494 else if (BestValue != C) 3495 BestValue = NullValue; 3496 } 3497 3498 return replaceInstUsesWith(I, BestValue); 3499 } 3500 3501 return nullptr; 3502 } 3503 3504 /// Try to move the specified instruction from its current block into the 3505 /// beginning of DestBlock, which can only happen if it's safe to move the 3506 /// instruction past all of the instructions between it and the end of its 3507 /// block. 
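/// Returns true if the instruction was sunk. Debug intrinsic users in the
/// source block are salvaged or cloned so that variable locations survive the
/// move.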
3508 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
3509 assert(I->getSingleUndroppableUse() && "Invariants didn't hold!");
3510 BasicBlock *SrcBlock = I->getParent();
3511
3512 // Cannot move control-flow-involving instructions, volatile loads, vaarg, etc.
3513 if (isa<PHINode>(I) || I->isEHPad() || I->mayHaveSideEffects() ||
3514 I->isTerminator())
3515 return false;
3516
3517 // Do not sink static or dynamic alloca instructions. Static allocas must
3518 // remain in the entry block, and dynamic allocas must not be sunk in between
3519 // a stacksave / stackrestore pair, which would incorrectly shorten its
3520 // lifetime.
3521 if (isa<AllocaInst>(I))
3522 return false;
3523
3524 // Do not sink into catchswitch blocks.
3525 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
3526 return false;
3527
3528 // Do not sink convergent call instructions.
3529 if (auto *CI = dyn_cast<CallInst>(I)) {
3530 if (CI->isConvergent())
3531 return false;
3532 }
3533 // We can only sink load instructions if there is nothing between the load and
3534 // the end of the block that could change the value.
3535 if (I->mayReadFromMemory()) {
3536 // We don't want to do any sophisticated alias analysis, so we only check
3537 // the instructions after I in I's parent block if we try to sink to its
3538 // successor block.
3539 if (DestBlock->getUniquePredecessor() != I->getParent())
3540 return false;
3541 for (BasicBlock::iterator Scan = I->getIterator(),
3542 E = I->getParent()->end();
3543 Scan != E; ++Scan)
3544 if (Scan->mayWriteToMemory())
3545 return false;
3546 }
3547
3548 I->dropDroppableUses([DestBlock](const Use *U) {
3549 if (auto *I = dyn_cast<Instruction>(U->getUser()))
3550 return I->getParent() != DestBlock;
3551 return true;
3552 });
3553 // FIXME: We could remove droppable uses that are not dominated by
3554 // the new position.
3555
3556 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
3557 I->moveBefore(&*InsertPos);
3558 ++NumSunkInst;
3559
3560 // Also sink all related debug uses from the source basic block. Otherwise we
3561 // get a debug use before the def. Attempt to salvage debug uses first, to
3562 // maximise the range over which variables have a location. If we cannot
3563 // salvage, then mark the location undef: we know it was supposed to receive a
3564 // new location here, but that computation has been sunk.
3565 SmallVector<DbgVariableIntrinsic *, 2> DbgUsers;
3566 findDbgUsers(DbgUsers, I);
3567
3568 // Update the arguments of a dbg.declare instruction, so that it
3569 // does not point into a sunk instruction.
3570 auto updateDbgDeclare = [&I](DbgVariableIntrinsic *DII) {
3571 if (!isa<DbgDeclareInst>(DII))
3572 return false;
3573
3574 if (isa<CastInst>(I))
3575 DII->setOperand(
3576 0, MetadataAsValue::get(I->getContext(),
3577 ValueAsMetadata::get(I->getOperand(0))));
3578 return true;
3579 };
3580
3581 SmallVector<DbgVariableIntrinsic *, 2> DIIClones;
3582 for (auto User : DbgUsers) {
3583 // A dbg.declare instruction should not be cloned, since there can only be
3584 // one per variable fragment. It should be left in the original place
3585 // because the sunk instruction is not an alloca (otherwise we could not be
3586 // here).
3587 if (User->getParent() != SrcBlock || updateDbgDeclare(User))
3588 continue;
3589
3590 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
3591 LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
3592 }
3593
3594 // Perform salvaging without the clones, then sink the clones.
3595 if (!DIIClones.empty()) {
3596 salvageDebugInfoForDbgValues(*I, DbgUsers);
3597 for (auto &DIIClone : DIIClones) {
3598 DIIClone->insertBefore(&*InsertPos);
3599 LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
3600 }
3601 }
3602
3603 return true;
3604 }
3605
3606 bool InstCombinerImpl::run() {
3607 while (!Worklist.isEmpty()) {
3608 // Walk deferred instructions in reverse order, and push them to the
3609 // worklist, which means they'll end up popped from the worklist in-order.
3610 while (Instruction *I = Worklist.popDeferred()) {
3611 // Check to see if we can DCE the instruction. We already do this here to
3612 // reduce the number of uses and thus allow other folds to trigger.
3613 // Note that eraseInstFromFunction() may push additional instructions on
3614 // the deferred worklist, so this will DCE whole instruction chains.
3615 if (isInstructionTriviallyDead(I, &TLI)) {
3616 eraseInstFromFunction(*I);
3617 ++NumDeadInst;
3618 continue;
3619 }
3620
3621 Worklist.push(I);
3622 }
3623
3624 Instruction *I = Worklist.removeOne();
3625 if (I == nullptr) continue; // skip null values.
3626
3627 // Check to see if we can DCE the instruction.
3628 if (isInstructionTriviallyDead(I, &TLI)) {
3629 eraseInstFromFunction(*I);
3630 ++NumDeadInst;
3631 continue;
3632 }
3633
3634 if (!DebugCounter::shouldExecute(VisitCounter))
3635 continue;
3636
3637 // Instruction isn't dead, see if we can constant propagate it.
3638 if (!I->use_empty() &&
3639 (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
3640 if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
3641 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I
3642 << '\n');
3643
3644 // Add operands to the worklist.
3645 replaceInstUsesWith(*I, C);
3646 ++NumConstProp;
3647 if (isInstructionTriviallyDead(I, &TLI))
3648 eraseInstFromFunction(*I);
3649 MadeIRChange = true;
3650 continue;
3651 }
3652 }
3653
3654 // See if we can trivially sink this instruction to its user if we can
3655 // prove that the successor is not executed more frequently than our block.
3656 if (EnableCodeSinking)
3657 if (Use *SingleUse = I->getSingleUndroppableUse()) {
3658 BasicBlock *BB = I->getParent();
3659 Instruction *UserInst = cast<Instruction>(SingleUse->getUser());
3660 BasicBlock *UserParent;
3661
3662 // Get the block the use occurs in.
3663 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
3664 UserParent = PN->getIncomingBlock(*SingleUse);
3665 else
3666 UserParent = UserInst->getParent();
3667
3668 // Try sinking to another block. If that block is unreachable, then do
3669 // not bother. SimplifyCFG should handle it.
3670 if (UserParent != BB && DT.isReachableFromEntry(UserParent)) {
3671 // See if the user is one of our successors that has only one
3672 // predecessor, so that we don't have to split the critical edge.
3673 bool ShouldSink = UserParent->getUniquePredecessor() == BB;
3674 // Another case where we can sink is a block that ends with a
3675 // terminator that does not pass control to another block (such as
3676 // return or unreachable). In this case:
3677 // - I dominates the User (by SSA form);
3678 // - the User will be executed at most once.
3679 // So sinking I down to User is always profitable or neutral.
3680 if (!ShouldSink) {
3681 auto *Term = UserParent->getTerminator();
3682 ShouldSink = isa<ReturnInst>(Term) || isa<UnreachableInst>(Term);
3683 }
3684 if (ShouldSink) {
3685 assert(DT.dominates(BB, UserParent) &&
3686 "Dominance relation broken?");
3687 // Okay, the CFG is simple enough, try to sink this instruction.
3688 if (TryToSinkInstruction(I, UserParent)) {
3689 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
3690 MadeIRChange = true;
3691 // We'll add uses of the sunk instruction below, but since sinking
3692 // can expose opportunities for its *operands*, add them to the
3693 // worklist.
3694 for (Use &U : I->operands())
3695 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
3696 Worklist.push(OpI);
3697 }
3698 }
3699 }
3700 }
3701
3702 // Now that we have an instruction, try combining it to simplify it.
3703 Builder.SetInsertPoint(I);
3704 Builder.CollectMetadataToCopy(
3705 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
3706
3707 #ifndef NDEBUG
3708 std::string OrigI;
3709 #endif
3710 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
3711 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
3712
3713 if (Instruction *Result = visit(*I)) {
3714 ++NumCombined;
3715 // Should we replace the old instruction with a new one?
3716 if (Result != I) {
3717 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
3718 << " New = " << *Result << '\n');
3719
3720 Result->copyMetadata(*I,
3721 {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
3722 // Everything uses the new instruction now.
3723 I->replaceAllUsesWith(Result);
3724
3725 // Move the name to the new instruction first.
3726 Result->takeName(I);
3727
3728 // Insert the new instruction into the basic block...
3729 BasicBlock *InstParent = I->getParent();
3730 BasicBlock::iterator InsertPos = I->getIterator();
3731
3732 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
3733 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
3734 // We need to fix up the insertion point.
3735 if (isa<PHINode>(I)) // PHI -> Non-PHI
3736 InsertPos = InstParent->getFirstInsertionPt();
3737 else // Non-PHI -> PHI
3738 InsertPos = InstParent->getFirstNonPHI()->getIterator();
3739 }
3740
3741 InstParent->getInstList().insert(InsertPos, Result);
3742
3743 // Push the new instruction and any users onto the worklist.
3744 Worklist.pushUsersToWorkList(*Result);
3745 Worklist.push(Result);
3746
3747 eraseInstFromFunction(*I);
3748 } else {
3749 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
3750 << " New = " << *I << '\n');
3751
3752 // If the instruction was modified, it's possible that it is now dead.
3753 // If so, remove it.
3754 if (isInstructionTriviallyDead(I, &TLI)) {
3755 eraseInstFromFunction(*I);
3756 } else {
3757 Worklist.pushUsersToWorkList(*I);
3758 Worklist.push(I);
3759 }
3760 }
3761 MadeIRChange = true;
3762 }
3763 }
3764
3765 Worklist.zap();
3766 return MadeIRChange;
3767 }
3768
3769 // Track the scopes used by !alias.scope and !noalias. In a function, a
3770 // @llvm.experimental.noalias.scope.decl is only useful if that scope is used
3771 // by both sets. If not, the declaration of the scope can be safely omitted.
3772 // The MDNode of the scope can be omitted as well for the instructions that are
3773 // part of this function. We do not do that at this point, as this might become
3774 // too time consuming to do.
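// Typical use (see prepareICWorklistFromFunction below): call analyse() on
// every instruction that will be kept, then query isNoAliasScopeDeclDead()
// when deciding whether a scope declaration can be dropped.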
3775 class AliasScopeTracker { 3776 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists; 3777 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists; 3778 3779 public: 3780 void analyse(Instruction *I) { 3781 // This seems to be faster than checking 'mayReadOrWriteMemory()'. 3782 if (!I->hasMetadataOtherThanDebugLoc()) 3783 return; 3784 3785 auto Track = [](Metadata *ScopeList, auto &Container) { 3786 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList); 3787 if (!MDScopeList || !Container.insert(MDScopeList).second) 3788 return; 3789 for (auto &MDOperand : MDScopeList->operands()) 3790 if (auto *MDScope = dyn_cast<MDNode>(MDOperand)) 3791 Container.insert(MDScope); 3792 }; 3793 3794 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists); 3795 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists); 3796 } 3797 3798 bool isNoAliasScopeDeclDead(Instruction *Inst) { 3799 NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst); 3800 if (!Decl) 3801 return false; 3802 3803 assert(Decl->use_empty() && 3804 "llvm.experimental.noalias.scope.decl in use ?"); 3805 const MDNode *MDSL = Decl->getScopeList(); 3806 assert(MDSL->getNumOperands() == 1 && 3807 "llvm.experimental.noalias.scope should refer to a single scope"); 3808 auto &MDOperand = MDSL->getOperand(0); 3809 if (auto *MD = dyn_cast<MDNode>(MDOperand)) 3810 return !UsedAliasScopesAndLists.contains(MD) || 3811 !UsedNoAliasScopesAndLists.contains(MD); 3812 3813 // Not an MDNode ? throw away. 3814 return true; 3815 } 3816 }; 3817 3818 /// Populate the IC worklist from a function, by walking it in depth-first 3819 /// order and adding all reachable code to the worklist. 3820 /// 3821 /// This has a couple of tricks to make the code faster and more powerful. In 3822 /// particular, we constant fold and DCE instructions as we go, to avoid adding 3823 /// them to the worklist (this significantly speeds up instcombine on code where 3824 /// many instructions are dead or constant). Additionally, if we find a branch 3825 /// whose condition is a known constant, we only visit the reachable successors. 3826 static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL, 3827 const TargetLibraryInfo *TLI, 3828 InstCombineWorklist &ICWorklist) { 3829 bool MadeIRChange = false; 3830 SmallPtrSet<BasicBlock *, 32> Visited; 3831 SmallVector<BasicBlock*, 256> Worklist; 3832 Worklist.push_back(&F.front()); 3833 3834 SmallVector<Instruction*, 128> InstrsForInstCombineWorklist; 3835 DenseMap<Constant *, Constant *> FoldedConstants; 3836 AliasScopeTracker SeenAliasScopes; 3837 3838 do { 3839 BasicBlock *BB = Worklist.pop_back_val(); 3840 3841 // We have now visited this block! If we've already been here, ignore it. 3842 if (!Visited.insert(BB).second) 3843 continue; 3844 3845 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) { 3846 Instruction *Inst = &*BBI++; 3847 3848 // ConstantProp instruction if trivially constant. 3849 if (!Inst->use_empty() && 3850 (Inst->getNumOperands() == 0 || isa<Constant>(Inst->getOperand(0)))) 3851 if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) { 3852 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *Inst 3853 << '\n'); 3854 Inst->replaceAllUsesWith(C); 3855 ++NumConstProp; 3856 if (isInstructionTriviallyDead(Inst, TLI)) 3857 Inst->eraseFromParent(); 3858 MadeIRChange = true; 3859 continue; 3860 } 3861 3862 // See if we can constant fold its operands. 
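// Folded results are cached in FoldedConstants so a constant expression that
// appears as an operand of many instructions is only folded once.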
3863 for (Use &U : Inst->operands()) { 3864 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U)) 3865 continue; 3866 3867 auto *C = cast<Constant>(U); 3868 Constant *&FoldRes = FoldedConstants[C]; 3869 if (!FoldRes) 3870 FoldRes = ConstantFoldConstant(C, DL, TLI); 3871 3872 if (FoldRes != C) { 3873 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << *Inst 3874 << "\n Old = " << *C 3875 << "\n New = " << *FoldRes << '\n'); 3876 U = FoldRes; 3877 MadeIRChange = true; 3878 } 3879 } 3880 3881 // Skip processing debug and pseudo intrinsics in InstCombine. Processing 3882 // these call instructions consumes non-trivial amount of time and 3883 // provides no value for the optimization. 3884 if (!Inst->isDebugOrPseudoInst()) { 3885 InstrsForInstCombineWorklist.push_back(Inst); 3886 SeenAliasScopes.analyse(Inst); 3887 } 3888 } 3889 3890 // Recursively visit successors. If this is a branch or switch on a 3891 // constant, only visit the reachable successor. 3892 Instruction *TI = BB->getTerminator(); 3893 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) { 3894 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) { 3895 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue(); 3896 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal); 3897 Worklist.push_back(ReachableBB); 3898 continue; 3899 } 3900 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) { 3901 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) { 3902 Worklist.push_back(SI->findCaseValue(Cond)->getCaseSuccessor()); 3903 continue; 3904 } 3905 } 3906 3907 append_range(Worklist, successors(TI)); 3908 } while (!Worklist.empty()); 3909 3910 // Remove instructions inside unreachable blocks. This prevents the 3911 // instcombine code from having to deal with some bad special cases, and 3912 // reduces use counts of instructions. 3913 for (BasicBlock &BB : F) { 3914 if (Visited.count(&BB)) 3915 continue; 3916 3917 unsigned NumDeadInstInBB; 3918 unsigned NumDeadDbgInstInBB; 3919 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) = 3920 removeAllNonTerminatorAndEHPadInstructions(&BB); 3921 3922 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0; 3923 NumDeadInst += NumDeadInstInBB; 3924 } 3925 3926 // Once we've found all of the instructions to add to instcombine's worklist, 3927 // add them in reverse order. This way instcombine will visit from the top 3928 // of the function down. This jives well with the way that it adds all uses 3929 // of instructions to the worklist after doing a transformation, thus avoiding 3930 // some N^2 behavior in pathological cases. 3931 ICWorklist.reserve(InstrsForInstCombineWorklist.size()); 3932 for (Instruction *Inst : reverse(InstrsForInstCombineWorklist)) { 3933 // DCE instruction if trivially dead. As we iterate in reverse program 3934 // order here, we will clean up whole chains of dead instructions. 
3935 if (isInstructionTriviallyDead(Inst, TLI) || 3936 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) { 3937 ++NumDeadInst; 3938 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n'); 3939 salvageDebugInfo(*Inst); 3940 Inst->eraseFromParent(); 3941 MadeIRChange = true; 3942 continue; 3943 } 3944 3945 ICWorklist.push(Inst); 3946 } 3947 3948 return MadeIRChange; 3949 } 3950 3951 static bool combineInstructionsOverFunction( 3952 Function &F, InstCombineWorklist &Worklist, AliasAnalysis *AA, 3953 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, 3954 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, 3955 ProfileSummaryInfo *PSI, unsigned MaxIterations, LoopInfo *LI) { 3956 auto &DL = F.getParent()->getDataLayout(); 3957 MaxIterations = std::min(MaxIterations, LimitMaxIterations.getValue()); 3958 3959 /// Builder - This is an IRBuilder that automatically inserts new 3960 /// instructions into the worklist when they are created. 3961 IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder( 3962 F.getContext(), TargetFolder(DL), 3963 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) { 3964 Worklist.add(I); 3965 if (match(I, m_Intrinsic<Intrinsic::assume>())) 3966 AC.registerAssumption(cast<CallInst>(I)); 3967 })); 3968 3969 // Lower dbg.declare intrinsics otherwise their value may be clobbered 3970 // by instcombiner. 3971 bool MadeIRChange = false; 3972 if (ShouldLowerDbgDeclare) 3973 MadeIRChange = LowerDbgDeclare(F); 3974 3975 // Iterate while there is work to do. 3976 unsigned Iteration = 0; 3977 while (true) { 3978 ++NumWorklistIterations; 3979 ++Iteration; 3980 3981 if (Iteration > InfiniteLoopDetectionThreshold) { 3982 report_fatal_error( 3983 "Instruction Combining seems stuck in an infinite loop after " + 3984 Twine(InfiniteLoopDetectionThreshold) + " iterations."); 3985 } 3986 3987 if (Iteration > MaxIterations) { 3988 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << MaxIterations 3989 << " on " << F.getName() 3990 << " reached; stopping before reaching a fixpoint\n"); 3991 break; 3992 } 3993 3994 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on " 3995 << F.getName() << "\n"); 3996 3997 MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist); 3998 3999 InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT, 4000 ORE, BFI, PSI, DL, LI); 4001 IC.MaxArraySizeForCombine = MaxArraySize; 4002 4003 if (!IC.run()) 4004 break; 4005 4006 MadeIRChange = true; 4007 } 4008 4009 return MadeIRChange; 4010 } 4011 4012 InstCombinePass::InstCombinePass() : MaxIterations(LimitMaxIterations) {} 4013 4014 InstCombinePass::InstCombinePass(unsigned MaxIterations) 4015 : MaxIterations(MaxIterations) {} 4016 4017 PreservedAnalyses InstCombinePass::run(Function &F, 4018 FunctionAnalysisManager &AM) { 4019 auto &AC = AM.getResult<AssumptionAnalysis>(F); 4020 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 4021 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 4022 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 4023 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 4024 4025 auto *LI = AM.getCachedResult<LoopAnalysis>(F); 4026 4027 auto *AA = &AM.getResult<AAManager>(F); 4028 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 4029 ProfileSummaryInfo *PSI = 4030 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 4031 auto *BFI = (PSI && PSI->hasProfileSummary()) ? 
4032 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr; 4033 4034 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE, 4035 BFI, PSI, MaxIterations, LI)) 4036 // No changes, all analyses are preserved. 4037 return PreservedAnalyses::all(); 4038 4039 // Mark all the analyses that instcombine updates as preserved. 4040 PreservedAnalyses PA; 4041 PA.preserveSet<CFGAnalyses>(); 4042 PA.preserve<AAManager>(); 4043 PA.preserve<BasicAA>(); 4044 PA.preserve<GlobalsAA>(); 4045 return PA; 4046 } 4047 4048 void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const { 4049 AU.setPreservesCFG(); 4050 AU.addRequired<AAResultsWrapperPass>(); 4051 AU.addRequired<AssumptionCacheTracker>(); 4052 AU.addRequired<TargetLibraryInfoWrapperPass>(); 4053 AU.addRequired<TargetTransformInfoWrapperPass>(); 4054 AU.addRequired<DominatorTreeWrapperPass>(); 4055 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 4056 AU.addPreserved<DominatorTreeWrapperPass>(); 4057 AU.addPreserved<AAResultsWrapperPass>(); 4058 AU.addPreserved<BasicAAWrapperPass>(); 4059 AU.addPreserved<GlobalsAAWrapperPass>(); 4060 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 4061 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU); 4062 } 4063 4064 bool InstructionCombiningPass::runOnFunction(Function &F) { 4065 if (skipFunction(F)) 4066 return false; 4067 4068 // Required analyses. 4069 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 4070 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 4071 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 4072 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 4073 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 4074 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 4075 4076 // Optional analyses. 4077 auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>(); 4078 auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr; 4079 ProfileSummaryInfo *PSI = 4080 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 4081 BlockFrequencyInfo *BFI = 4082 (PSI && PSI->hasProfileSummary()) ? 
4083 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() : 4084 nullptr; 4085 4086 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE, 4087 BFI, PSI, MaxIterations, LI); 4088 } 4089 4090 char InstructionCombiningPass::ID = 0; 4091 4092 InstructionCombiningPass::InstructionCombiningPass() 4093 : FunctionPass(ID), MaxIterations(InstCombineDefaultMaxIterations) { 4094 initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry()); 4095 } 4096 4097 InstructionCombiningPass::InstructionCombiningPass(unsigned MaxIterations) 4098 : FunctionPass(ID), MaxIterations(MaxIterations) { 4099 initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry()); 4100 } 4101 4102 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine", 4103 "Combine redundant instructions", false, false) 4104 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 4105 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 4106 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 4107 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 4108 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 4109 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 4110 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 4111 INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass) 4112 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 4113 INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine", 4114 "Combine redundant instructions", false, false) 4115 4116 // Initialization Routines 4117 void llvm::initializeInstCombine(PassRegistry &Registry) { 4118 initializeInstructionCombiningPassPass(Registry); 4119 } 4120 4121 void LLVMInitializeInstCombine(LLVMPassRegistryRef R) { 4122 initializeInstructionCombiningPassPass(*unwrap(R)); 4123 } 4124 4125 FunctionPass *llvm::createInstructionCombiningPass() { 4126 return new InstructionCombiningPass(); 4127 } 4128 4129 FunctionPass *llvm::createInstructionCombiningPass(unsigned MaxIterations) { 4130 return new InstructionCombiningPass(MaxIterations); 4131 } 4132 4133 void LLVMAddInstructionCombiningPass(LLVMPassManagerRef PM) { 4134 unwrap(PM)->add(createInstructionCombiningPass()); 4135 } 4136