//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));
/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static const Instruction *safeCxtI(const Value *V1, const Value *V2,
                                   const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V1);
  if (CxtI && CxtI->getParent())
    return CxtI;

  CxtI = dyn_cast<Instruction>(V2);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  if (isa<ScalableVectorType>(Shuf->getType())) {
    assert(DemandedElts == APInt(1,1));
    DemandedLHS = DemandedRHS = DemandedElts;
    return true;
  }

  int NumElts =
      cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  return llvm::getShuffleDemandedElts(NumElts, Shuf->getShuffleMask(),
                                      DemandedElts, DemandedLHS, DemandedRHS);
}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth,
                             const SimplifyQuery &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const SimplifyQuery &Q) {
  // Since the number of lanes in a scalable vector is unknown at compile time,
  // we track one bit which is implicitly broadcast to all lanes. This means
  // that all lanes in a scalable vector are considered demanded.
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}
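// A worked illustration of the DemandedElts convention above (example types
// chosen arbitrarily): a query on a <4 x i32> value uses DemandedElts =
// 0b1111, one bit per lane, while a query on a <vscale x 4 x i32> value uses
// APInt(1, 1); that single bit stands for every lane, so anything recorded in
// Known must hold for all lanes of the scalable vector.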
void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
                                   safeCxtI(V, CxtI), UseInstrInfo));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
                                   safeCxtI(V, CxtI), UseInstrInfo));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const SimplifyQuery &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const SimplifyQuery &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT, bool UseInstrInfo) {
  return ::computeKnownBits(V, Depth,
                            SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
                                          safeCxtI(V, CxtI), UseInstrInfo));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT, bool UseInstrInfo) {
  return ::computeKnownBits(V, DemandedElts, Depth,
                            SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
                                          safeCxtI(V, CxtI), UseInstrInfo));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  {
    Value *M;
    if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(RHS, m_c_And(m_Specific(M), m_Value())))
      return true;
    if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(LHS, m_c_And(m_Specific(M), m_Value())))
      return true;
  }

  // X op (Y & ~X)
  if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) ||
      match(LHS, m_c_And(m_Not(m_Specific(RHS)), m_Value())))
    return true;

  // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
  // for constant Y.
  Value *Y;
  if (match(RHS,
            m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) ||
      match(LHS, m_c_Xor(m_c_And(m_Specific(RHS), m_Value(Y)), m_Deferred(Y))))
    return true;

  // Peek through extends to find a 'not' of the other side:
  // (ext Y) op ext(~Y)
  // (ext ~Y) op ext(Y)
  if ((match(LHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))) ||
      (match(RHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(LHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))))
    return true;

  // Look for: (A & B) op ~(A | B)
  {
    Value *A, *B;
    if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
        match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
    if (match(RHS, m_And(m_Value(A), m_Value(B))) &&
        match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
  }
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, UseInstrInfo);
  return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}
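// Illustrative instance of the inverted-mask case above (example constant
// chosen arbitrarily): with M = 0b0011, LHS = (X & ~M) can only set bits
// outside 0b0011 and RHS = (Y & M) can only set bits inside it, so no bit
// position is one on both sides; for such operands, e.g., LHS + RHS equals
// LHS | RHS.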
bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
  return !I->user_empty() && all_of(I->users(), [](const User *U) {
    ICmpInst::Predicate P;
    return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
  });
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const SimplifyQuery &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
                                                safeCxtI(V, CxtI),
                                                UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const SimplifyQuery &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth,
                           const SimplifyQuery &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
                                        safeCxtI(V, CxtI), UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const SimplifyQuery &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2, 0,
                           SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
                                         safeCxtI(V2, V1, CxtI), UseInstrInfo));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const SimplifyQuery &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
                                           safeCxtI(V, CxtI), UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const SimplifyQuery &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const SimplifyQuery &Q) {
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(V, Depth,
                              SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
                                            safeCxtI(V, CxtI), UseInstrInfo));
}

unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
                                         unsigned Depth, AssumptionCache *AC,
                                         const Instruction *CxtI,
                                         const DominatorTree *DT) {
  unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
  return V->getType()->getScalarSizeInBits() - SignBits + 1;
}
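// Worked example for the formula above: an i32 value known to be
// sign-extended from i8 has at least 25 copies of the sign bit, so
// ComputeMaxSignificantBits returns at most 32 - 25 + 1 == 8: the value is
// faithfully representable in 8 significant bits.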
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const SimplifyQuery &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const SimplifyQuery &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  bool SelfMultiply = Op0 == Op1;
  // TODO: SelfMultiply can be poison, but not undef.
  if (SelfMultiply)
    SelfMultiply &=
        isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
  Known = KnownBits::mul(Known, Known2, SelfMultiply);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}
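// Worked example for the loop above (example range chosen arbitrarily):
// !range metadata describing [64, 68) covers 0b01000000..0b01000011, so
// UnsignedMax ^ UnsignedMin == 0b00000011 and CommonPrefixBits == 6; the six
// high bits become known as 010000 and only the low two bits stay unknown.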
static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || (isa<Instruction>(V) &&
                     !cast<Instruction>(V)->mayHaveSideEffects() &&
                     !cast<Instruction>(V)->isTerminator())) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}
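// Illustrative IR for the ephemeral-value rule above (names made up):
//   %cond = icmp eq i32 %x, 0
//   call void @llvm.assume(i1 %cond)
// Here %cond exists only to express the assumption, so it is ephemeral to the
// assume: using the assume to prove %cond itself would be circular.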
// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
    return CI->isAssumeLikeIntrinsic();

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) is first
    // in the BB.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    // We limit the scan distance between the assume and its context instruction
    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
    // it can be adjusted if needed (could be turned into a cl::opt).
    auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
    if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
      return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}

// TODO: cmpExcludesZero misses many cases where `RHS` is non-constant but
// we still have enough information about `RHS` to conclude non-zero. For
// example Pred=EQ, RHS=isKnownNonZero. cmpExcludesZero is called in loops
// so the extra compile time may not be worth it, but possibly a second API
// should be created for use outside of loops.
static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return false;

  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
  return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
}

static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}
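// For example (illustrative IR, names made up), given
//   %c = icmp ugt i32 %v, %y
//   call void @llvm.assume(i1 %c)
// the loop above concludes %v != 0 at any context the assume is valid for:
// 0 u> %y is false for every %y, so cmpExcludesZero accepts ICMP_UGT without
// even looking at the RHS.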
static void computeKnownBitsFromCmp(const Value *V, const ICmpInst *Cmp,
                                    KnownBits &Known, unsigned Depth,
                                    const SimplifyQuery &Q) {
  unsigned BitWidth = Known.getBitWidth();
  // We are attempting to compute known bits for the operands of an assume.
  // Do not try to use other assumptions for those recursive calls because
  // that can lead to mutual recursion and a compile-time explosion.
  // An example of the mutual recursion: computeKnownBits can call
  // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
  // and so on.
  SimplifyQuery QueryNoAC = Q;
  QueryNoAC.AC = nullptr;

  // Note that ptrtoint may change the bitwidth.
  Value *A, *B;
  auto m_V =
      m_CombineOr(m_Specific(V), m_PtrToIntSameSize(Q.DL, m_Specific(V)));

  CmpInst::Predicate Pred;
  uint64_t C;
  switch (Cmp->getPredicate()) {
  case ICmpInst::ICMP_EQ:
    // assume(v = a)
    if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      Known = Known.unionWith(RHSKnown);
      // assume(v & b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      KnownBits MaskKnown = computeKnownBits(B, Depth + 1, QueryNoAC);

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
      // assume(~(v & b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      KnownBits MaskKnown = computeKnownBits(B, Depth + 1, QueryNoAC);

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
      // assume(v | b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      KnownBits BKnown = computeKnownBits(B, Depth + 1, QueryNoAC);

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      // assume(~(v | b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      KnownBits BKnown = computeKnownBits(B, Depth + 1, QueryNoAC);

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      // assume(v ^ b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      KnownBits BKnown = computeKnownBits(B, Depth + 1, QueryNoAC);

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
      // assume(~(v ^ b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      KnownBits BKnown = computeKnownBits(B, Depth + 1, QueryNoAC);

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
      // assume(v << c = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);

      // For those bits in RHS that are known, we can propagate them, shifted
      // to the right by C, to known bits in V.
      RHSKnown.Zero.lshrInPlace(C);
      RHSKnown.One.lshrInPlace(C);
      Known = Known.unionWith(RHSKnown);
      // assume(~(v << c) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      // For those bits in RHS that are known, we can propagate them inverted,
      // shifted to the right by C, to known bits in V.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
      // assume(v >> c = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      // For those bits in RHS that are known, we can propagate them, shifted
      // to the left by C, to known bits in V.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
      // assume(~(v >> c) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      // For those bits in RHS that are known, we can propagate them inverted,
      // shifted to the left by C, to known bits in V.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    }
    break;
  case ICmpInst::ICMP_NE: {
    // assume (v & b != 0) where b is a power of 2
    const APInt *BPow2;
    if (match(Cmp, m_ICmp(Pred, m_c_And(m_V, m_Power2(BPow2)), m_Zero()))) {
      Known.One |= *BPow2;
    }
    break;
  }
  default:
    const APInt *Offset = nullptr;
    if (match(Cmp, m_ICmp(Pred, m_CombineOr(m_V, m_Add(m_V, m_APInt(Offset))),
                          m_Value(A)))) {
      KnownBits RHSKnown = computeKnownBits(A, Depth + 1, QueryNoAC);
      ConstantRange RHSRange =
          ConstantRange::fromKnownBits(RHSKnown, Cmp->isSigned());
      ConstantRange LHSRange =
          ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
      if (Offset)
        LHSRange = LHSRange.sub(*Offset);
      Known = Known.unionWith(LHSRange.toKnownBits());
    }
    break;
  }
}
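// Worked example for the ICMP_EQ mask case above (constants chosen
// arbitrarily): from assume((%v & 12) == 4), MaskKnown.One == 0b1100 and
// RHSKnown is the constant 0b0100, so bit 2 of %v becomes known one and bit 3
// known zero; bits 0 and 1 stay unknown because the mask does not cover them.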
void llvm::computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                      unsigned Depth, const SimplifyQuery &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, { Attribute::Alignment }, Q.CxtI, Q.DT, Q.AC)) {
      if (isPowerOf2_64(RK.ArgValue))
        Known.Zero.setLowBits(Log2_64(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      (void)BitWidth;
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      (void)BitWidth;
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
      continue;

    computeKnownBitsFromCmp(V, Cmp, Known, Depth, Q);
  }

  // Conflicting assumption: Undefined behavior will occur on this execution
  // path.
  if (Known.hasConflict())
    Known.resetAll();
}
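// For example, if both assume(%x == 0) and assume(%x == 1) are valid at the
// context instruction, bit 0 lands in both Known.Zero and Known.One; the
// reset above then reports "nothing known" instead of propagating the
// contradiction.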
/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known and on return
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given the known-bits and a shift amount,
/// computes the implied known-bits of the shift operator's result respectively
/// for that shift amount. The results from calling KF are conservatively
/// combined for all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &, bool)> KF) {
  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
  // To limit compile-time impact, only query isKnownNonZero() if we know at
  // least something about the shift amount.
  bool ShAmtNonZero =
      Known.isNonZero() ||
      (Known.getMaxValue().ult(Known.getBitWidth()) &&
       isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q));
  Known = KF(Known2, Known, ShAmtNonZero);
}

static KnownBits
getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts,
                         const KnownBits &KnownLHS, const KnownBits &KnownRHS,
                         unsigned Depth, const SimplifyQuery &Q) {
  unsigned BitWidth = KnownLHS.getBitWidth();
  KnownBits KnownOut(BitWidth);
  bool IsAnd = false;
  bool HasKnownOne = !KnownLHS.One.isZero() || !KnownRHS.One.isZero();
  Value *X = nullptr, *Y = nullptr;

  switch (I->getOpcode()) {
  case Instruction::And:
    KnownOut = KnownLHS & KnownRHS;
    IsAnd = true;
    // and(x, -x) is a common idiom that clears all but the lowest set
    // bit. If we have a single known bit in x, we can clear all bits
    // above it.
    // TODO: instcombine often reassociates independent `and` which can hide
    // this pattern. Try to match and(x, and(-x, y)) / and(and(x, y), -x).
    if (HasKnownOne && match(I, m_c_And(m_Value(X), m_Neg(m_Deferred(X))))) {
      // -(-x) == x so using whichever (LHS/RHS) gets us a better result.
      if (KnownLHS.countMaxTrailingZeros() <= KnownRHS.countMaxTrailingZeros())
        KnownOut = KnownLHS.blsi();
      else
        KnownOut = KnownRHS.blsi();
    }
    break;
  case Instruction::Or:
    KnownOut = KnownLHS | KnownRHS;
    break;
  case Instruction::Xor:
    KnownOut = KnownLHS ^ KnownRHS;
    // xor(x, x-1) is a common idiom that clears all but the lowest set
    // bit. If we have a single known bit in x, we can clear all bits
    // above it.
    // TODO: xor(x, x-1) is often rewritten as xor(x, x-C) where C != -1,
    // but for the purpose of demanded bits (xor(x, x-C) &
    // Demanded) == (xor(x, x-1) & Demanded). Extend the xor pattern
    // to use arbitrary C when xor(x, x-C) is the same as xor(x, x-1).
    if (HasKnownOne &&
        match(I, m_c_Xor(m_Value(X), m_c_Add(m_Deferred(X), m_AllOnes())))) {
      const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS;
      KnownOut = XBits.blsmsk();
    }
    break;
  default:
    llvm_unreachable("Invalid Op used in 'analyzeKnownBitsFromAndXorOr'");
  }

  // and(x, add (x, -1)) is a common idiom that always clears the low bit;
  // xor/or(x, add (x, -1)) is an idiom that will always set the low bit.
  // Here we handle the more general case of adding any odd number by
  // matching the form and/xor/or(x, add(x, y)) where y is odd.
  // TODO: This could be generalized to clearing any bit set in y where the
  // following bit is known to be unset in y.
  if (!KnownOut.Zero[0] && !KnownOut.One[0] &&
      (match(I, m_c_BinOp(m_Value(X), m_c_Add(m_Deferred(X), m_Value(Y)))) ||
       match(I, m_c_BinOp(m_Value(X), m_Sub(m_Deferred(X), m_Value(Y)))) ||
       match(I, m_c_BinOp(m_Value(X), m_Sub(m_Value(Y), m_Deferred(X)))))) {
    KnownBits KnownY(BitWidth);
    computeKnownBits(Y, DemandedElts, KnownY, Depth + 1, Q);
    if (KnownY.countMinTrailingOnes() > 0) {
      if (IsAnd)
        KnownOut.Zero.setBit(0);
      else
        KnownOut.One.setBit(0);
    }
  }
  return KnownOut;
}
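// Worked example for the and(x, -x) case above: -x preserves the lowest set
// bit of x and complements everything above it, so x & -x isolates that bit.
// If x is known to end in 0b1000 (bit 3 one, bits 0-2 zero), blsi() reports
// the result as exactly 0b1000, with every higher bit known zero.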
// Public so this can be used in `SimplifyDemandedUseBits`.
KnownBits llvm::analyzeKnownBitsFromAndXorOr(
    const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS,
    unsigned Depth, const DataLayout &DL, AssumptionCache *AC,
    const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo) {
  auto *FVTy = dyn_cast<FixedVectorType>(I->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);

  return getKnownBitsFromAndXorOr(I, DemandedElts, KnownLHS, KnownRHS, Depth,
                                  SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC,
                                                safeCxtI(I, CxtI),
                                                UseInstrInfo));
}

ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
  Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
  // Without vscale_range, we only know that vscale is non-zero.
  if (!Attr.isValid())
    return ConstantRange(APInt(BitWidth, 1), APInt::getZero(BitWidth));

  unsigned AttrMin = Attr.getVScaleRangeMin();
  // Minimum is larger than vscale width, result is always poison.
  if ((unsigned)llvm::bit_width(AttrMin) > BitWidth)
    return ConstantRange::getEmpty(BitWidth);

  APInt Min(BitWidth, AttrMin);
  std::optional<unsigned> AttrMax = Attr.getVScaleRangeMax();
  if (!AttrMax || (unsigned)llvm::bit_width(*AttrMax) > BitWidth)
    return ConstantRange(Min, APInt::getZero(BitWidth));

  return ConstantRange(Min, APInt(BitWidth, *AttrMax) + 1);
}
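// For example, a function carrying vscale_range(2,4) yields the range
// [2, 5) == {2, 3, 4} here, while vscale_range(2) with no upper bound yields
// [2, 0), i.e. every value >= 2.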
static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const SimplifyQuery &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known =
        KnownBits::udiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
    break;
  }
  case Instruction::SDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known =
        KnownBits::sdiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
      switch (SPF) {
      default:
        llvm_unreachable("Unhandled select pattern flavor!");
      case SPF_SMAX:
        Known = KnownBits::smax(Known, Known2);
        break;
      case SPF_SMIN:
        Known = KnownBits::smin(Known, Known2);
        break;
      case SPF_UMAX:
        Known = KnownBits::umax(Known, Known2);
        break;
      case SPF_UMIN:
        Known = KnownBits::umin(Known, Known2);
        break;
      }
      break;
    }

    computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    Known = Known.intersectWith(Known2);

    if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS)))
        Known.Zero.setSignBit();
    }

    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    [[fallthrough]];
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
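  // Worked example for the zext/trunc handling above: for %r = zext i8 %x to
  // i32 the operand's bits are computed at width 8, and zextOrTrunc(32) then
  // marks the 24 high bits of %r known zero; for a trunc only the low
  // destination bits survive.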
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }

    // Handle cast from vector integer type to scalar or vector integer.
    auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
    if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
        !I->getType()->isIntOrIntVectorTy() ||
        isa<ScalableVectorType>(I->getType()))
      break;

    // Look through a cast from narrow vector elements to wider type.
    // Examples: v4i32 -> v2i64, v3i8 -> i24
    unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
    if (BitWidth % SubBitWidth == 0) {
      // Known bits are automatically intersected across demanded elements of a
      // vector. So for example, if a bit is computed as known zero, it must be
      // zero across all demanded elements of the vector.
      //
      // For this bitcast, each demanded element of the output is sub-divided
      // across a set of smaller vector elements in the source vector. To get
      // the known bits for an entire element of the output, compute the known
      // bits for each sub-element sequentially. This is done by shifting the
      // one-set-bit demanded elements parameter across the sub-elements for
      // consecutive calls to computeKnownBits. We are using the demanded
      // elements parameter as a mask operator.
      //
      // The known bits of each sub-element are then inserted into place
      // (dependent on endian) to form the full result of known bits.
      unsigned NumElts = DemandedElts.getBitWidth();
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);
      }

      KnownBits KnownSrc(SubBitWidth);
      for (unsigned i = 0; i != SubScale; ++i) {
        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
                         Depth + 1, Q);
        unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
        Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
      }
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KF = [NUW, NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt,
                         bool ShAmtNonZero) {
      return KnownBits::shl(KnownVal, KnownAmt, NUW, NSW, ShAmtNonZero);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Trailing zeros of a left-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setLowBits(C->countr_zero());
    break;
  }
  case Instruction::LShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt,
                 bool ShAmtNonZero) {
      return KnownBits::lshr(KnownVal, KnownAmt, ShAmtNonZero);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Leading zeros of a right-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setHighBits(C->countl_zero());
    break;
  }
  case Instruction::AShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt,
                 bool ShAmtNonZero) {
      return KnownBits::ashr(KnownVal, KnownAmt, ShAmtNonZero);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::srem(Known, Known2);
    break;

  case Instruction::URem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::urem(Known, Known2);
    break;
  case Instruction::Alloca:
    Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
    break;
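  // For example, %p = alloca i32, align 16 yields Log2(16) == 4 low address
  // bits known zero in %p.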
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // Accumulate the constant indices in a separate variable
    // to minimize the number of calls to computeForAddSub.
    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      // TrailZ can only become smaller, short-circuit if we hit zero.
      if (Known.isUnknown())
        break;

      Value *Index = I->getOperand(i);

      // Handle case when index is zero.
      Constant *CIndex = dyn_cast<Constant>(Index);
      if (CIndex && CIndex->isZeroValue())
        continue;

      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        assert(CIndex &&
               "Access to structure field must be known at compile time");

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        AccConstIndices += Offset;
        continue;
      }

      // Handle array index arithmetic.
      Type *IndexedTy = GTI.getIndexedType();
      if (!IndexedTy->isSized()) {
        Known.resetAll();
        break;
      }

      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
      KnownBits IndexBits(IndexBitWidth);
      computeKnownBits(Index, IndexBits, Depth + 1, Q);
      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
      KnownBits ScalingFactor(IndexBitWidth);
      // Multiply by current sizeof type.
      // &A[i] == A + i * sizeof(*A[i]).
      if (IndexTypeSize.isScalable()) {
        // For scalable types the only thing we know about sizeof is
        // that this is a multiple of the minimum size.
        ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes));
      } else if (IndexBits.isConstant()) {
        APInt IndexConst = IndexBits.getConstant();
        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
        IndexConst *= ScalingFactor;
        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
        continue;
      } else {
        ScalingFactor =
            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
      }
      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);

      // If the offsets have a different width from the pointer, according
      // to the language reference we need to sign-extend or truncate them
      // to the width of the pointer.
      IndexBits = IndexBits.sextOrTrunc(BitWidth);

      // Note that inbounds does *not* guarantee nsw for the addition, as only
      // the offset is signed, while the base address is unsigned.
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, IndexBits);
    }
    if (!Known.isUnknown() && !AccConstIndices.isZero()) {
      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, Index);
    }
    break;
  }
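  // Worked example for the array-index arithmetic above (illustrative IR): in
  //   %q = getelementptr i32, ptr %p, i64 %i
  // the index known bits are scaled by the alloc size 4, so the byte offset
  // always has its two low bits clear; if %p is also known 4-byte aligned,
  // computeForAddSub keeps those two low known-zero bits in %q.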
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    BinaryOperator *BO = nullptr;
    Value *R = nullptr, *L = nullptr;
    if (matchSimpleRecurrence(P, BO, R, L)) {
      // Handle the case of a simple two-predecessor recurrence PHI.
      // There's a lot more that could theoretically be done here, but
      // this is sufficient to catch some interesting cases.
      unsigned Opcode = BO->getOpcode();

      // If this is a shift recurrence, we know the bits being shifted in.
      // We can combine that with information about the start value of the
      // recurrence to conclude facts about the result.
      if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
           Opcode == Instruction::Shl) &&
          BO->getOperand(0) == I) {

        // We have matched a recurrence of the form:
        // %iv = [R, %entry], [%iv.next, %backedge]
        // %iv.next = shift_op %iv, L

        // Recurse with the phi context to avoid concern about whether facts
        // inferred hold at original context instruction. TODO: It may be
        // correct to use the original context. If warranted, explore and
        // add sufficient tests to cover.
        SimplifyQuery RecQ = Q;
        RecQ.CxtI = P;
        computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
        switch (Opcode) {
        case Instruction::Shl:
          // A shl recurrence will only increase the trailing zeros.
          Known.Zero.setLowBits(Known2.countMinTrailingZeros());
          break;
        case Instruction::LShr:
          // A lshr recurrence will preserve the leading zeros of the
          // start value.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          break;
        case Instruction::AShr:
          // An ashr recurrence will extend the initial sign bit.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          Known.One.setHighBits(Known2.countMinLeadingOnes());
          break;
        }
      }

      // Check for operations that have the property that if
      // both their operands have low zero bits, the result
      // will have low zero bits.
      if (Opcode == Instruction::Add ||
          Opcode == Instruction::Sub ||
          Opcode == Instruction::And ||
          Opcode == Instruction::Or ||
          Opcode == Instruction::Mul) {
        // Change the context instruction to the "edge" that flows into the
        // phi. This is important because that is where the value is actually
        // "evaluated" even though it is used later somewhere else. (see also
        // D69571).
        SimplifyQuery RecQ = Q;

        unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
        Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
        Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator();

        // Ok, we have a PHI of the form L op= R. Check for low
        // zero bits.
        RecQ.CxtI = RInst;
        computeKnownBits(R, Known2, Depth + 1, RecQ);

        // We need to take the minimum number of known bits.
        KnownBits Known3(BitWidth);
        RecQ.CxtI = LInst;
        computeKnownBits(L, Known3, Depth + 1, RecQ);

        Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                       Known3.countMinTrailingZeros()));

        auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
        if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
          // If initial value of recurrence is nonnegative, and we are adding
          // a nonnegative number with nsw, the result can only be nonnegative
          // or poison value regardless of the number of times we execute the
          // add in phi recurrence. If initial value is negative and we are
          // adding a negative number with nsw, the result can only be
          // negative or poison value. Similar arguments apply to sub and mul.
          //
          // (add non-negative, non-negative) --> non-negative
          // (add negative, negative) --> negative
          if (Opcode == Instruction::Add) {
            if (Known2.isNonNegative() && Known3.isNonNegative())
              Known.makeNonNegative();
            else if (Known2.isNegative() && Known3.isNegative())
              Known.makeNegative();
          }

          // (sub nsw non-negative, negative) --> non-negative
          // (sub nsw negative, non-negative) --> negative
          else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
            if (Known2.isNonNegative() && Known3.isNegative())
              Known.makeNonNegative();
            else if (Known2.isNegative() && Known3.isNonNegative())
              Known.makeNegative();
          }

          // (mul nsw non-negative, non-negative) --> non-negative
          else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                   Known3.isNonNegative())
            Known.makeNonNegative();
        }

        break;
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxAnalysisRecursionDepth - 1 && Known.isUnknown()) {
      // Skip if every incoming value references the PHI itself.
      if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
        Value *IncValue = P->getIncomingValue(u);
        // Skip direct self references.
        if (IncValue == P) continue;

        // Change the context instruction to the "edge" that flows into the
        // phi. This is important because that is where the value is actually
        // "evaluated" even though it is used later somewhere else. (see also
        // D69571).
1455 SimplifyQuery RecQ = Q; 1456 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator(); 1457 1458 Known2 = KnownBits(BitWidth); 1459 1460 // Recurse, but cap the recursion to one level, because we don't 1461 // want to waste time spinning around in loops. 1462 computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ); 1463 1464 // If this failed, see if we can use a conditional branch into the phi 1465 // to help us determine the range of the value. 1466 if (Known2.isUnknown()) { 1467 ICmpInst::Predicate Pred; 1468 const APInt *RHSC; 1469 BasicBlock *TrueSucc, *FalseSucc; 1470 // TODO: Use RHS Value and compute range from its known bits. 1471 if (match(RecQ.CxtI, 1472 m_Br(m_c_ICmp(Pred, m_Specific(IncValue), m_APInt(RHSC)), 1473 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) { 1474 // Check for cases of duplicate successors. 1475 if ((TrueSucc == P->getParent()) != (FalseSucc == P->getParent())) { 1476 // If we're using the false successor, invert the predicate. 1477 if (FalseSucc == P->getParent()) 1478 Pred = CmpInst::getInversePredicate(Pred); 1479 1480 switch (Pred) { 1481 case CmpInst::Predicate::ICMP_EQ: 1482 Known2 = KnownBits::makeConstant(*RHSC); 1483 break; 1484 case CmpInst::Predicate::ICMP_ULE: 1485 Known2.Zero.setHighBits(RHSC->countl_zero()); 1486 break; 1487 case CmpInst::Predicate::ICMP_ULT: 1488 Known2.Zero.setHighBits((*RHSC - 1).countl_zero()); 1489 break; 1490 default: 1491 // TODO - add additional integer predicate handling. 1492 break; 1493 } 1494 } 1495 } 1496 } 1497 1498 Known = Known.intersectWith(Known2); 1499 // If all bits have been ruled out, there's no need to check 1500 // more operands. 1501 if (Known.isUnknown()) 1502 break; 1503 } 1504 } 1505 break; 1506 } 1507 case Instruction::Call: 1508 case Instruction::Invoke: 1509 // If range metadata is attached to this call, set known bits from that, 1510 // and then intersect with known bits based on other properties of the 1511 // function. 1512 if (MDNode *MD = 1513 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range)) 1514 computeKnownBitsFromRangeMetadata(*MD, Known); 1515 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) { 1516 computeKnownBits(RV, Known2, Depth + 1, Q); 1517 Known = Known.unionWith(Known2); 1518 } 1519 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 1520 switch (II->getIntrinsicID()) { 1521 default: break; 1522 case Intrinsic::abs: { 1523 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1524 bool IntMinIsPoison = match(II->getArgOperand(1), m_One()); 1525 Known = Known2.abs(IntMinIsPoison); 1526 break; 1527 } 1528 case Intrinsic::bitreverse: 1529 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); 1530 Known.Zero |= Known2.Zero.reverseBits(); 1531 Known.One |= Known2.One.reverseBits(); 1532 break; 1533 case Intrinsic::bswap: 1534 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); 1535 Known.Zero |= Known2.Zero.byteSwap(); 1536 Known.One |= Known2.One.byteSwap(); 1537 break; 1538 case Intrinsic::ctlz: { 1539 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1540 // If we have a known 1, its position is our upper bound. 1541 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 1542 // If this call is poison for 0 input, the result will be less than 2^n. 
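// For example (illustrative): for an i8 operand with a known one at bit 5, at most two leading bits can be zero, so PossibleLZ is 2, bit_width(2) is 2, and every result bit from bit 2 upward is known zero.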
1543 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) 1544 PossibleLZ = std::min(PossibleLZ, BitWidth - 1); 1545 unsigned LowBits = llvm::bit_width(PossibleLZ); 1546 Known.Zero.setBitsFrom(LowBits); 1547 break; 1548 } 1549 case Intrinsic::cttz: { 1550 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1551 // If we have a known 1, its position is our upper bound. 1552 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 1553 // If this call is poison for 0 input, the result will be less than 2^n. 1554 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) 1555 PossibleTZ = std::min(PossibleTZ, BitWidth - 1); 1556 unsigned LowBits = llvm::bit_width(PossibleTZ); 1557 Known.Zero.setBitsFrom(LowBits); 1558 break; 1559 } 1560 case Intrinsic::ctpop: { 1561 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1562 // We can bound the space the count needs. Also, bits known to be zero 1563 // can't contribute to the population. 1564 unsigned BitsPossiblySet = Known2.countMaxPopulation(); 1565 unsigned LowBits = llvm::bit_width(BitsPossiblySet); 1566 Known.Zero.setBitsFrom(LowBits); 1567 // TODO: we could bound KnownOne using the lower bound on the number 1568 // of bits which might be set provided by popcnt KnownOne2. 1569 break; 1570 } 1571 case Intrinsic::fshr: 1572 case Intrinsic::fshl: { 1573 const APInt *SA; 1574 if (!match(I->getOperand(2), m_APInt(SA))) 1575 break; 1576 1577 // Normalize to funnel shift left. 1578 uint64_t ShiftAmt = SA->urem(BitWidth); 1579 if (II->getIntrinsicID() == Intrinsic::fshr) 1580 ShiftAmt = BitWidth - ShiftAmt; 1581 1582 KnownBits Known3(BitWidth); 1583 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1584 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q); 1585 1586 Known.Zero = 1587 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt); 1588 Known.One = 1589 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt); 1590 break; 1591 } 1592 case Intrinsic::uadd_sat: 1593 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1594 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1595 Known = KnownBits::uadd_sat(Known, Known2); 1596 break; 1597 case Intrinsic::usub_sat: 1598 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1599 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1600 Known = KnownBits::usub_sat(Known, Known2); 1601 break; 1602 case Intrinsic::sadd_sat: 1603 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1604 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1605 Known = KnownBits::sadd_sat(Known, Known2); 1606 break; 1607 case Intrinsic::ssub_sat: 1608 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1609 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1610 Known = KnownBits::ssub_sat(Known, Known2); 1611 break; 1612 case Intrinsic::umin: 1613 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1614 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1615 Known = KnownBits::umin(Known, Known2); 1616 break; 1617 case Intrinsic::umax: 1618 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1619 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1620 Known = KnownBits::umax(Known, Known2); 1621 break; 1622 case Intrinsic::smin: 1623 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1624 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1625 Known = KnownBits::smin(Known, Known2); 1626 break; 1627 case Intrinsic::smax: 1628 computeKnownBits(I->getOperand(0), Known, Depth + 1, 
Q); 1629 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1630 Known = KnownBits::smax(Known, Known2); 1631 break; 1632 case Intrinsic::ptrmask: { 1633 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1634 1635 const Value *Mask = I->getOperand(1); 1636 Known2 = KnownBits(Mask->getType()->getScalarSizeInBits()); 1637 computeKnownBits(Mask, Known2, Depth + 1, Q); 1638 // This is essentially a bitwise 'and' of the pointer value with the mask. 1639 Known &= Known2.zextOrTrunc(Known.getBitWidth()); 1640 break; 1641 } 1642 case Intrinsic::x86_sse42_crc32_64_64: 1643 Known.Zero.setBitsFrom(32); 1644 break; 1645 case Intrinsic::riscv_vsetvli: 1646 case Intrinsic::riscv_vsetvlimax: 1647 // Assume that the VL output is at most 65536, i.e. that it fits in 17 bits. 1648 // TODO: Take SEW and LMUL into account. 1649 if (BitWidth > 17) 1650 Known.Zero.setBitsFrom(17); 1651 break; 1652 case Intrinsic::vscale: { 1653 if (!II->getParent() || !II->getFunction()) 1654 break; 1655 1656 Known = getVScaleRange(II->getFunction(), BitWidth).toKnownBits(); 1657 break; 1658 } 1659 } 1660 } 1661 break; 1662 case Instruction::ShuffleVector: { 1663 auto *Shuf = dyn_cast<ShuffleVectorInst>(I); 1664 // FIXME: Do we need to handle ConstantExpr involving shufflevectors? 1665 if (!Shuf) { 1666 Known.resetAll(); 1667 return; 1668 } 1669 // For undef elements, we don't know anything about the common state of 1670 // the shuffle result. 1671 APInt DemandedLHS, DemandedRHS; 1672 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) { 1673 Known.resetAll(); 1674 return; 1675 } 1676 Known.One.setAllBits(); 1677 Known.Zero.setAllBits(); 1678 if (!!DemandedLHS) { 1679 const Value *LHS = Shuf->getOperand(0); 1680 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q); 1681 // If we don't know any bits, early out. 1682 if (Known.isUnknown()) 1683 break; 1684 } 1685 if (!!DemandedRHS) { 1686 const Value *RHS = Shuf->getOperand(1); 1687 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q); 1688 Known = Known.intersectWith(Known2); 1689 } 1690 break; 1691 } 1692 case Instruction::InsertElement: { 1693 if (isa<ScalableVectorType>(I->getType())) { 1694 Known.resetAll(); 1695 return; 1696 } 1697 const Value *Vec = I->getOperand(0); 1698 const Value *Elt = I->getOperand(1); 1699 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2)); 1700 // Early out if the index is non-constant or out-of-range. 1701 unsigned NumElts = DemandedElts.getBitWidth(); 1702 if (!CIdx || CIdx->getValue().uge(NumElts)) { 1703 Known.resetAll(); 1704 return; 1705 } 1706 Known.One.setAllBits(); 1707 Known.Zero.setAllBits(); 1708 unsigned EltIdx = CIdx->getZExtValue(); 1709 // Do we demand the inserted element? 1710 if (DemandedElts[EltIdx]) { 1711 computeKnownBits(Elt, Known, Depth + 1, Q); 1712 // If we don't know any bits, early out. 1713 if (Known.isUnknown()) 1714 break; 1715 } 1716 // We don't need the base vector element that has been inserted. 1717 APInt DemandedVecElts = DemandedElts; 1718 DemandedVecElts.clearBit(EltIdx); 1719 if (!!DemandedVecElts) { 1720 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q); 1721 Known = Known.intersectWith(Known2); 1722 } 1723 break; 1724 } 1725 case Instruction::ExtractElement: { 1726 // Look through extract element. If the index is non-constant or 1727 // out-of-range demand all elements, otherwise just the extracted element.
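// For example (illustrative): 'extractelement <4 x i8> %v, i64 2' demands only lane 2 of %v, so bits known in the other lanes place no constraint on the result.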
1728 const Value *Vec = I->getOperand(0); 1729 const Value *Idx = I->getOperand(1); 1730 auto *CIdx = dyn_cast<ConstantInt>(Idx); 1731 if (isa<ScalableVectorType>(Vec->getType())) { 1732 // FIXME: there's probably *something* we can do with scalable vectors 1733 Known.resetAll(); 1734 break; 1735 } 1736 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 1737 APInt DemandedVecElts = APInt::getAllOnes(NumElts); 1738 if (CIdx && CIdx->getValue().ult(NumElts)) 1739 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); 1740 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q); 1741 break; 1742 } 1743 case Instruction::ExtractValue: 1744 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) { 1745 const ExtractValueInst *EVI = cast<ExtractValueInst>(I); 1746 if (EVI->getNumIndices() != 1) break; 1747 if (EVI->getIndices()[0] == 0) { 1748 switch (II->getIntrinsicID()) { 1749 default: break; 1750 case Intrinsic::uadd_with_overflow: 1751 case Intrinsic::sadd_with_overflow: 1752 computeKnownBitsAddSub(true, II->getArgOperand(0), 1753 II->getArgOperand(1), false, DemandedElts, 1754 Known, Known2, Depth, Q); 1755 break; 1756 case Intrinsic::usub_with_overflow: 1757 case Intrinsic::ssub_with_overflow: 1758 computeKnownBitsAddSub(false, II->getArgOperand(0), 1759 II->getArgOperand(1), false, DemandedElts, 1760 Known, Known2, Depth, Q); 1761 break; 1762 case Intrinsic::umul_with_overflow: 1763 case Intrinsic::smul_with_overflow: 1764 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false, 1765 DemandedElts, Known, Known2, Depth, Q); 1766 break; 1767 } 1768 } 1769 } 1770 break; 1771 case Instruction::Freeze: 1772 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT, 1773 Depth + 1)) 1774 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1775 break; 1776 } 1777 } 1778 1779 /// Determine which bits of V are known to be either zero or one and return 1780 /// them. 1781 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts, 1782 unsigned Depth, const SimplifyQuery &Q) { 1783 KnownBits Known(getBitWidth(V->getType(), Q.DL)); 1784 computeKnownBits(V, DemandedElts, Known, Depth, Q); 1785 return Known; 1786 } 1787 1788 /// Determine which bits of V are known to be either zero or one and return 1789 /// them. 1790 KnownBits computeKnownBits(const Value *V, unsigned Depth, 1791 const SimplifyQuery &Q) { 1792 KnownBits Known(getBitWidth(V->getType(), Q.DL)); 1793 computeKnownBits(V, Known, Depth, Q); 1794 return Known; 1795 } 1796 1797 /// Determine which bits of V are known to be either zero or one and return 1798 /// them in the Known bit set. 1799 /// 1800 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that 1801 /// we cannot optimize based on the assumption that it is zero without changing 1802 /// it to be an explicit zero. If we don't change it to zero, other code could 1803 /// be optimized based on the contradictory assumption that it is non-zero. 1804 /// Because instcombine aggressively folds operations with undef args anyway, 1805 /// this won't lose us code quality. 1806 /// 1807 /// This function is defined on values with integer type, values with pointer 1808 /// type, and vectors of integers. When V is a vector, the known-zero and 1809 /// known-one values are the same width as the vector element, and a bit is 1810 /// set only if it is true for all of the demanded elements in the vector 1811 /// specified by DemandedElts.
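/// For example (illustrative): if DemandedElts selects both lanes of a <2 x i8> vector and each lane is known to be even, bit 0 is reported as known zero; if either demanded lane may be odd, bit 0 stays unknown.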
1812 void computeKnownBits(const Value *V, const APInt &DemandedElts, 1813 KnownBits &Known, unsigned Depth, 1814 const SimplifyQuery &Q) { 1815 if (!DemandedElts) { 1816 // No demanded elts, better to assume we don't know anything. 1817 Known.resetAll(); 1818 return; 1819 } 1820 1821 assert(V && "No Value?"); 1822 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); 1823 1824 #ifndef NDEBUG 1825 Type *Ty = V->getType(); 1826 unsigned BitWidth = Known.getBitWidth(); 1827 1828 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) && 1829 "Not integer or pointer type!"); 1830 1831 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) { 1832 assert( 1833 FVTy->getNumElements() == DemandedElts.getBitWidth() && 1834 "DemandedElt width should equal the fixed vector number of elements"); 1835 } else { 1836 assert(DemandedElts == APInt(1, 1) && 1837 "DemandedElt width should be 1 for scalars or scalable vectors"); 1838 } 1839 1840 Type *ScalarTy = Ty->getScalarType(); 1841 if (ScalarTy->isPointerTy()) { 1842 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) && 1843 "V and Known should have same BitWidth"); 1844 } else { 1845 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) && 1846 "V and Known should have same BitWidth"); 1847 } 1848 #endif 1849 1850 const APInt *C; 1851 if (match(V, m_APInt(C))) { 1852 // We know all of the bits for a scalar constant or a splat vector constant! 1853 Known = KnownBits::makeConstant(*C); 1854 return; 1855 } 1856 // Null and aggregate-zero are all-zeros. 1857 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) { 1858 Known.setAllZero(); 1859 return; 1860 } 1861 // Handle a constant vector by taking the intersection of the known bits of 1862 // each element. 1863 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) { 1864 assert(!isa<ScalableVectorType>(V->getType())); 1865 // We know that CDV must be a vector of integers. Take the intersection of 1866 // each element. 1867 Known.Zero.setAllBits(); Known.One.setAllBits(); 1868 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) { 1869 if (!DemandedElts[i]) 1870 continue; 1871 APInt Elt = CDV->getElementAsAPInt(i); 1872 Known.Zero &= ~Elt; 1873 Known.One &= Elt; 1874 } 1875 return; 1876 } 1877 1878 if (const auto *CV = dyn_cast<ConstantVector>(V)) { 1879 assert(!isa<ScalableVectorType>(V->getType())); 1880 // We know that CV must be a vector of integers. Take the intersection of 1881 // each element. 1882 Known.Zero.setAllBits(); Known.One.setAllBits(); 1883 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) { 1884 if (!DemandedElts[i]) 1885 continue; 1886 Constant *Element = CV->getAggregateElement(i); 1887 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element); 1888 if (!ElementCI) { 1889 Known.resetAll(); 1890 return; 1891 } 1892 const APInt &Elt = ElementCI->getValue(); 1893 Known.Zero &= ~Elt; 1894 Known.One &= Elt; 1895 } 1896 return; 1897 } 1898 1899 // Start out not knowing anything. 1900 Known.resetAll(); 1901 1902 // We can't imply anything about undefs. 1903 if (isa<UndefValue>(V)) 1904 return; 1905 1906 // There's no point in looking through other users of ConstantData for 1907 // assumptions. Confirm that we've handled them all. 1908 assert(!isa<ConstantData>(V) && "Unhandled constant data!"); 1909 1910 // All recursive calls that increase depth must come after this. 1911 if (Depth == MaxAnalysisRecursionDepth) 1912 return; 1913 1914 // A weak GlobalAlias is totally unknown. 
A non-weak GlobalAlias has 1915 // the bits of its aliasee. 1916 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 1917 if (!GA->isInterposable()) 1918 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q); 1919 return; 1920 } 1921 1922 if (const Operator *I = dyn_cast<Operator>(V)) 1923 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q); 1924 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { 1925 if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange()) 1926 Known = CR->toKnownBits(); 1927 } 1928 1929 // Aligned pointers have trailing zeros - refine Known.Zero set 1930 if (isa<PointerType>(V->getType())) { 1931 Align Alignment = V->getPointerAlignment(Q.DL); 1932 Known.Zero.setLowBits(Log2(Alignment)); 1933 } 1934 1935 // computeKnownBitsFromAssume strictly refines Known. 1936 // Therefore, we run them after computeKnownBitsFromOperator. 1937 1938 // Check whether a nearby assume intrinsic can determine some known bits. 1939 computeKnownBitsFromAssume(V, Known, Depth, Q); 1940 1941 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?"); 1942 } 1943 1944 /// Try to detect a recurrence that the value of the induction variable is 1945 /// always a power of two (or zero). 1946 static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero, 1947 unsigned Depth, SimplifyQuery &Q) { 1948 BinaryOperator *BO = nullptr; 1949 Value *Start = nullptr, *Step = nullptr; 1950 if (!matchSimpleRecurrence(PN, BO, Start, Step)) 1951 return false; 1952 1953 // Initial value must be a power of two. 1954 for (const Use &U : PN->operands()) { 1955 if (U.get() == Start) { 1956 // Initial value comes from a different BB, need to adjust context 1957 // instruction for analysis. 1958 Q.CxtI = PN->getIncomingBlock(U)->getTerminator(); 1959 if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q)) 1960 return false; 1961 } 1962 } 1963 1964 // Except for Mul, the induction variable must be on the left side of the 1965 // increment expression, otherwise its value can be arbitrary. 1966 if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step) 1967 return false; 1968 1969 Q.CxtI = BO->getParent()->getTerminator(); 1970 switch (BO->getOpcode()) { 1971 case Instruction::Mul: 1972 // Power of two is closed under multiplication. 1973 return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || 1974 Q.IIQ.hasNoSignedWrap(BO)) && 1975 isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q); 1976 case Instruction::SDiv: 1977 // Start value must not be signmask for signed division, so simply being a 1978 // power of two is not sufficient, and it has to be a constant. 1979 if (!match(Start, m_Power2()) || match(Start, m_SignMask())) 1980 return false; 1981 [[fallthrough]]; 1982 case Instruction::UDiv: 1983 // Divisor must be a power of two. 1984 // If OrZero is false, cannot guarantee induction variable is non-zero after 1985 // division, same for Shr, unless it is exact division. 1986 return (OrZero || Q.IIQ.isExact(BO)) && 1987 isKnownToBeAPowerOfTwo(Step, false, Depth, Q); 1988 case Instruction::Shl: 1989 return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO); 1990 case Instruction::AShr: 1991 if (!match(Start, m_Power2()) || match(Start, m_SignMask())) 1992 return false; 1993 [[fallthrough]]; 1994 case Instruction::LShr: 1995 return OrZero || Q.IIQ.isExact(BO); 1996 default: 1997 return false; 1998 } 1999 } 2000 2001 /// Return true if the given value is known to have exactly one 2002 /// bit set when defined. 
For vectors return true if every element is known to 2003 /// be a power of two when defined. Supports values with integer or pointer 2004 /// types and vectors of integers. 2005 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, 2006 const SimplifyQuery &Q) { 2007 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); 2008 2009 // Attempt to match against constants. 2010 if (OrZero && match(V, m_Power2OrZero())) 2011 return true; 2012 if (match(V, m_Power2())) 2013 return true; 2014 if (Q.CxtI && match(V, m_VScale())) { 2015 const Function *F = Q.CxtI->getFunction(); 2016 // The vscale_range indicates vscale is a power-of-two. 2017 return F->hasFnAttribute(Attribute::VScaleRange); 2018 } 2019 2020 // 1 << X is clearly a power of two if the one is not shifted off the end. If 2021 // it is shifted off the end then the result is undefined. 2022 if (match(V, m_Shl(m_One(), m_Value()))) 2023 return true; 2024 2025 // (signmask) >>l X is clearly a power of two if the one is not shifted off 2026 // the bottom. If it is shifted off the bottom then the result is undefined. 2027 if (match(V, m_LShr(m_SignMask(), m_Value()))) 2028 return true; 2029 2030 // The remaining tests are all recursive, so bail out if we hit the limit. 2031 if (Depth++ == MaxAnalysisRecursionDepth) 2032 return false; 2033 2034 Value *X = nullptr, *Y = nullptr; 2035 // A shift left or a logical shift right of a power of two is a power of two 2036 // or zero. 2037 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 2038 match(V, m_LShr(m_Value(X), m_Value())))) 2039 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); 2040 2041 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 2042 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); 2043 2044 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) 2045 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && 2046 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); 2047 2048 // Peek through min/max. 2049 if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) { 2050 return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) && 2051 isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q); 2052 } 2053 2054 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 2055 // A power of two and'd with anything is a power of two or zero. 2056 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || 2057 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) 2058 return true; 2059 // X & (-X) is always a power of two or zero. 2060 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 2061 return true; 2062 return false; 2063 } 2064 2065 // Adding a power-of-two or zero to the same power-of-two or zero yields 2066 // either the original power-of-two, a larger power-of-two or zero. 
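// For example (illustrative): if Y is known to be the power of two 8 and X = Y & Z for some Z, then X is either 8 or 0, so X + Y is either 16 or 8; the nuw/nsw check below rules out a wrap back to zero.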
2067 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 2068 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 2069 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) || 2070 Q.IIQ.hasNoSignedWrap(VOBO)) { 2071 if (match(X, m_And(m_Specific(Y), m_Value())) || 2072 match(X, m_And(m_Value(), m_Specific(Y)))) 2073 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) 2074 return true; 2075 if (match(Y, m_And(m_Specific(X), m_Value())) || 2076 match(Y, m_And(m_Value(), m_Specific(X)))) 2077 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) 2078 return true; 2079 2080 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 2081 KnownBits LHSBits(BitWidth); 2082 computeKnownBits(X, LHSBits, Depth, Q); 2083 2084 KnownBits RHSBits(BitWidth); 2085 computeKnownBits(Y, RHSBits, Depth, Q); 2086 // If i8 V is a power of two or zero: 2087 // ZeroBits: 1 1 1 0 1 1 1 1 2088 // ~ZeroBits: 0 0 0 1 0 0 0 0 2089 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) 2090 // If OrZero isn't set, we cannot give back a zero result. 2091 // Make sure either the LHS or RHS has a bit set. 2092 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue()) 2093 return true; 2094 } 2095 } 2096 2097 // A PHI node is power of two if all incoming values are power of two, or if 2098 // it is an induction variable where in each step its value is a power of two. 2099 if (const PHINode *PN = dyn_cast<PHINode>(V)) { 2100 SimplifyQuery RecQ = Q; 2101 2102 // Check if it is an induction variable and always power of two. 2103 if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ)) 2104 return true; 2105 2106 // Recursively check all incoming values. Limit recursion to 2 levels, so 2107 // that search complexity is limited to number of operands^2. 2108 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); 2109 return llvm::all_of(PN->operands(), [&](const Use &U) { 2110 // Value is power of 2 if it is coming from PHI node itself by induction. 2111 if (U.get() == PN) 2112 return true; 2113 2114 // Change the context instruction to the incoming block where it is 2115 // evaluated. 2116 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator(); 2117 return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ); 2118 }); 2119 } 2120 2121 // An exact divide or right shift can only shift off zero bits, so the result 2122 // is a power of two only if the first operand is a power of two and not 2123 // copying a sign bit (sdiv int_min, 2). 2124 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 2125 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 2126 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 2127 Depth, Q); 2128 } 2129 2130 return false; 2131 } 2132 2133 /// Test whether a GEP's result is known to be non-null. 2134 /// 2135 /// Uses properties inherent in a GEP to try to determine whether it is known 2136 /// to be non-null. 2137 /// 2138 /// Currently this routine does not support vector GEPs. 2139 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, 2140 const SimplifyQuery &Q) { 2141 const Function *F = nullptr; 2142 if (const Instruction *I = dyn_cast<Instruction>(GEP)) 2143 F = I->getFunction(); 2144 2145 if (!GEP->isInBounds() || 2146 NullPointerIsDefined(F, GEP->getPointerAddressSpace())) 2147 return false; 2148 2149 // FIXME: Support vector-GEPs. 
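// For example (illustrative): an inbounds GEP in address space zero such as 'getelementptr inbounds %struct.S, ptr %p, i64 0, i32 1' cannot be null whenever field 1 sits at a non-zero offset, regardless of what is known about %p; the operand walk below implements exactly this argument.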
2150 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); 2151 2152 // If the base pointer is non-null, we cannot walk to a null address with an 2153 // inbounds GEP in address space zero. 2154 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q)) 2155 return true; 2156 2157 // Walk the GEP operands and see if any operand introduces a non-zero offset. 2158 // If so, then the GEP cannot produce a null pointer, as doing so would 2159 // inherently violate the inbounds contract within address space zero. 2160 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); 2161 GTI != GTE; ++GTI) { 2162 // Struct types are easy -- they must always be indexed by a constant. 2163 if (StructType *STy = GTI.getStructTypeOrNull()) { 2164 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); 2165 unsigned ElementIdx = OpC->getZExtValue(); 2166 const StructLayout *SL = Q.DL.getStructLayout(STy); 2167 uint64_t ElementOffset = SL->getElementOffset(ElementIdx); 2168 if (ElementOffset > 0) 2169 return true; 2170 continue; 2171 } 2172 2173 // If we have a zero-sized type, the index doesn't matter. Keep looping. 2174 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).isZero()) 2175 continue; 2176 2177 // Fast path the constant operand case both for efficiency and so we don't 2178 // increment Depth when just zipping down an all-constant GEP. 2179 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { 2180 if (!OpC->isZero()) 2181 return true; 2182 continue; 2183 } 2184 2185 // We post-increment Depth here because while isKnownNonZero increments it 2186 // as well, when we pop back up that increment won't persist. We don't want 2187 // to recurse 10k times just because we have 10k GEP operands. We don't 2188 // bail completely out because we want to handle constant GEPs regardless 2189 // of depth. 2190 if (Depth++ >= MaxAnalysisRecursionDepth) 2191 continue; 2192 2193 if (isKnownNonZero(GTI.getOperand(), Depth, Q)) 2194 return true; 2195 } 2196 2197 return false; 2198 } 2199 2200 static bool isKnownNonNullFromDominatingCondition(const Value *V, 2201 const Instruction *CtxI, 2202 const DominatorTree *DT) { 2203 assert(!isa<Constant>(V) && "Called for constant?"); 2204 2205 if (!CtxI || !DT) 2206 return false; 2207 2208 unsigned NumUsesExplored = 0; 2209 for (const auto *U : V->users()) { 2210 // Avoid massive lists 2211 if (NumUsesExplored >= DomConditionsMaxUses) 2212 break; 2213 NumUsesExplored++; 2214 2215 // If the value is used as an argument to a call or invoke, then argument 2216 // attributes may provide an answer about null-ness. 2217 if (const auto *CB = dyn_cast<CallBase>(U)) 2218 if (auto *CalledFunc = CB->getCalledFunction()) 2219 for (const Argument &Arg : CalledFunc->args()) 2220 if (CB->getArgOperand(Arg.getArgNo()) == V && 2221 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) && 2222 DT->dominates(CB, CtxI)) 2223 return true; 2224 2225 // If the value is used as a load/store, then the pointer must be non null. 
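// For example (illustrative): if 'load i32, ptr %p' dominates CtxI and null is not a defined address in %p's address space, %p cannot be null at CtxI.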
2226 if (V == getLoadStorePointerOperand(U)) { 2227 const Instruction *I = cast<Instruction>(U); 2228 if (!NullPointerIsDefined(I->getFunction(), 2229 V->getType()->getPointerAddressSpace()) && 2230 DT->dominates(I, CtxI)) 2231 return true; 2232 } 2233 2234 // Consider only compare instructions uniquely controlling a branch 2235 Value *RHS; 2236 CmpInst::Predicate Pred; 2237 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS)))) 2238 continue; 2239 2240 bool NonNullIfTrue; 2241 if (cmpExcludesZero(Pred, RHS)) 2242 NonNullIfTrue = true; 2243 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS)) 2244 NonNullIfTrue = false; 2245 else 2246 continue; 2247 2248 SmallVector<const User *, 4> WorkList; 2249 SmallPtrSet<const User *, 4> Visited; 2250 for (const auto *CmpU : U->users()) { 2251 assert(WorkList.empty() && "Should be!"); 2252 if (Visited.insert(CmpU).second) 2253 WorkList.push_back(CmpU); 2254 2255 while (!WorkList.empty()) { 2256 auto *Curr = WorkList.pop_back_val(); 2257 2258 // If a user is an AND, add all its users to the work list. We only 2259 // propagate "pred != null" condition through AND because it is only 2260 // correct to assume that all conditions of AND are met in the true branch. 2261 // TODO: Support similar logic of OR and EQ predicate? 2262 if (NonNullIfTrue) 2263 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) { 2264 for (const auto *CurrU : Curr->users()) 2265 if (Visited.insert(CurrU).second) 2266 WorkList.push_back(CurrU); 2267 continue; 2268 } 2269 2270 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) { 2271 assert(BI->isConditional() && "uses a comparison!"); 2272 2273 BasicBlock *NonNullSuccessor = 2274 BI->getSuccessor(NonNullIfTrue ? 0 : 1); 2275 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); 2276 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) 2277 return true; 2278 } else if (NonNullIfTrue && isGuard(Curr) && 2279 DT->dominates(cast<Instruction>(Curr), CtxI)) { 2280 return true; 2281 } 2282 } 2283 } 2284 } 2285 2286 return false; 2287 } 2288 2289 /// Does the 'Range' metadata (which must be a valid MD_range operand list) 2290 /// ensure that the value it's attached to can never be 'Value'? 'Value' must 2291 /// have the same type as the values described by the ranges. 2292 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) { 2293 const unsigned NumRanges = Ranges->getNumOperands() / 2; 2294 assert(NumRanges >= 1); 2295 for (unsigned i = 0; i < NumRanges; ++i) { 2296 ConstantInt *Lower = 2297 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0)); 2298 ConstantInt *Upper = 2299 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1)); 2300 ConstantRange Range(Lower->getValue(), Upper->getValue()); 2301 if (Range.contains(Value)) 2302 return false; 2303 } 2304 return true; 2305 } 2306 2307 /// Try to detect a recurrence that monotonically increases/decreases from a 2308 /// non-zero starting value. These are common as induction variables. 2309 static bool isNonZeroRecurrence(const PHINode *PN) { 2310 BinaryOperator *BO = nullptr; 2311 Value *Start = nullptr, *Step = nullptr; 2312 const APInt *StartC, *StepC; 2313 if (!matchSimpleRecurrence(PN, BO, Start, Step) || 2314 !match(Start, m_APInt(StartC)) || StartC->isZero()) 2315 return false; 2316 2317 switch (BO->getOpcode()) { 2318 case Instruction::Add: 2319 // Starting from non-zero and stepping away from zero can never wrap back 2320 // to zero.
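// For example (illustrative): '%iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]' with '%iv.next = add nuw i32 %iv, 2' starts non-zero, and nuw forbids wrapping past the top, so %iv can never become zero.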
2321 return BO->hasNoUnsignedWrap() || 2322 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) && 2323 StartC->isNegative() == StepC->isNegative()); 2324 case Instruction::Mul: 2325 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) && 2326 match(Step, m_APInt(StepC)) && !StepC->isZero(); 2327 case Instruction::Shl: 2328 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap(); 2329 case Instruction::AShr: 2330 case Instruction::LShr: 2331 return BO->isExact(); 2332 default: 2333 return false; 2334 } 2335 } 2336 2337 static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth, 2338 const SimplifyQuery &Q, unsigned BitWidth, Value *X, 2339 Value *Y, bool NSW) { 2340 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q); 2341 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q); 2342 2343 // If X and Y are both non-negative (as signed values) then their sum is not 2344 // zero unless both X and Y are zero. 2345 if (XKnown.isNonNegative() && YKnown.isNonNegative()) 2346 if (isKnownNonZero(Y, DemandedElts, Depth, Q) || 2347 isKnownNonZero(X, DemandedElts, Depth, Q)) 2348 return true; 2349 2350 // If X and Y are both negative (as signed values) then their sum is not 2351 // zero unless both X and Y equal INT_MIN. 2352 if (XKnown.isNegative() && YKnown.isNegative()) { 2353 APInt Mask = APInt::getSignedMaxValue(BitWidth); 2354 // The sign bit of X is set. If some other bit is set then X is not equal 2355 // to INT_MIN. 2356 if (XKnown.One.intersects(Mask)) 2357 return true; 2358 // The sign bit of Y is set. If some other bit is set then Y is not equal 2359 // to INT_MIN. 2360 if (YKnown.One.intersects(Mask)) 2361 return true; 2362 } 2363 2364 // The sum of a non-negative number and a power of two is not zero. 2365 if (XKnown.isNonNegative() && 2366 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) 2367 return true; 2368 if (YKnown.isNonNegative() && 2369 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) 2370 return true; 2371 2372 return KnownBits::computeForAddSub(/*Add*/ true, NSW, XKnown, YKnown) 2373 .isNonZero(); 2374 } 2375 2376 static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth, 2377 const SimplifyQuery &Q, unsigned BitWidth, Value *X, 2378 Value *Y) { 2379 if (auto *C = dyn_cast<Constant>(X)) 2380 if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Depth, Q)) 2381 return true; 2382 2383 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q); 2384 if (XKnown.isUnknown()) 2385 return false; 2386 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q); 2387 // If X != Y then X - Y is non zero. 2388 std::optional<bool> ne = KnownBits::ne(XKnown, YKnown); 2389 // If we are unable to compute if X != Y, we won't be able to do anything 2390 // computing the knownbits of the sub expression so just return here. 
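// For example (illustrative): if bit 0 of X is known one while bit 0 of Y is known zero, KnownBits::ne proves X != Y, and therefore X - Y != 0.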
2391 return ne && *ne; 2392 } 2393 2394 static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts, 2395 unsigned Depth, const SimplifyQuery &Q, 2396 const KnownBits &KnownVal) { 2397 auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) { 2398 switch (I->getOpcode()) { 2399 case Instruction::Shl: 2400 return Lhs.shl(Rhs); 2401 case Instruction::LShr: 2402 return Lhs.lshr(Rhs); 2403 case Instruction::AShr: 2404 return Lhs.ashr(Rhs); 2405 default: 2406 llvm_unreachable("Unknown Shift Opcode"); 2407 } 2408 }; 2409 2410 auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) { 2411 switch (I->getOpcode()) { 2412 case Instruction::Shl: 2413 return Lhs.lshr(Rhs); 2414 case Instruction::LShr: 2415 case Instruction::AShr: 2416 return Lhs.shl(Rhs); 2417 default: 2418 llvm_unreachable("Unknown Shift Opcode"); 2419 } 2420 }; 2421 2422 if (KnownVal.isUnknown()) 2423 return false; 2424 2425 KnownBits KnownCnt = 2426 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); 2427 APInt MaxShift = KnownCnt.getMaxValue(); 2428 unsigned NumBits = KnownVal.getBitWidth(); 2429 if (MaxShift.uge(NumBits)) 2430 return false; 2431 2432 if (!ShiftOp(KnownVal.One, MaxShift).isZero()) 2433 return true; 2434 2435 // If all of the bits shifted out are known to be zero, and Val is known 2436 // non-zero then at least one non-zero bit must remain. 2437 if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift) 2438 .eq(InvShiftOp(APInt::getAllOnes(NumBits), NumBits - MaxShift)) && 2439 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q)) 2440 return true; 2441 2442 return false; 2443 } 2444 2445 static bool isKnownNonZeroFromOperator(const Operator *I, 2446 const APInt &DemandedElts, 2447 unsigned Depth, const SimplifyQuery &Q) { 2448 unsigned BitWidth = getBitWidth(I->getType()->getScalarType(), Q.DL); 2449 switch (I->getOpcode()) { 2450 case Instruction::GetElementPtr: 2451 if (I->getType()->isPointerTy()) 2452 return isGEPKnownNonNull(cast<GEPOperator>(I), Depth, Q); 2453 break; 2454 case Instruction::BitCast: { 2455 // We need to be a bit careful here. We can only peek through the bitcast 2456 // if the scalar size of the operand's elements is smaller than, and evenly 2457 // divides, the scalar size they are casting to. Take three cases: 2458 // 2459 // 1) Unsafe: 2460 // bitcast <2 x i16> %NonZero to <4 x i8> 2461 // 2462 // %NonZero can have 2 non-zero i16 elements, but isKnownNonZero on a 2463 // <4 x i8> requires that all 4 i8 elements be non-zero which isn't 2464 // guaranteed (imagine just the sign bit set in the 2 i16 elements). 2465 // 2466 // 2) Unsafe: 2467 // bitcast <4 x i3> %NonZero to <3 x i4> 2468 // 2469 // Even though the scalar size of the src (`i3`) is smaller than the 2470 // scalar size of the dst (`i4`), because `i4` is not a multiple of `i3` 2471 // it's possible for the `3 x i4` elements to be zero, because there are 2472 // some elements in the destination that don't contain any full src 2473 // element. 2474 // 2475 // 3) Safe: 2476 // bitcast <4 x i8> %NonZero to <2 x i16> 2477 // 2478 // This is always safe, as non-zero in the 4 i8 elements implies 2479 // non-zero in the combination of any two adjacent ones. Since i16 is a 2480 // multiple of i8, each i16 is guaranteed to contain 2 full i8 elements. 2481 // This all implies the 2 i16 elements are non-zero.
2482 Type *FromTy = I->getOperand(0)->getType(); 2483 if ((FromTy->isIntOrIntVectorTy() || FromTy->isPtrOrPtrVectorTy()) && 2484 (BitWidth % getBitWidth(FromTy->getScalarType(), Q.DL)) == 0) 2485 return isKnownNonZero(I->getOperand(0), Depth, Q); 2486 } break; 2487 case Instruction::IntToPtr: 2488 // Note that we have to take special care to avoid looking through 2489 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well 2490 // as casts that can alter the value, e.g., AddrSpaceCasts. 2491 if (!isa<ScalableVectorType>(I->getType()) && 2492 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <= 2493 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue()) 2494 return isKnownNonZero(I->getOperand(0), Depth, Q); 2495 break; 2496 case Instruction::PtrToInt: 2497 // Similar to int2ptr above, we can look through ptr2int here if the cast 2498 // is a no-op or an extend and not a truncate. 2499 if (!isa<ScalableVectorType>(I->getType()) && 2500 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <= 2501 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue()) 2502 return isKnownNonZero(I->getOperand(0), Depth, Q); 2503 break; 2504 case Instruction::Sub: 2505 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), 2506 I->getOperand(1)); 2507 case Instruction::Or: 2508 // X | Y != 0 if X != 0 or Y != 0. 2509 return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) || 2510 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); 2511 case Instruction::SExt: 2512 case Instruction::ZExt: 2513 // ext X != 0 if X != 0. 2514 return isKnownNonZero(I->getOperand(0), Depth, Q); 2515 2516 case Instruction::Shl: { 2517 // shl nsw/nuw can't remove any non-zero bits. 2518 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(I); 2519 if (Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO)) 2520 return isKnownNonZero(I->getOperand(0), Depth, Q); 2521 2522 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 2523 // if the lowest bit is shifted off the end. 2524 KnownBits Known(BitWidth); 2525 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth, Q); 2526 if (Known.One[0]) 2527 return true; 2528 2529 return isNonZeroShift(I, DemandedElts, Depth, Q, Known); 2530 } 2531 case Instruction::LShr: 2532 case Instruction::AShr: { 2533 // shr exact can only shift out zero bits. 2534 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(I); 2535 if (BO->isExact()) 2536 return isKnownNonZero(I->getOperand(0), Depth, Q); 2537 2538 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 2539 // defined if the sign bit is shifted off the end. 2540 KnownBits Known = 2541 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); 2542 if (Known.isNegative()) 2543 return true; 2544 2545 return isNonZeroShift(I, DemandedElts, Depth, Q, Known); 2546 } 2547 case Instruction::UDiv: 2548 case Instruction::SDiv: 2549 // X / Y 2550 // div exact can only produce a zero if the dividend is zero. 2551 if (cast<PossiblyExactOperator>(I)->isExact()) 2552 return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); 2553 if (I->getOpcode() == Instruction::UDiv) { 2554 std::optional<bool> XUgeY; 2555 KnownBits XKnown = 2556 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); 2557 if (!XKnown.isUnknown()) { 2558 KnownBits YKnown = 2559 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); 2560 // If X u>= Y then div is non zero (0/0 is UB). 
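// For example (illustrative): if the sign bit of X is known one and the sign bit of Y is known zero, then X u>= 2^(BitWidth-1) > Y, so the quotient is at least 1.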
2561 XUgeY = KnownBits::uge(XKnown, YKnown); 2562 } 2563 // If X is totally unknown or X u< Y, we won't be able to prove non-zero 2564 // with computeKnownBits, so just return early. 2565 return XUgeY && *XUgeY; 2566 } 2567 break; 2568 case Instruction::Add: { 2569 // X + Y. 2570 2571 // If Add has the nuw flag, then if either X or Y is non-zero the result is 2572 // non-zero. 2573 auto *BO = cast<OverflowingBinaryOperator>(I); 2574 if (Q.IIQ.hasNoUnsignedWrap(BO)) 2575 return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) || 2576 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); 2577 2578 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), 2579 I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO)); 2580 } 2581 case Instruction::Mul: { 2582 // If X and Y are non-zero then so is X * Y as long as the multiplication 2583 // does not overflow. 2584 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(I); 2585 if (Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) 2586 return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q) && 2587 isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q); 2588 2589 // If either X or Y is odd, then if the other is non-zero the result can't 2590 // be zero. 2591 KnownBits XKnown = 2592 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); 2593 if (XKnown.One[0]) 2594 return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q); 2595 2596 KnownBits YKnown = 2597 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); 2598 if (YKnown.One[0]) 2599 return XKnown.isNonZero() || 2600 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); 2601 2602 // If there exists any subset of X (sX) and subset of Y (sY) s.t. sX * sY 2603 // is non-zero, then X * Y is non-zero. We can find sX and sY by just 2604 // taking the lowest known One of X and Y. If they are non-zero, the 2605 // result must be non-zero. We can check if LSB(X) * LSB(Y) != 0 by doing 2606 // X.countMaxTrailingZeros() + Y.countMaxTrailingZeros() < BitWidth. 2607 return (XKnown.countMaxTrailingZeros() + YKnown.countMaxTrailingZeros()) < 2608 BitWidth; 2609 } 2610 case Instruction::Select: { 2611 // (C ? X : Y) != 0 if X != 0 and Y != 0. 2612 2613 // First check if the arm is non-zero using `isKnownNonZero`. If that fails, 2614 // then see if the select condition implies the arm is non-zero. For example 2615 // (X != 0 ? X : Y), we know the true arm is non-zero as the `X` "return" is 2616 // dominated by `X != 0`. 2617 auto SelectArmIsNonZero = [&](bool IsTrueArm) { 2618 Value *Op; 2619 Op = IsTrueArm ? I->getOperand(1) : I->getOperand(2); 2620 // Op is trivially non-zero. 2621 if (isKnownNonZero(Op, DemandedElts, Depth, Q)) 2622 return true; 2623 2624 // The condition of the select dominates the true/false arm. Check if the 2625 // condition implies that a given arm is non-zero. 2626 Value *X; 2627 CmpInst::Predicate Pred; 2628 if (!match(I->getOperand(0), m_c_ICmp(Pred, m_Specific(Op), m_Value(X)))) 2629 return false; 2630 2631 if (!IsTrueArm) 2632 Pred = ICmpInst::getInversePredicate(Pred); 2633 2634 return cmpExcludesZero(Pred, X); 2635 }; 2636 2637 if (SelectArmIsNonZero(/* IsTrueArm */ true) && 2638 SelectArmIsNonZero(/* IsTrueArm */ false)) 2639 return true; 2640 break; 2641 } 2642 case Instruction::PHI: { 2643 auto *PN = cast<PHINode>(I); 2644 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN)) 2645 return true; 2646 2647 // Check if all incoming values are non-zero using recursion.
2648 SimplifyQuery RecQ = Q; 2649 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); 2650 return llvm::all_of(PN->operands(), [&](const Use &U) { 2651 if (U.get() == PN) 2652 return true; 2653 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator(); 2654 return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ); 2655 }); 2656 } 2657 case Instruction::ExtractElement: 2658 if (const auto *EEI = dyn_cast<ExtractElementInst>(I)) { 2659 const Value *Vec = EEI->getVectorOperand(); 2660 const Value *Idx = EEI->getIndexOperand(); 2661 auto *CIdx = dyn_cast<ConstantInt>(Idx); 2662 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) { 2663 unsigned NumElts = VecTy->getNumElements(); 2664 APInt DemandedVecElts = APInt::getAllOnes(NumElts); 2665 if (CIdx && CIdx->getValue().ult(NumElts)) 2666 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); 2667 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q); 2668 } 2669 } 2670 break; 2671 case Instruction::Freeze: 2672 return isKnownNonZero(I->getOperand(0), Depth, Q) && 2673 isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT, 2674 Depth); 2675 case Instruction::Load: 2676 // A Load tagged with nonnull metadata is never null. 2677 if (Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_nonnull)) 2678 return true; 2679 2680 // No need to fall through to computeKnownBits as range metadata is already 2681 // handled in isKnownNonZero. 2682 return false; 2683 case Instruction::Call: 2684 if (auto *II = dyn_cast<IntrinsicInst>(I)) { 2685 switch (II->getIntrinsicID()) { 2686 case Intrinsic::sshl_sat: 2687 case Intrinsic::ushl_sat: 2688 case Intrinsic::abs: 2689 case Intrinsic::bitreverse: 2690 case Intrinsic::bswap: 2691 case Intrinsic::ctpop: 2692 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q); 2693 case Intrinsic::ssub_sat: 2694 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, 2695 II->getArgOperand(0), II->getArgOperand(1)); 2696 case Intrinsic::sadd_sat: 2697 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, 2698 II->getArgOperand(0), II->getArgOperand(1), 2699 /*NSW*/ true); 2700 case Intrinsic::umax: 2701 case Intrinsic::uadd_sat: 2702 return isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q) || 2703 isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q); 2704 case Intrinsic::smin: 2705 case Intrinsic::smax: { 2706 auto KnownOpImpliesNonZero = [&](const KnownBits &K) { 2707 return II->getIntrinsicID() == Intrinsic::smin 2708 ? K.isNegative() 2709 : K.isStrictlyPositive(); 2710 }; 2711 KnownBits XKnown = 2712 computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q); 2713 if (KnownOpImpliesNonZero(XKnown)) 2714 return true; 2715 KnownBits YKnown = 2716 computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q); 2717 if (KnownOpImpliesNonZero(YKnown)) 2718 return true; 2719 2720 if (XKnown.isNonZero() && YKnown.isNonZero()) 2721 return true; 2722 } 2723 [[fallthrough]]; 2724 case Intrinsic::umin: 2725 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q) && 2726 isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q); 2727 case Intrinsic::cttz: 2728 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q) 2729 .Zero[0]; 2730 case Intrinsic::ctlz: 2731 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q) 2732 .isNonNegative(); 2733 case Intrinsic::fshr: 2734 case Intrinsic::fshl: 2735 // If Op0 == Op1, this is a rotate. rotate(x, y) != 0 iff x != 0. 
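// For example (illustrative): fshl(%x, %x, 3) rotates %x left by three bits; a rotate merely permutes bits, so the result is zero exactly when %x is.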
2736 if (II->getArgOperand(0) == II->getArgOperand(1)) 2737 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q); 2738 break; 2739 case Intrinsic::vscale: 2740 return true; 2741 default: 2742 break; 2743 } 2744 } 2745 break; 2746 } 2747 2748 KnownBits Known(BitWidth); 2749 computeKnownBits(I, DemandedElts, Known, Depth, Q); 2750 return Known.One != 0; 2751 } 2752 2753 /// Return true if the given value is known to be non-zero when defined. For 2754 /// vectors, return true if every demanded element is known to be non-zero when 2755 /// defined. For pointers, if the context instruction and dominator tree are 2756 /// specified, perform context-sensitive analysis and return true if the 2757 /// pointer couldn't possibly be null at the specified instruction. 2758 /// Supports values with integer or pointer type and vectors of integers. 2759 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth, 2760 const SimplifyQuery &Q) { 2761 2762 #ifndef NDEBUG 2763 Type *Ty = V->getType(); 2764 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); 2765 2766 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) { 2767 assert( 2768 FVTy->getNumElements() == DemandedElts.getBitWidth() && 2769 "DemandedElt width should equal the fixed vector number of elements"); 2770 } else { 2771 assert(DemandedElts == APInt(1, 1) && 2772 "DemandedElt width should be 1 for scalars"); 2773 } 2774 #endif 2775 2776 if (auto *C = dyn_cast<Constant>(V)) { 2777 if (C->isNullValue()) 2778 return false; 2779 if (isa<ConstantInt>(C)) 2780 // Must be non-zero due to null test above. 2781 return true; 2782 2783 // For constant vectors, check that all elements are undefined or known 2784 // non-zero to determine that the whole vector is known non-zero. 2785 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) { 2786 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) { 2787 if (!DemandedElts[i]) 2788 continue; 2789 Constant *Elt = C->getAggregateElement(i); 2790 if (!Elt || Elt->isNullValue()) 2791 return false; 2792 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt)) 2793 return false; 2794 } 2795 return true; 2796 } 2797 2798 // A global variable in address space 0 is non null unless extern weak 2799 // or an absolute symbol reference. Other address spaces may have null as a 2800 // valid address for a global, so we can't assume anything. 2801 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { 2802 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && 2803 GV->getType()->getAddressSpace() == 0) 2804 return true; 2805 } 2806 2807 // For constant expressions, fall through to the Operator code below. 2808 if (!isa<ConstantExpr>(V)) 2809 return false; 2810 } 2811 2812 if (auto *I = dyn_cast<Instruction>(V)) { 2813 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) { 2814 // If the possible ranges don't contain zero, then the value is 2815 // definitely non-zero. 2816 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) { 2817 const APInt ZeroValue(Ty->getBitWidth(), 0); 2818 if (rangeMetadataExcludesValue(Ranges, ZeroValue)) 2819 return true; 2820 } 2821 } 2822 } 2823 2824 if (!isa<Constant>(V) && isKnownNonZeroFromAssume(V, Q)) 2825 return true; 2826 2827 // Some of the tests below are recursive, so bail out if we hit the limit. 2828 if (Depth++ >= MaxAnalysisRecursionDepth) 2829 return false; 2830 2831 // Check for pointer simplifications. 
2832 2833 if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) { 2834 // Alloca never returns null, malloc might. 2835 if (isa<AllocaInst>(V) && PtrTy->getAddressSpace() == 0) 2836 return true; 2837 2838 // A byval or inalloca argument is only non-null when null is not a valid 2839 // address in its address space. A nonnull argument is assumed never null. 2840 if (const Argument *A = dyn_cast<Argument>(V)) { 2841 if (((A->hasPassPointeeByValueCopyAttr() && 2842 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) || 2843 A->hasNonNullAttr())) 2844 return true; 2845 } 2846 2847 if (const auto *Call = dyn_cast<CallBase>(V)) { 2848 if (Call->isReturnNonNull()) 2849 return true; 2850 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true)) 2851 return isKnownNonZero(RP, Depth, Q); 2852 } 2853 } 2854 2855 if (const auto *I = dyn_cast<Operator>(V)) 2856 if (isKnownNonZeroFromOperator(I, DemandedElts, Depth, Q)) 2857 return true; 2858 2859 if (!isa<Constant>(V) && 2860 isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) 2861 return true; 2862 2863 return false; 2864 } 2865 2866 bool isKnownNonZero(const Value *V, unsigned Depth, const SimplifyQuery &Q) { 2867 auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); 2868 APInt DemandedElts = 2869 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); 2870 return isKnownNonZero(V, DemandedElts, Depth, Q); 2871 } 2872 2873 /// If the pair of operators are the same invertible function, return the 2874 /// operands of the function corresponding to each input. Otherwise, 2875 /// return std::nullopt. An invertible function is one that is 1-to-1 and maps 2876 /// every input value to exactly one output value. This is equivalent to 2877 /// saying that Op1 and Op2 are equal exactly when the specified pair of 2878 /// operands are equal (except that Op1 and Op2 may be poison more often). 2879 static std::optional<std::pair<Value*, Value*>> 2880 getInvertibleOperands(const Operator *Op1, 2881 const Operator *Op2) { 2882 if (Op1->getOpcode() != Op2->getOpcode()) 2883 return std::nullopt; 2884 2885 auto getOperands = [&](unsigned OpNum) -> auto { 2886 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum)); 2887 }; 2888 2889 switch (Op1->getOpcode()) { 2890 default: 2891 break; 2892 case Instruction::Add: 2893 case Instruction::Sub: 2894 if (Op1->getOperand(0) == Op2->getOperand(0)) 2895 return getOperands(1); 2896 if (Op1->getOperand(1) == Op2->getOperand(1)) 2897 return getOperands(0); 2898 break; 2899 case Instruction::Mul: { 2900 // invertible if A * B == (A * B) mod 2^N where A and B are integers 2901 // and N is the bitwidth. The nsw case is non-obvious, but proven by 2902 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK 2903 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1); 2904 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2); 2905 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && 2906 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) 2907 break; 2908 2909 // Assume operand order has been canonicalized 2910 if (Op1->getOperand(1) == Op2->getOperand(1) && 2911 isa<ConstantInt>(Op1->getOperand(1)) && 2912 !cast<ConstantInt>(Op1->getOperand(1))->isZero()) 2913 return getOperands(0); 2914 break; 2915 } 2916 case Instruction::Shl: { 2917 // Same as multiplies, with the difference that we don't need to check 2918 // for a non-zero multiply. Shifts always multiply by non-zero.
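// For example (illustrative): with matching flags, 'shl nuw i32 %x, 3' can equal 'shl nuw i32 %y, 3' only when %x == %y, since shl nuw by a fixed amount loses no bits and is therefore injective.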
2919 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1); 2920 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2); 2921 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && 2922 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) 2923 break; 2924 2925 if (Op1->getOperand(1) == Op2->getOperand(1)) 2926 return getOperands(0); 2927 break; 2928 } 2929 case Instruction::AShr: 2930 case Instruction::LShr: { 2931 auto *PEO1 = cast<PossiblyExactOperator>(Op1); 2932 auto *PEO2 = cast<PossiblyExactOperator>(Op2); 2933 if (!PEO1->isExact() || !PEO2->isExact()) 2934 break; 2935 2936 if (Op1->getOperand(1) == Op2->getOperand(1)) 2937 return getOperands(0); 2938 break; 2939 } 2940 case Instruction::SExt: 2941 case Instruction::ZExt: 2942 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType()) 2943 return getOperands(0); 2944 break; 2945 case Instruction::PHI: { 2946 const PHINode *PN1 = cast<PHINode>(Op1); 2947 const PHINode *PN2 = cast<PHINode>(Op2); 2948 2949 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences 2950 // are a single invertible function of the start values? Note that repeated 2951 // application of an invertible function is also invertible 2952 BinaryOperator *BO1 = nullptr; 2953 Value *Start1 = nullptr, *Step1 = nullptr; 2954 BinaryOperator *BO2 = nullptr; 2955 Value *Start2 = nullptr, *Step2 = nullptr; 2956 if (PN1->getParent() != PN2->getParent() || 2957 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) || 2958 !matchSimpleRecurrence(PN2, BO2, Start2, Step2)) 2959 break; 2960 2961 auto Values = getInvertibleOperands(cast<Operator>(BO1), 2962 cast<Operator>(BO2)); 2963 if (!Values) 2964 break; 2965 2966 // We have to be careful of mutually defined recurrences here. Ex: 2967 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V 2968 // * X_i = Y_i = X_(i-1) OP Y_(i-1) 2969 // The invertibility of these is complicated, and not worth reasoning 2970 // about (yet?). 2971 if (Values->first != PN1 || Values->second != PN2) 2972 break; 2973 2974 return std::make_pair(Start1, Start2); 2975 } 2976 } 2977 return std::nullopt; 2978 } 2979 2980 /// Return true if V2 == V1 + X, where X is known non-zero. 2981 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth, 2982 const SimplifyQuery &Q) { 2983 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); 2984 if (!BO || BO->getOpcode() != Instruction::Add) 2985 return false; 2986 Value *Op = nullptr; 2987 if (V2 == BO->getOperand(0)) 2988 Op = BO->getOperand(1); 2989 else if (V2 == BO->getOperand(1)) 2990 Op = BO->getOperand(0); 2991 else 2992 return false; 2993 return isKnownNonZero(Op, Depth + 1, Q); 2994 } 2995 2996 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and 2997 /// the multiplication is nuw or nsw. 2998 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth, 2999 const SimplifyQuery &Q) { 3000 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) { 3001 const APInt *C; 3002 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) && 3003 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) && 3004 !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q); 3005 } 3006 return false; 3007 } 3008 3009 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and 3010 /// the shift is nuw or nsw. 
static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
                          const SimplifyQuery &Q) {
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
    const APInt *C;
    return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
           (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
           !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
  }
  return false;
}

static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
                           unsigned Depth, const SimplifyQuery &Q) {
  // Check that the two PHIs are in the same block.
  if (PN1->getParent() != PN2->getParent())
    return false;

  SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
  bool UsedFullRecursion = false;
  for (const BasicBlock *IncomBB : PN1->blocks()) {
    if (!VisitedBBs.insert(IncomBB).second)
      continue; // Don't reprocess blocks that we have dealt with already.
    const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
    const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
    const APInt *C1, *C2;
    if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
      continue;

    // Only one pair of phi operands is allowed for full recursion.
    if (UsedFullRecursion)
      return false;

    SimplifyQuery RecQ = Q;
    RecQ.CxtI = IncomBB->getTerminator();
    if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
      return false;
    UsedFullRecursion = true;
  }
  return true;
}

/// Return true if it is known that V1 != V2.
static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const SimplifyQuery &Q) {
  if (V1 == V2)
    return false;
  if (V1->getType() != V2->getType())
    // We can't look through casts yet.
    return false;

  if (Depth >= MaxAnalysisRecursionDepth)
    return false;

  // See if we can recurse through (exactly one of) our operands. This
  // requires our operation be 1-to-1 and map every input value to exactly
  // one output value. Such an operation is invertible.
  auto *O1 = dyn_cast<Operator>(V1);
  auto *O2 = dyn_cast<Operator>(V2);
  if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
    if (auto Values = getInvertibleOperands(O1, O2))
      return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);

    if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
      const PHINode *PN2 = cast<PHINode>(V2);
      // FIXME: This is missing a generalization to handle the case where one
      // is a PHI and the other isn't.
      if (isNonEqualPHIs(PN1, PN2, Depth, Q))
        return true;
    }
  }

  if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
    return true;

  if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
    return true;

  if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
    return true;

  if (V1->getType()->isIntOrIntVectorTy()) {
    // Are any known bits in V1 contradictory to known bits in V2? If V1
    // has a known zero where V2 has a known one, they must not be equal.
    KnownBits Known1 = computeKnownBits(V1, Depth, Q);
    KnownBits Known2 = computeKnownBits(V2, Depth, Q);

    if (Known1.Zero.intersects(Known2.One) ||
        Known2.Zero.intersects(Known1.One))
      return true;
  }
  return false;
}

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and the bit is set only if it is true for all of the elements in the
/// vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                       const SimplifyQuery &Q) {
  KnownBits Known(Mask.getBitWidth());
  computeKnownBits(V, Known, Depth, Q);
  return Mask.isSubsetOf(Known.Zero);
}

// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
// Returns the input and lower/upper bounds.
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
                                const APInt *&CLow, const APInt *&CHigh) {
  assert(isa<Operator>(Select) &&
         cast<Operator>(Select)->getOpcode() == Instruction::Select &&
         "Input should be a Select!");

  const Value *LHS = nullptr, *RHS = nullptr;
  SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
  if (SPF != SPF_SMAX && SPF != SPF_SMIN)
    return false;

  if (!match(RHS, m_APInt(CLow)))
    return false;

  const Value *LHS2 = nullptr, *RHS2 = nullptr;
  SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
  if (getInverseMinMaxFlavor(SPF) != SPF2)
    return false;

  if (!match(RHS2, m_APInt(CHigh)))
    return false;

  if (SPF == SPF_SMIN)
    std::swap(CLow, CHigh);

  In = LHS2;
  return CLow->sle(*CHigh);
}

static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
                                         const APInt *&CLow,
                                         const APInt *&CHigh) {
  assert((II->getIntrinsicID() == Intrinsic::smin ||
          II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");

  Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
  auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
      !match(II->getArgOperand(1), m_APInt(CLow)) ||
      !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
    return false;

  if (II->getIntrinsicID() == Intrinsic::smin)
    std::swap(CLow, CHigh);
  return CLow->sle(*CHigh);
}

/// For vector constants, loop over the elements and find the constant with the
/// minimum number of sign bits. Return 0 if the value is not a vector constant
/// or if any element was not analyzed; otherwise, return the count for the
/// element with the minimum number of sign bits.
static unsigned computeNumSignBitsVectorConstant(const Value *V,
                                                 const APInt &DemandedElts,
                                                 unsigned TyBits) {
  const auto *CV = dyn_cast<Constant>(V);
  if (!CV || !isa<FixedVectorType>(CV->getType()))
    return 0;

  unsigned MinSignBits = TyBits;
  unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    if (!DemandedElts[i])
      continue;
    // If we find a non-ConstantInt, bail out.
3186 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); 3187 if (!Elt) 3188 return 0; 3189 3190 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits()); 3191 } 3192 3193 return MinSignBits; 3194 } 3195 3196 static unsigned ComputeNumSignBitsImpl(const Value *V, 3197 const APInt &DemandedElts, 3198 unsigned Depth, const SimplifyQuery &Q); 3199 3200 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, 3201 unsigned Depth, const SimplifyQuery &Q) { 3202 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q); 3203 assert(Result > 0 && "At least one sign bit needs to be present!"); 3204 return Result; 3205 } 3206 3207 /// Return the number of times the sign bit of the register is replicated into 3208 /// the other bits. We know that at least 1 bit is always equal to the sign bit 3209 /// (itself), but other cases can give us information. For example, immediately 3210 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each 3211 /// other, so we return 3. For vectors, return the number of sign bits for the 3212 /// vector element with the minimum number of known sign bits of the demanded 3213 /// elements in the vector specified by DemandedElts. 3214 static unsigned ComputeNumSignBitsImpl(const Value *V, 3215 const APInt &DemandedElts, 3216 unsigned Depth, const SimplifyQuery &Q) { 3217 Type *Ty = V->getType(); 3218 #ifndef NDEBUG 3219 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); 3220 3221 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) { 3222 assert( 3223 FVTy->getNumElements() == DemandedElts.getBitWidth() && 3224 "DemandedElt width should equal the fixed vector number of elements"); 3225 } else { 3226 assert(DemandedElts == APInt(1, 1) && 3227 "DemandedElt width should be 1 for scalars"); 3228 } 3229 #endif 3230 3231 // We return the minimum number of sign bits that are guaranteed to be present 3232 // in V, so for undef we have to conservatively return 1. We don't have the 3233 // same behavior for poison though -- that's a FIXME today. 3234 3235 Type *ScalarTy = Ty->getScalarType(); 3236 unsigned TyBits = ScalarTy->isPointerTy() ? 3237 Q.DL.getPointerTypeSizeInBits(ScalarTy) : 3238 Q.DL.getTypeSizeInBits(ScalarTy); 3239 3240 unsigned Tmp, Tmp2; 3241 unsigned FirstAnswer = 1; 3242 3243 // Note that ConstantInt is handled by the general computeKnownBits case 3244 // below. 3245 3246 if (Depth == MaxAnalysisRecursionDepth) 3247 return 1; 3248 3249 if (auto *U = dyn_cast<Operator>(V)) { 3250 switch (Operator::getOpcode(V)) { 3251 default: break; 3252 case Instruction::SExt: 3253 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); 3254 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp; 3255 3256 case Instruction::SDiv: { 3257 const APInt *Denominator; 3258 // sdiv X, C -> adds log(C) sign bits. 3259 if (match(U->getOperand(1), m_APInt(Denominator))) { 3260 3261 // Ignore non-positive denominator. 3262 if (!Denominator->isStrictlyPositive()) 3263 break; 3264 3265 // Calculate the incoming numerator bits. 3266 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3267 3268 // Add floor(log(C)) bits to the numerator bits. 
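        // Worked example (illustrative): if %x is an i32 with at least 8
        // sign bits, 'sdiv i32 %x, 16' has at least
        // min(32, 8 + log2(16)) = 12 sign bits.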
        return std::min(TyBits, NumBits + Denominator->logBase2());
      }
      break;
    }

    case Instruction::SRem: {
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      const APInt *Denominator;
      // srem X, C -> we know that the result is within [-C+1,C) when C is a
      // positive constant. This lets us put a lower bound on the number of
      // sign bits.
      if (match(U->getOperand(1), m_APInt(Denominator))) {

        // Ignore non-positive denominator.
        if (Denominator->isStrictlyPositive()) {
          // Calculate the leading sign bit constraints by examining the
          // denominator. Given that the denominator is positive, there are
          // two cases:
          //
          //  1. The numerator is positive. The result range is [0,C) and
          //     [0,C) u< (1 << ceilLogBase2(C)).
          //
          //  2. The numerator is negative. Then the result range is (-C,0]
          //     and integers in (-C,0] are either 0 or
          //     >u (-1 << ceilLogBase2(C)).
          //
          // Thus a lower bound on the number of sign bits is `TyBits -
          // ceilLogBase2(C)`.

          unsigned ResBits = TyBits - Denominator->ceilLogBase2();
          Tmp = std::max(Tmp, ResBits);
        }
      }
      return Tmp;
    }

    case Instruction::AShr: {
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
      // ashr X, C -> adds C sign bits. Vectors too.
      const APInt *ShAmt;
      if (match(U->getOperand(1), m_APInt(ShAmt))) {
        if (ShAmt->uge(TyBits))
          break; // Bad shift.
        unsigned ShAmtLimited = ShAmt->getZExtValue();
        Tmp += ShAmtLimited;
        if (Tmp > TyBits) Tmp = TyBits;
      }
      return Tmp;
    }
    case Instruction::Shl: {
      const APInt *ShAmt;
      if (match(U->getOperand(1), m_APInt(ShAmt))) {
        // shl destroys sign bits.
        Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
        if (ShAmt->uge(TyBits) || // Bad shift.
            ShAmt->uge(Tmp))      // Shifted all sign bits out.
          break;
        Tmp2 = ShAmt->getZExtValue();
        return Tmp - Tmp2;
      }
      break;
    }
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: // NOT is handled here.
      // Logical binary ops preserve the number of sign bits at the worst.
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
      if (Tmp != 1) {
        Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
        FirstAnswer = std::min(Tmp, Tmp2);
        // We computed what we know about the sign bits as our first
        // answer. Now proceed to the generic code that uses
        // computeKnownBits, and pick whichever answer is better.
      }
      break;

    case Instruction::Select: {
      // If we have a clamp pattern, we know that the number of sign bits will
      // be the minimum of the clamp min/max range.
      const Value *X;
      const APInt *CLow, *CHigh;
      if (isSignedMinMaxClamp(U, X, CLow, CHigh))
        return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());

      Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
      if (Tmp == 1) break;
      Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
      return std::min(Tmp, Tmp2);
    }

    case Instruction::Add:
      // Add can have at most one carry bit. Thus we know that the output
      // is, at worst, one more bit than the inputs.
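      // For instance (sketch): adding two i32 values that each have at least
      // 4 sign bits yields at least min(4, 4) - 1 = 3 sign bits in the sum.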
3361 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3362 if (Tmp == 1) break; 3363 3364 // Special case decrementing a value (ADD X, -1): 3365 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 3366 if (CRHS->isAllOnesValue()) { 3367 KnownBits Known(TyBits); 3368 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); 3369 3370 // If the input is known to be 0 or 1, the output is 0/-1, which is 3371 // all sign bits set. 3372 if ((Known.Zero | 1).isAllOnes()) 3373 return TyBits; 3374 3375 // If we are subtracting one from a positive number, there is no carry 3376 // out of the result. 3377 if (Known.isNonNegative()) 3378 return Tmp; 3379 } 3380 3381 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3382 if (Tmp2 == 1) break; 3383 return std::min(Tmp, Tmp2) - 1; 3384 3385 case Instruction::Sub: 3386 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3387 if (Tmp2 == 1) break; 3388 3389 // Handle NEG. 3390 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 3391 if (CLHS->isNullValue()) { 3392 KnownBits Known(TyBits); 3393 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); 3394 // If the input is known to be 0 or 1, the output is 0/-1, which is 3395 // all sign bits set. 3396 if ((Known.Zero | 1).isAllOnes()) 3397 return TyBits; 3398 3399 // If the input is known to be positive (the sign bit is known clear), 3400 // the output of the NEG has the same number of sign bits as the 3401 // input. 3402 if (Known.isNonNegative()) 3403 return Tmp2; 3404 3405 // Otherwise, we treat this like a SUB. 3406 } 3407 3408 // Sub can have at most one carry bit. Thus we know that the output 3409 // is, at worst, one more bit than the inputs. 3410 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3411 if (Tmp == 1) break; 3412 return std::min(Tmp, Tmp2) - 1; 3413 3414 case Instruction::Mul: { 3415 // The output of the Mul can be at most twice the valid bits in the 3416 // inputs. 3417 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3418 if (SignBitsOp0 == 1) break; 3419 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3420 if (SignBitsOp1 == 1) break; 3421 unsigned OutValidBits = 3422 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); 3423 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; 3424 } 3425 3426 case Instruction::PHI: { 3427 const PHINode *PN = cast<PHINode>(U); 3428 unsigned NumIncomingValues = PN->getNumIncomingValues(); 3429 // Don't analyze large in-degree PHIs. 3430 if (NumIncomingValues > 4) break; 3431 // Unreachable blocks may have zero-operand PHI nodes. 3432 if (NumIncomingValues == 0) break; 3433 3434 // Take the minimum of all incoming values. This can't infinitely loop 3435 // because of our depth threshold. 3436 SimplifyQuery RecQ = Q; 3437 Tmp = TyBits; 3438 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) { 3439 if (Tmp == 1) return Tmp; 3440 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator(); 3441 Tmp = std::min( 3442 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ)); 3443 } 3444 return Tmp; 3445 } 3446 3447 case Instruction::Trunc: { 3448 // If the input contained enough sign bits that some remain after the 3449 // truncation, then we can make use of that. Otherwise we don't know 3450 // anything. 
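      // E.g. (illustrative): truncating an i64 with 40 known sign bits to
      // i32 drops 32 bits, leaving 40 - (64 - 32) = 8 known sign bits.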
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
      unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
      if (Tmp > (OperandTyBits - TyBits))
        return Tmp - (OperandTyBits - TyBits);

      return 1;
    }

    case Instruction::ExtractElement:
      // Look through extract element. At the moment we keep this simple and
      // skip tracking the specific element. But at least we might find
      // information valid for all elements of the vector (for example if the
      // vector is sign extended, shifted, etc.).
      return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

    case Instruction::ShuffleVector: {
      // Collect the minimum number of sign bits that are shared by every
      // vector element referenced by the shuffle.
      auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
      if (!Shuf) {
        // FIXME: Add support for shufflevector constant expressions.
        return 1;
      }
      APInt DemandedLHS, DemandedRHS;
      // For undef elements, we don't know anything about the common state of
      // the shuffle result.
      if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
        return 1;
      Tmp = std::numeric_limits<unsigned>::max();
      if (!!DemandedLHS) {
        const Value *LHS = Shuf->getOperand(0);
        Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
      }
      // If we don't know anything, early out and try computeKnownBits
      // fall-back.
      if (Tmp == 1)
        break;
      if (!!DemandedRHS) {
        const Value *RHS = Shuf->getOperand(1);
        Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
        Tmp = std::min(Tmp, Tmp2);
      }
      // If we don't know anything, early out and try computeKnownBits
      // fall-back.
      if (Tmp == 1)
        break;
      assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
      return Tmp;
    }
    case Instruction::Call: {
      if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::abs:
          Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
          if (Tmp == 1) break;

          // Absolute value reduces number of sign bits by at most 1.
          return Tmp - 1;
        case Intrinsic::smin:
        case Intrinsic::smax: {
          const APInt *CLow, *CHigh;
          if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
            return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
        }
        }
      }
    }
    }
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.

  // If we can examine all elements of a vector constant successfully, we're
  // done (we can't do any better than that). If not, keep trying.
  if (unsigned VecSignBits =
          computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
    return VecSignBits;

  KnownBits Known(TyBits);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);

  // If we know that the sign bit is either zero or one, determine the number
  // of identical bits in the top of the input value.
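  // Sketch: if computeKnownBits proves the top 5 bits are all zero (or all
  // one), countMinSignBits() reports at least 5 replicated sign bits.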
3536 return std::max(FirstAnswer, Known.countMinSignBits()); 3537 } 3538 3539 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB, 3540 const TargetLibraryInfo *TLI) { 3541 const Function *F = CB.getCalledFunction(); 3542 if (!F) 3543 return Intrinsic::not_intrinsic; 3544 3545 if (F->isIntrinsic()) 3546 return F->getIntrinsicID(); 3547 3548 // We are going to infer semantics of a library function based on mapping it 3549 // to an LLVM intrinsic. Check that the library function is available from 3550 // this callbase and in this environment. 3551 LibFunc Func; 3552 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) || 3553 !CB.onlyReadsMemory()) 3554 return Intrinsic::not_intrinsic; 3555 3556 switch (Func) { 3557 default: 3558 break; 3559 case LibFunc_sin: 3560 case LibFunc_sinf: 3561 case LibFunc_sinl: 3562 return Intrinsic::sin; 3563 case LibFunc_cos: 3564 case LibFunc_cosf: 3565 case LibFunc_cosl: 3566 return Intrinsic::cos; 3567 case LibFunc_exp: 3568 case LibFunc_expf: 3569 case LibFunc_expl: 3570 return Intrinsic::exp; 3571 case LibFunc_exp2: 3572 case LibFunc_exp2f: 3573 case LibFunc_exp2l: 3574 return Intrinsic::exp2; 3575 case LibFunc_log: 3576 case LibFunc_logf: 3577 case LibFunc_logl: 3578 return Intrinsic::log; 3579 case LibFunc_log10: 3580 case LibFunc_log10f: 3581 case LibFunc_log10l: 3582 return Intrinsic::log10; 3583 case LibFunc_log2: 3584 case LibFunc_log2f: 3585 case LibFunc_log2l: 3586 return Intrinsic::log2; 3587 case LibFunc_fabs: 3588 case LibFunc_fabsf: 3589 case LibFunc_fabsl: 3590 return Intrinsic::fabs; 3591 case LibFunc_fmin: 3592 case LibFunc_fminf: 3593 case LibFunc_fminl: 3594 return Intrinsic::minnum; 3595 case LibFunc_fmax: 3596 case LibFunc_fmaxf: 3597 case LibFunc_fmaxl: 3598 return Intrinsic::maxnum; 3599 case LibFunc_copysign: 3600 case LibFunc_copysignf: 3601 case LibFunc_copysignl: 3602 return Intrinsic::copysign; 3603 case LibFunc_floor: 3604 case LibFunc_floorf: 3605 case LibFunc_floorl: 3606 return Intrinsic::floor; 3607 case LibFunc_ceil: 3608 case LibFunc_ceilf: 3609 case LibFunc_ceill: 3610 return Intrinsic::ceil; 3611 case LibFunc_trunc: 3612 case LibFunc_truncf: 3613 case LibFunc_truncl: 3614 return Intrinsic::trunc; 3615 case LibFunc_rint: 3616 case LibFunc_rintf: 3617 case LibFunc_rintl: 3618 return Intrinsic::rint; 3619 case LibFunc_nearbyint: 3620 case LibFunc_nearbyintf: 3621 case LibFunc_nearbyintl: 3622 return Intrinsic::nearbyint; 3623 case LibFunc_round: 3624 case LibFunc_roundf: 3625 case LibFunc_roundl: 3626 return Intrinsic::round; 3627 case LibFunc_roundeven: 3628 case LibFunc_roundevenf: 3629 case LibFunc_roundevenl: 3630 return Intrinsic::roundeven; 3631 case LibFunc_pow: 3632 case LibFunc_powf: 3633 case LibFunc_powl: 3634 return Intrinsic::pow; 3635 case LibFunc_sqrt: 3636 case LibFunc_sqrtf: 3637 case LibFunc_sqrtl: 3638 return Intrinsic::sqrt; 3639 } 3640 3641 return Intrinsic::not_intrinsic; 3642 } 3643 3644 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a 3645 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign 3646 /// bit despite comparing equal. 3647 static bool cannotBeOrderedLessThanZeroImpl(const Value *V, 3648 const DataLayout &DL, 3649 const TargetLibraryInfo *TLI, 3650 bool SignBitOnly, unsigned Depth) { 3651 // TODO: This function does not do the right thing when SignBitOnly is true 3652 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform 3653 // which flips the sign bits of NaNs. 
See 3654 // https://llvm.org/bugs/show_bug.cgi?id=31702. 3655 3656 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 3657 return !CFP->getValueAPF().isNegative() || 3658 (!SignBitOnly && CFP->getValueAPF().isZero()); 3659 } 3660 3661 // Handle vector of constants. 3662 if (auto *CV = dyn_cast<Constant>(V)) { 3663 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) { 3664 unsigned NumElts = CVFVTy->getNumElements(); 3665 for (unsigned i = 0; i != NumElts; ++i) { 3666 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i)); 3667 if (!CFP) 3668 return false; 3669 if (CFP->getValueAPF().isNegative() && 3670 (SignBitOnly || !CFP->getValueAPF().isZero())) 3671 return false; 3672 } 3673 3674 // All non-negative ConstantFPs. 3675 return true; 3676 } 3677 } 3678 3679 if (Depth == MaxAnalysisRecursionDepth) 3680 return false; 3681 3682 const Operator *I = dyn_cast<Operator>(V); 3683 if (!I) 3684 return false; 3685 3686 switch (I->getOpcode()) { 3687 default: 3688 break; 3689 // Unsigned integers are always nonnegative. 3690 case Instruction::UIToFP: 3691 return true; 3692 case Instruction::FDiv: 3693 // X / X is always exactly 1.0 or a NaN. 3694 if (I->getOperand(0) == I->getOperand(1) && 3695 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) 3696 return true; 3697 3698 // Set SignBitOnly for RHS, because X / -0.0 is -Inf (or NaN). 3699 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI, 3700 SignBitOnly, Depth + 1) && 3701 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI, 3702 /*SignBitOnly*/ true, Depth + 1); 3703 case Instruction::FMul: 3704 // X * X is always non-negative or a NaN. 3705 if (I->getOperand(0) == I->getOperand(1) && 3706 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) 3707 return true; 3708 3709 [[fallthrough]]; 3710 case Instruction::FAdd: 3711 case Instruction::FRem: 3712 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI, 3713 SignBitOnly, Depth + 1) && 3714 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI, 3715 SignBitOnly, Depth + 1); 3716 case Instruction::Select: 3717 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI, 3718 SignBitOnly, Depth + 1) && 3719 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), DL, TLI, 3720 SignBitOnly, Depth + 1); 3721 case Instruction::FPExt: 3722 case Instruction::FPTrunc: 3723 // Widening/narrowing never change sign. 3724 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI, 3725 SignBitOnly, Depth + 1); 3726 case Instruction::ExtractElement: 3727 // Look through extract element. At the moment we keep this simple and skip 3728 // tracking the specific element. But at least we might find information 3729 // valid for all elements of the vector. 
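    // (Sketch: an extractelement from a vector produced by fabs is known
    // non-negative no matter which lane is read.)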
3730 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI, 3731 SignBitOnly, Depth + 1); 3732 case Instruction::Call: 3733 const auto *CI = cast<CallInst>(I); 3734 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI); 3735 switch (IID) { 3736 default: 3737 break; 3738 case Intrinsic::canonicalize: 3739 case Intrinsic::arithmetic_fence: 3740 case Intrinsic::floor: 3741 case Intrinsic::ceil: 3742 case Intrinsic::trunc: 3743 case Intrinsic::rint: 3744 case Intrinsic::nearbyint: 3745 case Intrinsic::round: 3746 case Intrinsic::roundeven: 3747 case Intrinsic::fptrunc_round: 3748 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI, 3749 SignBitOnly, Depth + 1); 3750 case Intrinsic::maxnum: { 3751 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1); 3752 auto isPositiveNum = [&](Value *V) { 3753 if (SignBitOnly) { 3754 // With SignBitOnly, this is tricky because the result of 3755 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is 3756 // a constant strictly greater than 0.0. 3757 const APFloat *C; 3758 return match(V, m_APFloat(C)) && 3759 *C > APFloat::getZero(C->getSemantics()); 3760 } 3761 3762 // -0.0 compares equal to 0.0, so if this operand is at least -0.0, 3763 // maxnum can't be ordered-less-than-zero. 3764 return isKnownNeverNaN(V, DL, TLI) && 3765 cannotBeOrderedLessThanZeroImpl(V, DL, TLI, false, Depth + 1); 3766 }; 3767 3768 // TODO: This could be improved. We could also check that neither operand 3769 // has its sign bit set (and at least 1 is not-NAN?). 3770 return isPositiveNum(V0) || isPositiveNum(V1); 3771 } 3772 3773 case Intrinsic::maximum: 3774 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI, 3775 SignBitOnly, Depth + 1) || 3776 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI, 3777 SignBitOnly, Depth + 1); 3778 case Intrinsic::minnum: 3779 case Intrinsic::minimum: 3780 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI, 3781 SignBitOnly, Depth + 1) && 3782 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI, 3783 SignBitOnly, Depth + 1); 3784 case Intrinsic::exp: 3785 case Intrinsic::exp2: 3786 case Intrinsic::fabs: 3787 return true; 3788 case Intrinsic::copysign: 3789 // Only the sign operand matters. 3790 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), DL, TLI, true, 3791 Depth + 1); 3792 case Intrinsic::sqrt: 3793 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. 3794 if (!SignBitOnly) 3795 return true; 3796 return CI->hasNoNaNs() && 3797 (CI->hasNoSignedZeros() || 3798 cannotBeNegativeZero(CI->getOperand(0), DL, TLI)); 3799 3800 case Intrinsic::powi: 3801 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { 3802 // powi(x,n) is non-negative if n is even. 3803 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) 3804 return true; 3805 } 3806 // TODO: This is not correct. Given that exp is an integer, here are the 3807 // ways that pow can return a negative value: 3808 // 3809 // pow(x, exp) --> negative if exp is odd and x is negative. 3810 // pow(-0, exp) --> -inf if exp is negative odd. 3811 // pow(-0, exp) --> -0 if exp is positive odd. 3812 // pow(-inf, exp) --> -0 if exp is negative odd. 3813 // pow(-inf, exp) --> -inf if exp is positive odd. 3814 // 3815 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, 3816 // but we must return false if x == -0. Unfortunately we do not currently 3817 // have a way of expressing this constraint. 
See details in 3818 // https://llvm.org/bugs/show_bug.cgi?id=31702. 3819 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), DL, TLI, 3820 SignBitOnly, Depth + 1); 3821 3822 case Intrinsic::fma: 3823 case Intrinsic::fmuladd: 3824 // x*x+y is non-negative if y is non-negative. 3825 return I->getOperand(0) == I->getOperand(1) && 3826 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && 3827 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), DL, TLI, 3828 SignBitOnly, Depth + 1); 3829 } 3830 break; 3831 } 3832 return false; 3833 } 3834 3835 bool llvm::CannotBeOrderedLessThanZero(const Value *V, const DataLayout &DL, 3836 const TargetLibraryInfo *TLI) { 3837 return cannotBeOrderedLessThanZeroImpl(V, DL, TLI, false, 0); 3838 } 3839 3840 bool llvm::SignBitMustBeZero(const Value *V, const DataLayout &DL, 3841 const TargetLibraryInfo *TLI) { 3842 return cannotBeOrderedLessThanZeroImpl(V, DL, TLI, true, 0); 3843 } 3844 3845 /// Return true if it's possible to assume IEEE treatment of input denormals in 3846 /// \p F for \p Val. 3847 static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) { 3848 Ty = Ty->getScalarType(); 3849 return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE; 3850 } 3851 3852 static bool inputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty) { 3853 Ty = Ty->getScalarType(); 3854 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics()); 3855 return Mode.Input == DenormalMode::IEEE || 3856 Mode.Input == DenormalMode::PositiveZero; 3857 } 3858 3859 static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty) { 3860 Ty = Ty->getScalarType(); 3861 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics()); 3862 return Mode.Output == DenormalMode::IEEE || 3863 Mode.Output == DenormalMode::PositiveZero; 3864 } 3865 3866 bool KnownFPClass::isKnownNeverLogicalZero(const Function &F, Type *Ty) const { 3867 return isKnownNeverZero() && 3868 (isKnownNeverSubnormal() || inputDenormalIsIEEE(F, Ty)); 3869 } 3870 3871 bool KnownFPClass::isKnownNeverLogicalNegZero(const Function &F, 3872 Type *Ty) const { 3873 return isKnownNeverNegZero() && 3874 (isKnownNeverNegSubnormal() || inputDenormalIsIEEEOrPosZero(F, Ty)); 3875 } 3876 3877 bool KnownFPClass::isKnownNeverLogicalPosZero(const Function &F, 3878 Type *Ty) const { 3879 if (!isKnownNeverPosZero()) 3880 return false; 3881 3882 // If we know there are no denormals, nothing can be flushed to zero. 3883 if (isKnownNeverSubnormal()) 3884 return true; 3885 3886 DenormalMode Mode = F.getDenormalMode(Ty->getScalarType()->getFltSemantics()); 3887 switch (Mode.Input) { 3888 case DenormalMode::IEEE: 3889 return true; 3890 case DenormalMode::PreserveSign: 3891 // Negative subnormal won't flush to +0 3892 return isKnownNeverPosSubnormal(); 3893 case DenormalMode::PositiveZero: 3894 default: 3895 // Both positive and negative subnormal could flush to +0 3896 return false; 3897 } 3898 3899 llvm_unreachable("covered switch over denormal mode"); 3900 } 3901 3902 void KnownFPClass::propagateDenormal(const KnownFPClass &Src, const Function &F, 3903 Type *Ty) { 3904 KnownFPClasses = Src.KnownFPClasses; 3905 // If we aren't assuming the source can't be a zero, we don't have to check if 3906 // a denormal input could be flushed. 3907 if (!Src.isKnownNeverPosZero() && !Src.isKnownNeverNegZero()) 3908 return; 3909 3910 // If we know the input can't be a denormal, it can't be flushed to 0. 
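  // Sketch of the remaining cases: with an input mode of PreserveSign, a
  // negative subnormal source may be flushed to -0, so fcNegZero has to be
  // added back below; with PositiveZero it flushes to +0 instead.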
3911 if (Src.isKnownNeverSubnormal()) 3912 return; 3913 3914 DenormalMode Mode = F.getDenormalMode(Ty->getScalarType()->getFltSemantics()); 3915 3916 if (!Src.isKnownNeverPosSubnormal() && Mode != DenormalMode::getIEEE()) 3917 KnownFPClasses |= fcPosZero; 3918 3919 if (!Src.isKnownNeverNegSubnormal() && Mode != DenormalMode::getIEEE()) { 3920 if (Mode != DenormalMode::getPositiveZero()) 3921 KnownFPClasses |= fcNegZero; 3922 3923 if (Mode.Input == DenormalMode::PositiveZero || 3924 Mode.Output == DenormalMode::PositiveZero || 3925 Mode.Input == DenormalMode::Dynamic || 3926 Mode.Output == DenormalMode::Dynamic) 3927 KnownFPClasses |= fcPosZero; 3928 } 3929 } 3930 3931 void KnownFPClass::propagateCanonicalizingSrc(const KnownFPClass &Src, 3932 const Function &F, Type *Ty) { 3933 propagateDenormal(Src, F, Ty); 3934 propagateNaN(Src, /*PreserveSign=*/true); 3935 } 3936 3937 /// Returns a pair of values, which if passed to llvm.is.fpclass, returns the 3938 /// same result as an fcmp with the given operands. 3939 std::pair<Value *, FPClassTest> llvm::fcmpToClassTest(FCmpInst::Predicate Pred, 3940 const Function &F, 3941 Value *LHS, Value *RHS, 3942 bool LookThroughSrc) { 3943 const APFloat *ConstRHS; 3944 if (!match(RHS, m_APFloat(ConstRHS))) 3945 return {nullptr, fcNone}; 3946 3947 // fcmp ord x, zero|normal|subnormal|inf -> ~fcNan 3948 if (Pred == FCmpInst::FCMP_ORD && !ConstRHS->isNaN()) 3949 return {LHS, ~fcNan}; 3950 3951 // fcmp uno x, zero|normal|subnormal|inf -> fcNan 3952 if (Pred == FCmpInst::FCMP_UNO && !ConstRHS->isNaN()) 3953 return {LHS, fcNan}; 3954 3955 if (ConstRHS->isZero()) { 3956 // Compares with fcNone are only exactly equal to fcZero if input denormals 3957 // are not flushed. 3958 // TODO: Handle DAZ by expanding masks to cover subnormal cases. 3959 if (Pred != FCmpInst::FCMP_ORD && Pred != FCmpInst::FCMP_UNO && 3960 !inputDenormalIsIEEE(F, LHS->getType())) 3961 return {nullptr, fcNone}; 3962 3963 switch (Pred) { 3964 case FCmpInst::FCMP_OEQ: // Match x == 0.0 3965 return {LHS, fcZero}; 3966 case FCmpInst::FCMP_UEQ: // Match isnan(x) || (x == 0.0) 3967 return {LHS, fcZero | fcNan}; 3968 case FCmpInst::FCMP_UNE: // Match (x != 0.0) 3969 return {LHS, ~fcZero}; 3970 case FCmpInst::FCMP_ONE: // Match !isnan(x) && x != 0.0 3971 return {LHS, ~fcNan & ~fcZero}; 3972 case FCmpInst::FCMP_ORD: 3973 // Canonical form of ord/uno is with a zero. We could also handle 3974 // non-canonical other non-NaN constants or LHS == RHS. 
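      // Sketch: 'fcmp ord float %x, 0.0' is equivalent to
      // 'llvm.is.fpclass(%x, ~fcNan)', i.e. "%x is not a NaN".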
      return {LHS, ~fcNan};
    case FCmpInst::FCMP_UNO:
      return {LHS, fcNan};
    case FCmpInst::FCMP_OGT: // x > 0
      return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf};
    case FCmpInst::FCMP_UGT: // isnan(x) || x > 0
      return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf | fcNan};
    case FCmpInst::FCMP_OGE: // x >= 0
      return {LHS, fcPositive | fcNegZero};
    case FCmpInst::FCMP_UGE: // isnan(x) || x >= 0
      return {LHS, fcPositive | fcNegZero | fcNan};
    case FCmpInst::FCMP_OLT: // x < 0
      return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf};
    case FCmpInst::FCMP_ULT: // isnan(x) || x < 0
      return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf | fcNan};
    case FCmpInst::FCMP_OLE: // x <= 0
      return {LHS, fcNegative | fcPosZero};
    case FCmpInst::FCMP_ULE: // isnan(x) || x <= 0
      return {LHS, fcNegative | fcPosZero | fcNan};
    default:
      break;
    }

    return {nullptr, fcNone};
  }

  Value *Src = LHS;
  const bool IsFabs = LookThroughSrc && match(LHS, m_FAbs(m_Value(Src)));

  // Compute the test mask that would return true for the ordered comparisons.
  FPClassTest Mask;

  if (ConstRHS->isInfinity()) {
    switch (Pred) {
    case FCmpInst::FCMP_OEQ:
    case FCmpInst::FCMP_UNE: {
      // Match __builtin_isinf patterns
      //
      //   fcmp oeq x, +inf -> is_fpclass x, fcPosInf
      //   fcmp oeq fabs(x), +inf -> is_fpclass x, fcInf
      //   fcmp oeq x, -inf -> is_fpclass x, fcNegInf
      //   fcmp oeq fabs(x), -inf -> is_fpclass x, 0 -> false
      //
      //   fcmp une x, +inf -> is_fpclass x, ~fcPosInf
      //   fcmp une fabs(x), +inf -> is_fpclass x, ~fcInf
      //   fcmp une x, -inf -> is_fpclass x, ~fcNegInf
      //   fcmp une fabs(x), -inf -> is_fpclass x, fcAllFlags -> true

      if (ConstRHS->isNegative()) {
        Mask = fcNegInf;
        if (IsFabs)
          Mask = fcNone;
      } else {
        Mask = fcPosInf;
        if (IsFabs)
          Mask |= fcNegInf;
      }

      break;
    }
    case FCmpInst::FCMP_ONE:
    case FCmpInst::FCMP_UEQ: {
      // Match __builtin_isinf patterns
      //   fcmp one x, -inf -> is_fpclass x, ~fcNegInf & ~fcNan
      //   fcmp one fabs(x), -inf -> is_fpclass x, ~fcNan
      //   fcmp one x, +inf -> is_fpclass x, ~fcPosInf & ~fcNan
      //   fcmp one fabs(x), +inf -> is_fpclass x, ~fcInf & ~fcNan
      //
      //   fcmp ueq x, +inf -> is_fpclass x, fcPosInf|fcNan
      //   fcmp ueq fabs(x), +inf -> is_fpclass x, fcInf|fcNan
      //   fcmp ueq x, -inf -> is_fpclass x, fcNegInf|fcNan
      //   fcmp ueq fabs(x), -inf -> is_fpclass x, fcNan
      if (ConstRHS->isNegative()) {
        Mask = ~fcNegInf & ~fcNan;
        if (IsFabs)
          Mask = ~fcNan;
      } else {
        Mask = ~fcPosInf & ~fcNan;
        if (IsFabs)
          Mask &= ~fcNegInf;
      }

      break;
    }
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_UGE: {
      if (ConstRHS->isNegative()) {
        // No value is ordered and less than negative infinity.
        // All values are unordered with, or at least, negative infinity.
4064 // fcmp olt x, -inf -> false 4065 // fcmp uge x, -inf -> true 4066 Mask = fcNone; 4067 break; 4068 } 4069 4070 // fcmp olt fabs(x), +inf -> fcFinite 4071 // fcmp uge fabs(x), +inf -> ~fcFinite 4072 // fcmp olt x, +inf -> fcFinite|fcNegInf 4073 // fcmp uge x, +inf -> ~(fcFinite|fcNegInf) 4074 Mask = fcFinite; 4075 if (!IsFabs) 4076 Mask |= fcNegInf; 4077 break; 4078 } 4079 case FCmpInst::FCMP_OGE: 4080 case FCmpInst::FCMP_ULT: { 4081 if (ConstRHS->isNegative()) // TODO 4082 return {nullptr, fcNone}; 4083 4084 // fcmp oge fabs(x), +inf -> fcInf 4085 // fcmp oge x, +inf -> fcPosInf 4086 // fcmp ult fabs(x), +inf -> ~fcInf 4087 // fcmp ult x, +inf -> ~fcPosInf 4088 Mask = fcPosInf; 4089 if (IsFabs) 4090 Mask |= fcNegInf; 4091 break; 4092 } 4093 case FCmpInst::FCMP_OGT: 4094 case FCmpInst::FCMP_ULE: { 4095 if (ConstRHS->isNegative()) 4096 return {nullptr, fcNone}; 4097 4098 // No value is ordered and greater than infinity. 4099 Mask = fcNone; 4100 break; 4101 } 4102 default: 4103 return {nullptr, fcNone}; 4104 } 4105 } else if (ConstRHS->isSmallestNormalized() && !ConstRHS->isNegative()) { 4106 // Match pattern that's used in __builtin_isnormal. 4107 switch (Pred) { 4108 case FCmpInst::FCMP_OLT: 4109 case FCmpInst::FCMP_UGE: { 4110 // fcmp olt x, smallest_normal -> fcNegInf|fcNegNormal|fcSubnormal|fcZero 4111 // fcmp olt fabs(x), smallest_normal -> fcSubnormal|fcZero 4112 // fcmp uge x, smallest_normal -> fcNan|fcPosNormal|fcPosInf 4113 // fcmp uge fabs(x), smallest_normal -> ~(fcSubnormal|fcZero) 4114 Mask = fcZero | fcSubnormal; 4115 if (!IsFabs) 4116 Mask |= fcNegNormal | fcNegInf; 4117 4118 break; 4119 } 4120 case FCmpInst::FCMP_OGE: 4121 case FCmpInst::FCMP_ULT: { 4122 // fcmp oge x, smallest_normal -> fcPosNormal | fcPosInf 4123 // fcmp oge fabs(x), smallest_normal -> fcInf | fcNormal 4124 // fcmp ult x, smallest_normal -> ~(fcPosNormal | fcPosInf) 4125 // fcmp ult fabs(x), smallest_normal -> ~(fcInf | fcNormal) 4126 Mask = fcPosInf | fcPosNormal; 4127 if (IsFabs) 4128 Mask |= fcNegInf | fcNegNormal; 4129 break; 4130 } 4131 default: 4132 return {nullptr, fcNone}; 4133 } 4134 } else if (ConstRHS->isNaN()) { 4135 // fcmp o__ x, nan -> false 4136 // fcmp u__ x, nan -> true 4137 Mask = fcNone; 4138 } else 4139 return {nullptr, fcNone}; 4140 4141 // Invert the comparison for the unordered cases. 4142 if (FCmpInst::isUnordered(Pred)) 4143 Mask = ~Mask; 4144 4145 return {Src, Mask}; 4146 } 4147 4148 static FPClassTest computeKnownFPClassFromAssumes(const Value *V, 4149 const SimplifyQuery &Q) { 4150 FPClassTest KnownFromAssume = fcAllFlags; 4151 4152 // Try to restrict the floating-point classes based on information from 4153 // assumptions. 4154 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) { 4155 if (!AssumeVH) 4156 continue; 4157 CallInst *I = cast<CallInst>(AssumeVH); 4158 const Function *F = I->getFunction(); 4159 4160 assert(F == Q.CxtI->getParent()->getParent() && 4161 "Got assumption for the wrong function!"); 4162 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && 4163 "must be an assume intrinsic"); 4164 4165 if (!isValidAssumeForContext(I, Q.CxtI, Q.DT)) 4166 continue; 4167 4168 CmpInst::Predicate Pred; 4169 Value *LHS, *RHS; 4170 uint64_t ClassVal = 0; 4171 if (match(I->getArgOperand(0), m_FCmp(Pred, m_Value(LHS), m_Value(RHS)))) { 4172 auto [TestedValue, TestedMask] = 4173 fcmpToClassTest(Pred, *F, LHS, RHS, true); 4174 // First see if we can fold in fabs/fneg into the test. 
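      // Illustrative assume pattern recognized here (sketch):
      //   %ord = fcmp ord double %x, 0.0
      //   call void @llvm.assume(i1 %ord)   ; proves %x is never NaN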
4175 if (TestedValue == V) 4176 KnownFromAssume &= TestedMask; 4177 else { 4178 // Try again without the lookthrough if we found a different source 4179 // value. 4180 auto [TestedValue, TestedMask] = 4181 fcmpToClassTest(Pred, *F, LHS, RHS, false); 4182 if (TestedValue == V) 4183 KnownFromAssume &= TestedMask; 4184 } 4185 } else if (match(I->getArgOperand(0), 4186 m_Intrinsic<Intrinsic::is_fpclass>( 4187 m_Value(LHS), m_ConstantInt(ClassVal)))) { 4188 KnownFromAssume &= static_cast<FPClassTest>(ClassVal); 4189 } 4190 } 4191 4192 return KnownFromAssume; 4193 } 4194 4195 void computeKnownFPClass(const Value *V, const APInt &DemandedElts, 4196 FPClassTest InterestedClasses, KnownFPClass &Known, 4197 unsigned Depth, const SimplifyQuery &Q); 4198 4199 static void computeKnownFPClass(const Value *V, KnownFPClass &Known, 4200 FPClassTest InterestedClasses, unsigned Depth, 4201 const SimplifyQuery &Q) { 4202 auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); 4203 APInt DemandedElts = 4204 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); 4205 computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q); 4206 } 4207 4208 static void computeKnownFPClassForFPTrunc(const Operator *Op, 4209 const APInt &DemandedElts, 4210 FPClassTest InterestedClasses, 4211 KnownFPClass &Known, unsigned Depth, 4212 const SimplifyQuery &Q) { 4213 if ((InterestedClasses & 4214 (KnownFPClass::OrderedLessThanZeroMask | fcNan)) == fcNone) 4215 return; 4216 4217 KnownFPClass KnownSrc; 4218 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, 4219 KnownSrc, Depth + 1, Q); 4220 4221 // Sign should be preserved 4222 // TODO: Handle cannot be ordered greater than zero 4223 if (KnownSrc.cannotBeOrderedLessThanZero()) 4224 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask); 4225 4226 Known.propagateNaN(KnownSrc, true); 4227 4228 // Infinity needs a range check. 4229 } 4230 4231 // TODO: Merge implementation of cannotBeOrderedLessThanZero into here. 4232 void computeKnownFPClass(const Value *V, const APInt &DemandedElts, 4233 FPClassTest InterestedClasses, KnownFPClass &Known, 4234 unsigned Depth, const SimplifyQuery &Q) { 4235 assert(Known.isUnknown() && "should not be called with known information"); 4236 4237 if (!DemandedElts) { 4238 // No demanded elts, better to assume we don't know anything. 4239 Known.resetAll(); 4240 return; 4241 } 4242 4243 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); 4244 4245 if (auto *CFP = dyn_cast_or_null<ConstantFP>(V)) { 4246 Known.KnownFPClasses = CFP->getValueAPF().classify(); 4247 Known.SignBit = CFP->isNegative(); 4248 return; 4249 } 4250 4251 // Try to handle fixed width vector constants 4252 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); 4253 const Constant *CV = dyn_cast<Constant>(V); 4254 if (VFVTy && CV) { 4255 Known.KnownFPClasses = fcNone; 4256 4257 // For vectors, verify that each element is not NaN. 
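    // E.g. (sketch): <2 x float> <float 1.0, float undef> classifies as
    // fcPosNormal; undef lanes are skipped and constant lanes accumulated.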
4258 unsigned NumElts = VFVTy->getNumElements(); 4259 for (unsigned i = 0; i != NumElts; ++i) { 4260 Constant *Elt = CV->getAggregateElement(i); 4261 if (!Elt) { 4262 Known = KnownFPClass(); 4263 return; 4264 } 4265 if (isa<UndefValue>(Elt)) 4266 continue; 4267 auto *CElt = dyn_cast<ConstantFP>(Elt); 4268 if (!CElt) { 4269 Known = KnownFPClass(); 4270 return; 4271 } 4272 4273 KnownFPClass KnownElt{CElt->getValueAPF().classify(), CElt->isNegative()}; 4274 Known |= KnownElt; 4275 } 4276 4277 return; 4278 } 4279 4280 FPClassTest KnownNotFromFlags = fcNone; 4281 if (const auto *CB = dyn_cast<CallBase>(V)) 4282 KnownNotFromFlags |= CB->getRetNoFPClass(); 4283 else if (const auto *Arg = dyn_cast<Argument>(V)) 4284 KnownNotFromFlags |= Arg->getNoFPClass(); 4285 4286 const Operator *Op = dyn_cast<Operator>(V); 4287 if (const FPMathOperator *FPOp = dyn_cast_or_null<FPMathOperator>(Op)) { 4288 if (FPOp->hasNoNaNs()) 4289 KnownNotFromFlags |= fcNan; 4290 if (FPOp->hasNoInfs()) 4291 KnownNotFromFlags |= fcInf; 4292 } 4293 4294 if (Q.AC) { 4295 FPClassTest AssumedClasses = computeKnownFPClassFromAssumes(V, Q); 4296 KnownNotFromFlags |= ~AssumedClasses; 4297 } 4298 4299 // We no longer need to find out about these bits from inputs if we can 4300 // assume this from flags/attributes. 4301 InterestedClasses &= ~KnownNotFromFlags; 4302 4303 auto ClearClassesFromFlags = make_scope_exit([=, &Known] { 4304 Known.knownNot(KnownNotFromFlags); 4305 }); 4306 4307 if (!Op) 4308 return; 4309 4310 // All recursive calls that increase depth must come after this. 4311 if (Depth == MaxAnalysisRecursionDepth) 4312 return; 4313 4314 const unsigned Opc = Op->getOpcode(); 4315 switch (Opc) { 4316 case Instruction::FNeg: { 4317 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, 4318 Known, Depth + 1, Q); 4319 Known.fneg(); 4320 break; 4321 } 4322 case Instruction::Select: { 4323 Value *Cond = Op->getOperand(0); 4324 Value *LHS = Op->getOperand(1); 4325 Value *RHS = Op->getOperand(2); 4326 4327 FPClassTest FilterLHS = fcAllFlags; 4328 FPClassTest FilterRHS = fcAllFlags; 4329 4330 Value *TestedValue = nullptr; 4331 FPClassTest TestedMask = fcNone; 4332 uint64_t ClassVal = 0; 4333 const Function *F = cast<Instruction>(Op)->getFunction(); 4334 CmpInst::Predicate Pred; 4335 Value *CmpLHS, *CmpRHS; 4336 if (F && match(Cond, m_FCmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)))) { 4337 // If the select filters out a value based on the class, it no longer 4338 // participates in the class of the result 4339 4340 // TODO: In some degenerate cases we can infer something if we try again 4341 // without looking through sign operations. 4342 bool LookThroughFAbsFNeg = CmpLHS != LHS && CmpLHS != RHS; 4343 std::tie(TestedValue, TestedMask) = 4344 fcmpToClassTest(Pred, *F, CmpLHS, CmpRHS, LookThroughFAbsFNeg); 4345 } else if (match(Cond, 4346 m_Intrinsic<Intrinsic::is_fpclass>( 4347 m_Value(TestedValue), m_ConstantInt(ClassVal)))) { 4348 TestedMask = static_cast<FPClassTest>(ClassVal); 4349 } 4350 4351 if (TestedValue == LHS) { 4352 // match !isnan(x) ? x : y 4353 FilterLHS = TestedMask; 4354 } else if (TestedValue == RHS) { 4355 // match !isnan(x) ? 
y : x 4356 FilterRHS = ~TestedMask; 4357 } 4358 4359 KnownFPClass Known2; 4360 computeKnownFPClass(LHS, DemandedElts, InterestedClasses & FilterLHS, Known, 4361 Depth + 1, Q); 4362 Known.KnownFPClasses &= FilterLHS; 4363 4364 computeKnownFPClass(RHS, DemandedElts, InterestedClasses & FilterRHS, 4365 Known2, Depth + 1, Q); 4366 Known2.KnownFPClasses &= FilterRHS; 4367 4368 Known |= Known2; 4369 break; 4370 } 4371 case Instruction::Call: { 4372 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op)) { 4373 const Intrinsic::ID IID = II->getIntrinsicID(); 4374 switch (IID) { 4375 case Intrinsic::fabs: { 4376 if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) { 4377 // If we only care about the sign bit we don't need to inspect the 4378 // operand. 4379 computeKnownFPClass(II->getArgOperand(0), DemandedElts, 4380 InterestedClasses, Known, Depth + 1, Q); 4381 } 4382 4383 Known.fabs(); 4384 break; 4385 } 4386 case Intrinsic::copysign: { 4387 KnownFPClass KnownSign; 4388 4389 computeKnownFPClass(II->getArgOperand(0), DemandedElts, 4390 InterestedClasses, Known, Depth + 1, Q); 4391 computeKnownFPClass(II->getArgOperand(1), DemandedElts, 4392 InterestedClasses, KnownSign, Depth + 1, Q); 4393 Known.copysign(KnownSign); 4394 break; 4395 } 4396 case Intrinsic::fma: 4397 case Intrinsic::fmuladd: { 4398 if ((InterestedClasses & fcNegative) == fcNone) 4399 break; 4400 4401 if (II->getArgOperand(0) != II->getArgOperand(1)) 4402 break; 4403 4404 // The multiply cannot be -0 and therefore the add can't be -0 4405 Known.knownNot(fcNegZero); 4406 4407 // x * x + y is non-negative if y is non-negative. 4408 KnownFPClass KnownAddend; 4409 computeKnownFPClass(II->getArgOperand(2), DemandedElts, 4410 InterestedClasses, KnownAddend, Depth + 1, Q); 4411 4412 // TODO: Known sign bit with no nans 4413 if (KnownAddend.cannotBeOrderedLessThanZero()) 4414 Known.knownNot(fcNegative); 4415 break; 4416 } 4417 case Intrinsic::sqrt: 4418 case Intrinsic::experimental_constrained_sqrt: { 4419 KnownFPClass KnownSrc; 4420 FPClassTest InterestedSrcs = InterestedClasses; 4421 if (InterestedClasses & fcNan) 4422 InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask; 4423 4424 computeKnownFPClass(II->getArgOperand(0), DemandedElts, 4425 InterestedSrcs, KnownSrc, Depth + 1, Q); 4426 4427 if (KnownSrc.isKnownNeverPosInfinity()) 4428 Known.knownNot(fcPosInf); 4429 if (KnownSrc.isKnownNever(fcSNan)) 4430 Known.knownNot(fcSNan); 4431 4432 // Any negative value besides -0 returns a nan. 4433 if (KnownSrc.isKnownNeverNaN() && 4434 KnownSrc.cannotBeOrderedLessThanZero()) 4435 Known.knownNot(fcNan); 4436 4437 // The only negative value that can be returned is -0 for -0 inputs. 4438 Known.knownNot(fcNegInf | fcNegSubnormal | fcNegNormal); 4439 4440 // If the input denormal mode could be PreserveSign, a negative 4441 // subnormal input could produce a negative zero output. 4442 const Function *F = II->getFunction(); 4443 if (Q.IIQ.hasNoSignedZeros(II) || 4444 (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType()))) { 4445 Known.knownNot(fcNegZero); 4446 if (KnownSrc.isKnownNeverNaN()) 4447 Known.SignBit = false; 4448 } 4449 4450 break; 4451 } 4452 case Intrinsic::sin: 4453 case Intrinsic::cos: { 4454 // Return NaN on infinite inputs. 
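        // Sketch: sin/cos results always lie in [-1, 1], so fcInf is ruled
        // out unconditionally; fcNan only when the input is known to be
        // neither NaN nor infinity.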
4455 KnownFPClass KnownSrc; 4456 computeKnownFPClass(II->getArgOperand(0), DemandedElts, 4457 InterestedClasses, KnownSrc, Depth + 1, Q); 4458 Known.knownNot(fcInf); 4459 if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity()) 4460 Known.knownNot(fcNan); 4461 break; 4462 } 4463 4464 case Intrinsic::maxnum: 4465 case Intrinsic::minnum: 4466 case Intrinsic::minimum: 4467 case Intrinsic::maximum: { 4468 KnownFPClass KnownLHS, KnownRHS; 4469 computeKnownFPClass(II->getArgOperand(0), DemandedElts, 4470 InterestedClasses, KnownLHS, Depth + 1, Q); 4471 computeKnownFPClass(II->getArgOperand(1), DemandedElts, 4472 InterestedClasses, KnownRHS, Depth + 1, Q); 4473 4474 bool NeverNaN = 4475 KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN(); 4476 Known = KnownLHS | KnownRHS; 4477 4478 // If either operand is not NaN, the result is not NaN. 4479 if (NeverNaN && (IID == Intrinsic::minnum || IID == Intrinsic::maxnum)) 4480 Known.knownNot(fcNan); 4481 4482 if (IID == Intrinsic::maxnum) { 4483 // If at least one operand is known to be positive, the result must be 4484 // positive. 4485 if ((KnownLHS.cannotBeOrderedLessThanZero() && 4486 KnownLHS.isKnownNeverNaN()) || 4487 (KnownRHS.cannotBeOrderedLessThanZero() && 4488 KnownRHS.isKnownNeverNaN())) 4489 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask); 4490 } else if (IID == Intrinsic::maximum) { 4491 // If at least one operand is known to be positive, the result must be 4492 // positive. 4493 if (KnownLHS.cannotBeOrderedLessThanZero() || 4494 KnownRHS.cannotBeOrderedLessThanZero()) 4495 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask); 4496 } else if (IID == Intrinsic::minnum) { 4497 // If at least one operand is known to be negative, the result must be 4498 // negative. 4499 if ((KnownLHS.cannotBeOrderedGreaterThanZero() && 4500 KnownLHS.isKnownNeverNaN()) || 4501 (KnownRHS.cannotBeOrderedGreaterThanZero() && 4502 KnownRHS.isKnownNeverNaN())) 4503 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask); 4504 } else { 4505 // If at least one operand is known to be negative, the result must be 4506 // negative. 4507 if (KnownLHS.cannotBeOrderedGreaterThanZero() || 4508 KnownRHS.cannotBeOrderedGreaterThanZero()) 4509 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask); 4510 } 4511 4512 // Fixup zero handling if denormals could be returned as a zero. 4513 // 4514 // As there's no spec for denormal flushing, be conservative with the 4515 // treatment of denormals that could be flushed to zero. For older 4516 // subtargets on AMDGPU the min/max instructions would not flush the 4517 // output and return the original value. 4518 // 4519 // TODO: This could be refined based on the sign 4520 if ((Known.KnownFPClasses & fcZero) != fcNone && 4521 !Known.isKnownNeverSubnormal()) { 4522 const Function *Parent = II->getFunction(); 4523 if (!Parent) 4524 break; 4525 4526 DenormalMode Mode = Parent->getDenormalMode( 4527 II->getType()->getScalarType()->getFltSemantics()); 4528 if (Mode != DenormalMode::getIEEE()) 4529 Known.KnownFPClasses |= fcZero; 4530 } 4531 4532 break; 4533 } 4534 case Intrinsic::canonicalize: { 4535 KnownFPClass KnownSrc; 4536 computeKnownFPClass(II->getArgOperand(0), DemandedElts, 4537 InterestedClasses, KnownSrc, Depth + 1, Q); 4538 4539 // This is essentially a stronger form of 4540 // propagateCanonicalizingSrc. Other "canonicalizing" operations don't 4541 // actually have an IR canonicalization guarantee. 
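        // Sketch: canonicalize(snan) must return a quieted NaN, which is why
        // fcSNan can be ruled out below even when the source may be an sNaN.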
4542 4543 // Canonicalize may flush denormals to zero, so we have to consider the 4544 // denormal mode to preserve known-not-0 knowledge. 4545 Known.KnownFPClasses = KnownSrc.KnownFPClasses | fcZero | fcQNan; 4546 4547 // Stronger version of propagateNaN 4548 // Canonicalize is guaranteed to quiet signaling nans. 4549 if (KnownSrc.isKnownNeverNaN()) 4550 Known.knownNot(fcNan); 4551 else 4552 Known.knownNot(fcSNan); 4553 4554 const Function *F = II->getFunction(); 4555 if (!F) 4556 break; 4557 4558 // If the parent function flushes denormals, the canonical output cannot 4559 // be a denormal. 4560 const fltSemantics &FPType = 4561 II->getType()->getScalarType()->getFltSemantics(); 4562 DenormalMode DenormMode = F->getDenormalMode(FPType); 4563 if (DenormMode == DenormalMode::getIEEE()) { 4564 if (KnownSrc.isKnownNever(fcPosZero)) 4565 Known.knownNot(fcPosZero); 4566 if (KnownSrc.isKnownNever(fcNegZero)) 4567 Known.knownNot(fcNegZero); 4568 break; 4569 } 4570 4571 if (DenormMode.inputsAreZero() || DenormMode.outputsAreZero()) 4572 Known.knownNot(fcSubnormal); 4573 4574 if (DenormMode.Input == DenormalMode::PositiveZero || 4575 (DenormMode.Output == DenormalMode::PositiveZero && 4576 DenormMode.Input == DenormalMode::IEEE)) 4577 Known.knownNot(fcNegZero); 4578 4579 break; 4580 } 4581 case Intrinsic::trunc: 4582 case Intrinsic::floor: 4583 case Intrinsic::ceil: 4584 case Intrinsic::rint: 4585 case Intrinsic::nearbyint: 4586 case Intrinsic::round: 4587 case Intrinsic::roundeven: { 4588 KnownFPClass KnownSrc; 4589 FPClassTest InterestedSrcs = InterestedClasses; 4590 if (InterestedSrcs & fcPosFinite) 4591 InterestedSrcs |= fcPosFinite; 4592 if (InterestedSrcs & fcNegFinite) 4593 InterestedSrcs |= fcNegFinite; 4594 computeKnownFPClass(II->getArgOperand(0), DemandedElts, 4595 InterestedSrcs, KnownSrc, Depth + 1, Q); 4596 4597 // Integer results cannot be subnormal. 4598 Known.knownNot(fcSubnormal); 4599 4600 Known.propagateNaN(KnownSrc, true); 4601 4602 // Pass through infinities, except PPC_FP128 is a special case for 4603 // intrinsics other than trunc. 
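        // (A multi-unit type such as ppc_fp128 is represented as a pair of
        // doubles; presumably only trunc is simple enough there to be trusted
        // to pass infinities through, which is why the other rounding
        // intrinsics are excluded below.)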
4604 if (IID == Intrinsic::trunc || !V->getType()->isMultiUnitFPType()) { 4605 if (KnownSrc.isKnownNeverPosInfinity()) 4606 Known.knownNot(fcPosInf); 4607 if (KnownSrc.isKnownNeverNegInfinity()) 4608 Known.knownNot(fcNegInf); 4609 } 4610 4611 // Negative round ups to 0 produce -0 4612 if (KnownSrc.isKnownNever(fcPosFinite)) 4613 Known.knownNot(fcPosFinite); 4614 if (KnownSrc.isKnownNever(fcNegFinite)) 4615 Known.knownNot(fcNegFinite); 4616 4617 break; 4618 } 4619 case Intrinsic::exp: 4620 case Intrinsic::exp2: { 4621 Known.knownNot(fcNegative); 4622 if ((InterestedClasses & fcNan) == fcNone) 4623 break; 4624 4625 KnownFPClass KnownSrc; 4626 computeKnownFPClass(II->getArgOperand(0), DemandedElts, 4627 InterestedClasses, KnownSrc, Depth + 1, Q); 4628 if (KnownSrc.isKnownNeverNaN()) { 4629 Known.knownNot(fcNan); 4630 Known.SignBit = false; 4631 } 4632 4633 break; 4634 } 4635 case Intrinsic::fptrunc_round: { 4636 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, 4637 Known, Depth, Q); 4638 break; 4639 } 4640 case Intrinsic::log: 4641 case Intrinsic::log10: 4642 case Intrinsic::log2: 4643 case Intrinsic::experimental_constrained_log: 4644 case Intrinsic::experimental_constrained_log10: 4645 case Intrinsic::experimental_constrained_log2: { 4646 // log(+inf) -> +inf 4647 // log([+-]0.0) -> -inf 4648 // log(-inf) -> nan 4649 // log(-x) -> nan 4650 if ((InterestedClasses & (fcNan | fcInf)) == fcNone) 4651 break; 4652 4653 FPClassTest InterestedSrcs = InterestedClasses; 4654 if ((InterestedClasses & fcNegInf) != fcNone) 4655 InterestedSrcs |= fcZero | fcSubnormal; 4656 if ((InterestedClasses & fcNan) != fcNone) 4657 InterestedSrcs |= fcNan | (fcNegative & ~fcNan); 4658 4659 KnownFPClass KnownSrc; 4660 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, 4661 KnownSrc, Depth + 1, Q); 4662 4663 if (KnownSrc.isKnownNeverPosInfinity()) 4664 Known.knownNot(fcPosInf); 4665 4666 if (KnownSrc.isKnownNeverNaN() && 4667 KnownSrc.cannotBeOrderedLessThanZero()) 4668 Known.knownNot(fcNan); 4669 4670 const Function *F = II->getFunction(); 4671 if (F && KnownSrc.isKnownNeverLogicalZero(*F, II->getType())) 4672 Known.knownNot(fcNegInf); 4673 4674 break; 4675 } 4676 case Intrinsic::powi: { 4677 if ((InterestedClasses & fcNegative) == fcNone) 4678 break; 4679 4680 const Value *Exp = II->getArgOperand(1); 4681 Type *ExpTy = Exp->getType(); 4682 unsigned BitWidth = ExpTy->getScalarType()->getIntegerBitWidth(); 4683 KnownBits ExponentKnownBits(BitWidth); 4684 computeKnownBits(Exp, 4685 isa<VectorType>(ExpTy) ? DemandedElts : APInt(1, 1), 4686 ExponentKnownBits, Depth + 1, Q); 4687 4688 if (ExponentKnownBits.Zero[0]) { // Is even 4689 Known.knownNot(fcNegative); 4690 break; 4691 } 4692 4693 // Given that exp is an integer, here are the 4694 // ways that pow can return a negative value: 4695 // 4696 // pow(-x, exp) --> negative if exp is odd and x is negative. 4697 // pow(-0, exp) --> -inf if exp is negative odd. 4698 // pow(-0, exp) --> -0 if exp is positive odd. 4699 // pow(-inf, exp) --> -0 if exp is negative odd. 4700 // pow(-inf, exp) --> -inf if exp is positive odd. 
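        // Each of the cases above requires a negative base, so if the base is
        // known never to be negative, the result cannot be negative either;
        // that is what the check below relies on. E.g. powi(2.0, -3) == 0.125
        // and powi(0.0, 3) == 0.0 are both non-negative.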
4701 KnownFPClass KnownSrc; 4702 computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative, 4703 KnownSrc, Depth + 1, Q); 4704 if (KnownSrc.isKnownNever(fcNegative)) 4705 Known.knownNot(fcNegative); 4706 break; 4707 } 4708 case Intrinsic::ldexp: { 4709 KnownFPClass KnownSrc; 4710 computeKnownFPClass(II->getArgOperand(0), DemandedElts, 4711 InterestedClasses, KnownSrc, Depth + 1, Q); 4712 Known.propagateNaN(KnownSrc, /*PropagateSign=*/true); 4713 4714 // Sign is preserved, but underflows may produce zeroes. 4715 if (KnownSrc.isKnownNever(fcNegative)) 4716 Known.knownNot(fcNegative); 4717 else if (KnownSrc.cannotBeOrderedLessThanZero()) 4718 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask); 4719 4720 if (KnownSrc.isKnownNever(fcPositive)) 4721 Known.knownNot(fcPositive); 4722 else if (KnownSrc.cannotBeOrderedGreaterThanZero()) 4723 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask); 4724 4725 // Can refine inf/zero handling based on the exponent operand. 4726 const FPClassTest ExpInfoMask = fcZero | fcSubnormal | fcInf; 4727 if ((InterestedClasses & ExpInfoMask) == fcNone) 4728 break; 4729 if ((KnownSrc.KnownFPClasses & ExpInfoMask) == fcNone) 4730 break; 4731 4732 const fltSemantics &Flt 4733 = II->getType()->getScalarType()->getFltSemantics(); 4734 unsigned Precision = APFloat::semanticsPrecision(Flt); 4735 const Value *ExpArg = II->getArgOperand(1); 4736 ConstantRange ExpRange = computeConstantRange( 4737 ExpArg, true, Q.IIQ.UseInstrInfo, Q.AC, Q.CxtI, Q.DT, Depth + 1); 4738 4739 const int MantissaBits = Precision - 1; 4740 if (ExpRange.getSignedMin().sge(static_cast<int64_t>(MantissaBits))) 4741 Known.knownNot(fcSubnormal); 4742 4743 const Function *F = II->getFunction(); 4744 const APInt *ConstVal = ExpRange.getSingleElement(); 4745 if (ConstVal && ConstVal->isZero()) { 4746 // ldexp(x, 0) -> x, so propagate everything. 4747 Known.propagateCanonicalizingSrc(KnownSrc, *F, 4748 II->getType()); 4749 } else if (ExpRange.isAllNegative()) { 4750 // If we know the power is <= 0, can't introduce inf 4751 if (KnownSrc.isKnownNeverPosInfinity()) 4752 Known.knownNot(fcPosInf); 4753 if (KnownSrc.isKnownNeverNegInfinity()) 4754 Known.knownNot(fcNegInf); 4755 } else if (ExpRange.isAllNonNegative()) { 4756 // If we know the power is >= 0, can't introduce subnormal or zero 4757 if (KnownSrc.isKnownNeverPosSubnormal()) 4758 Known.knownNot(fcPosSubnormal); 4759 if (KnownSrc.isKnownNeverNegSubnormal()) 4760 Known.knownNot(fcNegSubnormal); 4761 if (F && KnownSrc.isKnownNeverLogicalPosZero(*F, II->getType())) 4762 Known.knownNot(fcPosZero); 4763 if (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType())) 4764 Known.knownNot(fcNegZero); 4765 } 4766 4767 break; 4768 } 4769 case Intrinsic::arithmetic_fence: { 4770 computeKnownFPClass(II->getArgOperand(0), DemandedElts, 4771 InterestedClasses, Known, Depth + 1, Q); 4772 break; 4773 } 4774 case Intrinsic::experimental_constrained_sitofp: 4775 case Intrinsic::experimental_constrained_uitofp: 4776 // Cannot produce nan 4777 Known.knownNot(fcNan); 4778 4779 // sitofp and uitofp turn into +0.0 for zero. 
4780 Known.knownNot(fcNegZero); 4781 4782 // Integers cannot be subnormal 4783 Known.knownNot(fcSubnormal); 4784 4785 if (IID == Intrinsic::experimental_constrained_uitofp) 4786 Known.signBitMustBeZero(); 4787 4788 // TODO: Copy inf handling from instructions 4789 break; 4790 default: 4791 break; 4792 } 4793 } 4794 4795 break; 4796 } 4797 case Instruction::FAdd: 4798 case Instruction::FSub: { 4799 KnownFPClass KnownLHS, KnownRHS; 4800 bool WantNegative = 4801 Op->getOpcode() == Instruction::FAdd && 4802 (InterestedClasses & KnownFPClass::OrderedLessThanZeroMask) != fcNone; 4803 bool WantNaN = (InterestedClasses & fcNan) != fcNone; 4804 bool WantNegZero = (InterestedClasses & fcNegZero) != fcNone; 4805 4806 if (!WantNaN && !WantNegative && !WantNegZero) 4807 break; 4808 4809 FPClassTest InterestedSrcs = InterestedClasses; 4810 if (WantNegative) 4811 InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask; 4812 if (InterestedClasses & fcNan) 4813 InterestedSrcs |= fcInf; 4814 computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedSrcs, 4815 KnownRHS, Depth + 1, Q); 4816 4817 if ((WantNaN && KnownRHS.isKnownNeverNaN()) || 4818 (WantNegative && KnownRHS.cannotBeOrderedLessThanZero()) || 4819 WantNegZero || Opc == Instruction::FSub) { 4820 4821 // RHS is canonically cheaper to compute. Skip inspecting the LHS if 4822 // there's no point. 4823 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedSrcs, 4824 KnownLHS, Depth + 1, Q); 4825 // Adding positive and negative infinity produces NaN. 4826 // TODO: Check sign of infinities. 4827 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() && 4828 (KnownLHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverInfinity())) 4829 Known.knownNot(fcNan); 4830 4831 // FIXME: Context function should always be passed in separately 4832 const Function *F = cast<Instruction>(Op)->getFunction(); 4833 4834 if (Op->getOpcode() == Instruction::FAdd) { 4835 if (KnownLHS.cannotBeOrderedLessThanZero() && 4836 KnownRHS.cannotBeOrderedLessThanZero()) 4837 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask); 4838 if (!F) 4839 break; 4840 4841 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. 4842 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) || 4843 KnownRHS.isKnownNeverLogicalNegZero(*F, Op->getType())) && 4844 // Make sure output negative denormal can't flush to -0 4845 outputDenormalIsIEEEOrPosZero(*F, Op->getType())) 4846 Known.knownNot(fcNegZero); 4847 } else { 4848 if (!F) 4849 break; 4850 4851 // Only fsub -0, +0 can return -0 4852 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) || 4853 KnownRHS.isKnownNeverLogicalPosZero(*F, Op->getType())) && 4854 // Make sure output negative denormal can't flush to -0 4855 outputDenormalIsIEEEOrPosZero(*F, Op->getType())) 4856 Known.knownNot(fcNegZero); 4857 } 4858 } 4859 4860 break; 4861 } 4862 case Instruction::FMul: { 4863 // X * X is always non-negative or a NaN. 4864 if (Op->getOperand(0) == Op->getOperand(1)) 4865 Known.knownNot(fcNegative); 4866 4867 if ((InterestedClasses & fcNan) != fcNan) 4868 break; 4869 4870 // fcSubnormal is only needed in case of DAZ. 
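    // (DAZ: denormals-are-zero. When the mode flushes denormal inputs to
    // zero, a subnormal operand can behave as a zero, and 0 * +/-inf produces
    // NaN, so fcSubnormal has to be tracked as well.)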
    const FPClassTest NeedForNan = fcNan | fcInf | fcZero | fcSubnormal;

    KnownFPClass KnownLHS, KnownRHS;
    computeKnownFPClass(Op->getOperand(1), DemandedElts, NeedForNan, KnownRHS,
                        Depth + 1, Q);
    if (!KnownRHS.isKnownNeverNaN())
      break;

    computeKnownFPClass(Op->getOperand(0), DemandedElts, NeedForNan, KnownLHS,
                        Depth + 1, Q);
    if (!KnownLHS.isKnownNeverNaN())
      break;

    // Only 0 * +/-inf produces NaN; if neither operand can be an infinity,
    // the result cannot be a NaN.
    if (KnownLHS.isKnownNeverInfinity() && KnownRHS.isKnownNeverInfinity()) {
      Known.knownNot(fcNan);
      break;
    }

    const Function *F = cast<Instruction>(Op)->getFunction();
    if (!F)
      break;

    if ((KnownRHS.isKnownNeverInfinity() ||
         KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) &&
        (KnownLHS.isKnownNeverInfinity() ||
         KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())))
      Known.knownNot(fcNan);

    break;
  }
  case Instruction::FDiv:
  case Instruction::FRem: {
    if (Op->getOperand(0) == Op->getOperand(1)) {
      // TODO: Could filter out snan if we inspect the operand
      if (Op->getOpcode() == Instruction::FDiv) {
        // X / X is always exactly 1.0 or a NaN.
        Known.KnownFPClasses = fcNan | fcPosNormal;
      } else {
        // X % X is always exactly [+-]0.0 or a NaN.
        Known.KnownFPClasses = fcNan | fcZero;
      }

      break;
    }

    const bool WantNan = (InterestedClasses & fcNan) != fcNone;
    const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
    const bool WantPositive =
        Opc == Instruction::FRem && (InterestedClasses & fcPositive) != fcNone;
    if (!WantNan && !WantNegative && !WantPositive)
      break;

    KnownFPClass KnownLHS, KnownRHS;

    computeKnownFPClass(Op->getOperand(1), DemandedElts,
                        fcNan | fcInf | fcZero | fcNegative, KnownRHS,
                        Depth + 1, Q);

    bool KnowSomethingUseful =
        KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative);

    if (KnowSomethingUseful || WantPositive) {
      const FPClassTest InterestedLHS =
          WantPositive ? fcAllFlags
                       : fcNan | fcInf | fcZero | fcSubnormal | fcNegative;

      computeKnownFPClass(Op->getOperand(0), DemandedElts,
                          InterestedClasses & InterestedLHS, KnownLHS,
                          Depth + 1, Q);
    }

    const Function *F = cast<Instruction>(Op)->getFunction();

    if (Op->getOpcode() == Instruction::FDiv) {
      // Only 0/0 and Inf/Inf produce NaN.
      if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
          (KnownLHS.isKnownNeverInfinity() ||
           KnownRHS.isKnownNeverInfinity()) &&
          ((F && KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) ||
           (F && KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())))) {
        Known.knownNot(fcNan);
      }

      // X / -0.0 is -Inf (or NaN).
      // A positive value divided by a positive value is positive.
      if (KnownLHS.isKnownNever(fcNegative) &&
          KnownRHS.isKnownNever(fcNegative))
        Known.knownNot(fcNegative);
    } else {
      // Inf REM x and x REM 0 produce NaN.
      if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
          KnownLHS.isKnownNeverInfinity() && F &&
          KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())) {
        Known.knownNot(fcNan);
      }

      // The sign for frem is the same as the first operand.
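      // E.g. frem(-5.0, 3.0) == -2.0 and frem(5.0, -3.0) == 2.0; like fmod,
      // the result takes the sign of the dividend.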
4968 if (KnownLHS.cannotBeOrderedLessThanZero()) 4969 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask); 4970 if (KnownLHS.cannotBeOrderedGreaterThanZero()) 4971 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask); 4972 4973 // See if we can be more aggressive about the sign of 0. 4974 if (KnownLHS.isKnownNever(fcNegative)) 4975 Known.knownNot(fcNegative); 4976 if (KnownLHS.isKnownNever(fcPositive)) 4977 Known.knownNot(fcPositive); 4978 } 4979 4980 break; 4981 } 4982 case Instruction::FPExt: { 4983 // Infinity, nan and zero propagate from source. 4984 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, 4985 Known, Depth + 1, Q); 4986 4987 const fltSemantics &DstTy = 4988 Op->getType()->getScalarType()->getFltSemantics(); 4989 const fltSemantics &SrcTy = 4990 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics(); 4991 4992 // All subnormal inputs should be in the normal range in the result type. 4993 if (APFloat::isRepresentableAsNormalIn(SrcTy, DstTy)) 4994 Known.knownNot(fcSubnormal); 4995 4996 // Sign bit of a nan isn't guaranteed. 4997 if (!Known.isKnownNeverNaN()) 4998 Known.SignBit = std::nullopt; 4999 break; 5000 } 5001 case Instruction::FPTrunc: { 5002 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known, 5003 Depth, Q); 5004 break; 5005 } 5006 case Instruction::SIToFP: 5007 case Instruction::UIToFP: { 5008 // Cannot produce nan 5009 Known.knownNot(fcNan); 5010 5011 // Integers cannot be subnormal 5012 Known.knownNot(fcSubnormal); 5013 5014 // sitofp and uitofp turn into +0.0 for zero. 5015 Known.knownNot(fcNegZero); 5016 if (Op->getOpcode() == Instruction::UIToFP) 5017 Known.signBitMustBeZero(); 5018 5019 if (InterestedClasses & fcInf) { 5020 // Get width of largest magnitude integer (remove a bit if signed). 5021 // This still works for a signed minimum value because the largest FP 5022 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx). 5023 int IntSize = Op->getOperand(0)->getType()->getScalarSizeInBits(); 5024 if (Op->getOpcode() == Instruction::SIToFP) 5025 --IntSize; 5026 5027 // If the exponent of the largest finite FP value can hold the largest 5028 // integer, the result of the cast must be finite. 5029 Type *FPTy = Op->getType()->getScalarType(); 5030 if (ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize) 5031 Known.knownNot(fcInf); 5032 } 5033 5034 break; 5035 } 5036 case Instruction::ExtractElement: { 5037 // Look through extract element. If the index is non-constant or 5038 // out-of-range demand all elements, otherwise just the extracted element. 5039 const Value *Vec = Op->getOperand(0); 5040 const Value *Idx = Op->getOperand(1); 5041 auto *CIdx = dyn_cast<ConstantInt>(Idx); 5042 5043 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) { 5044 unsigned NumElts = VecTy->getNumElements(); 5045 APInt DemandedVecElts = APInt::getAllOnes(NumElts); 5046 if (CIdx && CIdx->getValue().ult(NumElts)) 5047 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); 5048 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known, 5049 Depth + 1, Q); 5050 } 5051 5052 break; 5053 } 5054 case Instruction::InsertElement: { 5055 if (isa<ScalableVectorType>(Op->getType())) 5056 return; 5057 5058 const Value *Vec = Op->getOperand(0); 5059 const Value *Elt = Op->getOperand(1); 5060 auto *CIdx = dyn_cast<ConstantInt>(Op->getOperand(2)); 5061 // Early out if the index is non-constant or out-of-range. 
5062 unsigned NumElts = DemandedElts.getBitWidth(); 5063 if (!CIdx || CIdx->getValue().uge(NumElts)) 5064 return; 5065 5066 unsigned EltIdx = CIdx->getZExtValue(); 5067 // Do we demand the inserted element? 5068 if (DemandedElts[EltIdx]) { 5069 computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1, Q); 5070 // If we don't know any bits, early out. 5071 if (Known.isUnknown()) 5072 break; 5073 } else { 5074 Known.KnownFPClasses = fcNone; 5075 } 5076 5077 // We don't need the base vector element that has been inserted. 5078 APInt DemandedVecElts = DemandedElts; 5079 DemandedVecElts.clearBit(EltIdx); 5080 if (!!DemandedVecElts) { 5081 KnownFPClass Known2; 5082 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2, 5083 Depth + 1, Q); 5084 Known |= Known2; 5085 } 5086 5087 break; 5088 } 5089 case Instruction::ShuffleVector: { 5090 // For undef elements, we don't know anything about the common state of 5091 // the shuffle result. 5092 APInt DemandedLHS, DemandedRHS; 5093 auto *Shuf = dyn_cast<ShuffleVectorInst>(Op); 5094 if (!Shuf || !getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) 5095 return; 5096 5097 if (!!DemandedLHS) { 5098 const Value *LHS = Shuf->getOperand(0); 5099 computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known, 5100 Depth + 1, Q); 5101 5102 // If we don't know any bits, early out. 5103 if (Known.isUnknown()) 5104 break; 5105 } else { 5106 Known.KnownFPClasses = fcNone; 5107 } 5108 5109 if (!!DemandedRHS) { 5110 KnownFPClass Known2; 5111 const Value *RHS = Shuf->getOperand(1); 5112 computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2, 5113 Depth + 1, Q); 5114 Known |= Known2; 5115 } 5116 5117 break; 5118 } 5119 case Instruction::ExtractValue: { 5120 const ExtractValueInst *Extract = cast<ExtractValueInst>(Op); 5121 ArrayRef<unsigned> Indices = Extract->getIndices(); 5122 const Value *Src = Extract->getAggregateOperand(); 5123 if (isa<StructType>(Src->getType()) && Indices.size() == 1 && 5124 Indices[0] == 0) { 5125 if (const auto *II = dyn_cast<IntrinsicInst>(Src)) { 5126 switch (II->getIntrinsicID()) { 5127 case Intrinsic::frexp: { 5128 Known.knownNot(fcSubnormal); 5129 5130 KnownFPClass KnownSrc; 5131 computeKnownFPClass(II->getArgOperand(0), DemandedElts, 5132 InterestedClasses, KnownSrc, Depth + 1, Q); 5133 5134 const Function *F = cast<Instruction>(Op)->getFunction(); 5135 5136 if (KnownSrc.isKnownNever(fcNegative)) 5137 Known.knownNot(fcNegative); 5138 else { 5139 if (F && KnownSrc.isKnownNeverLogicalNegZero(*F, Op->getType())) 5140 Known.knownNot(fcNegZero); 5141 if (KnownSrc.isKnownNever(fcNegInf)) 5142 Known.knownNot(fcNegInf); 5143 } 5144 5145 if (KnownSrc.isKnownNever(fcPositive)) 5146 Known.knownNot(fcPositive); 5147 else { 5148 if (F && KnownSrc.isKnownNeverLogicalPosZero(*F, Op->getType())) 5149 Known.knownNot(fcPosZero); 5150 if (KnownSrc.isKnownNever(fcPosInf)) 5151 Known.knownNot(fcPosInf); 5152 } 5153 5154 Known.propagateNaN(KnownSrc); 5155 return; 5156 } 5157 default: 5158 break; 5159 } 5160 } 5161 } 5162 5163 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Depth + 1, 5164 Q); 5165 break; 5166 } 5167 case Instruction::PHI: { 5168 const PHINode *P = cast<PHINode>(Op); 5169 // Unreachable blocks may have zero-operand PHI nodes. 5170 if (P->getNumIncomingValues() == 0) 5171 break; 5172 5173 // Otherwise take the unions of the known bit sets of the operands, 5174 // taking conservative care to avoid excessive recursion. 
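    // For illustration, given a PHI such as
    //   %p = phi float [ 1.0, %entry ], [ %x, %loop ]
    // the result is the union of fcPosNormal (from the constant 1.0) and
    // whatever can be proven about %x.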
    const unsigned PhiRecursionLimit = MaxAnalysisRecursionDepth - 2;

    if (Depth < PhiRecursionLimit) {
      // Skip if every incoming value refers back to the PHI itself, in which
      // case the PHI folds to undef.
      if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
        break;

      bool First = true;

      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P)
          continue;

        KnownFPClass KnownSrc;
        // Recurse, but cap the recursion to two levels, because we don't want
        // to waste time spinning around in loops. We need at least depth 2 to
        // detect known sign bits.
        computeKnownFPClass(IncValue, DemandedElts, InterestedClasses,
                            KnownSrc, PhiRecursionLimit, Q);

        if (First) {
          Known = KnownSrc;
          First = false;
        } else {
          Known |= KnownSrc;
        }

        if (Known.KnownFPClasses == fcAllFlags)
          break;
      }
    }

    break;
  }
  default:
    break;
  }
}

KnownFPClass llvm::computeKnownFPClass(
    const Value *V, const APInt &DemandedElts, const DataLayout &DL,
    FPClassTest InterestedClasses, unsigned Depth, const TargetLibraryInfo *TLI,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
  KnownFPClass KnownClasses;
  ::computeKnownFPClass(
      V, DemandedElts, InterestedClasses, KnownClasses, Depth,
      SimplifyQuery(DL, TLI, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
  return KnownClasses;
}

KnownFPClass llvm::computeKnownFPClass(
    const Value *V, const DataLayout &DL, FPClassTest InterestedClasses,
    unsigned Depth, const TargetLibraryInfo *TLI, AssumptionCache *AC,
    const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo) {
  KnownFPClass Known;
  ::computeKnownFPClass(
      V, Known, InterestedClasses, Depth,
      SimplifyQuery(DL, TLI, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
  return Known;
}

Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {

  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8))
    return V;

  LLVMContext &Ctx = V->getContext();

  // Undef: don't care, any byte value will do.
  auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
  if (isa<UndefValue>(V))
    return UndefInt8;

  // Return undef for zero-sized types.
  if (!DL.getTypeStoreSize(V->getType()).isNonZero())
    return UndefInt8;

  Constant *C = dyn_cast<Constant>(V);
  if (!C) {
    // Conceptually, we could handle things like:
    //   %a = zext i8 %X to i16
    //   %b = shl i16 %a, 8
    //   %c = or i16 %a, %b
    // but until there is an example that actually needs this, it doesn't seem
    // worth worrying about.
    return nullptr;
  }

  // Handle 'null' ConstantAggregateZero etc.
  if (C->isNullValue())
    return Constant::getNullValue(Type::getInt8Ty(Ctx));

  // Constant floating-point values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    Type *Ty = nullptr;
    if (CFP->getType()->isHalfTy())
      Ty = Type::getInt16Ty(Ctx);
    else if (CFP->getType()->isFloatTy())
      Ty = Type::getInt32Ty(Ctx);
    else if (CFP->getType()->isDoubleTy())
      Ty = Type::getInt64Ty(Ctx);
    // Don't handle long double formats, which have strange constraints.
    return Ty ?
isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL) 5282 : nullptr; 5283 } 5284 5285 // We can handle constant integers that are multiple of 8 bits. 5286 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { 5287 if (CI->getBitWidth() % 8 == 0) { 5288 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); 5289 if (!CI->getValue().isSplat(8)) 5290 return nullptr; 5291 return ConstantInt::get(Ctx, CI->getValue().trunc(8)); 5292 } 5293 } 5294 5295 if (auto *CE = dyn_cast<ConstantExpr>(C)) { 5296 if (CE->getOpcode() == Instruction::IntToPtr) { 5297 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) { 5298 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace()); 5299 return isBytewiseValue( 5300 ConstantExpr::getIntegerCast(CE->getOperand(0), 5301 Type::getIntNTy(Ctx, BitWidth), false), 5302 DL); 5303 } 5304 } 5305 } 5306 5307 auto Merge = [&](Value *LHS, Value *RHS) -> Value * { 5308 if (LHS == RHS) 5309 return LHS; 5310 if (!LHS || !RHS) 5311 return nullptr; 5312 if (LHS == UndefInt8) 5313 return RHS; 5314 if (RHS == UndefInt8) 5315 return LHS; 5316 return nullptr; 5317 }; 5318 5319 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) { 5320 Value *Val = UndefInt8; 5321 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I) 5322 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL)))) 5323 return nullptr; 5324 return Val; 5325 } 5326 5327 if (isa<ConstantAggregate>(C)) { 5328 Value *Val = UndefInt8; 5329 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) 5330 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL)))) 5331 return nullptr; 5332 return Val; 5333 } 5334 5335 // Don't try to handle the handful of other constants. 5336 return nullptr; 5337 } 5338 5339 // This is the recursive version of BuildSubAggregate. It takes a few different 5340 // arguments. Idxs is the index within the nested struct From that we are 5341 // looking at now (which is of type IndexedType). IdxSkip is the number of 5342 // indices from Idxs that should be left out when inserting into the resulting 5343 // struct. To is the result struct built so far, new insertvalue instructions 5344 // build on that. 5345 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, 5346 SmallVectorImpl<unsigned> &Idxs, 5347 unsigned IdxSkip, 5348 Instruction *InsertBefore) { 5349 StructType *STy = dyn_cast<StructType>(IndexedType); 5350 if (STy) { 5351 // Save the original To argument so we can modify it 5352 Value *OrigTo = To; 5353 // General case, the type indexed by Idxs is a struct 5354 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 5355 // Process each struct element recursively 5356 Idxs.push_back(i); 5357 Value *PrevTo = To; 5358 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, 5359 InsertBefore); 5360 Idxs.pop_back(); 5361 if (!To) { 5362 // Couldn't find any inserted value for this index? Cleanup 5363 while (PrevTo != OrigTo) { 5364 InsertValueInst* Del = cast<InsertValueInst>(PrevTo); 5365 PrevTo = Del->getAggregateOperand(); 5366 Del->eraseFromParent(); 5367 } 5368 // Stop processing elements 5369 break; 5370 } 5371 } 5372 // If we successfully found a value for each of our subaggregates 5373 if (To) 5374 return To; 5375 } 5376 // Base case, the type indexed by SourceIdxs is not a struct, or not all of 5377 // the struct's elements had a value that was inserted directly. 
In the latter 5378 // case, perhaps we can't determine each of the subelements individually, but 5379 // we might be able to find the complete struct somewhere. 5380 5381 // Find the value that is at that particular spot 5382 Value *V = FindInsertedValue(From, Idxs); 5383 5384 if (!V) 5385 return nullptr; 5386 5387 // Insert the value in the new (sub) aggregate 5388 return InsertValueInst::Create(To, V, ArrayRef(Idxs).slice(IdxSkip), "tmp", 5389 InsertBefore); 5390 } 5391 5392 // This helper takes a nested struct and extracts a part of it (which is again a 5393 // struct) into a new value. For example, given the struct: 5394 // { a, { b, { c, d }, e } } 5395 // and the indices "1, 1" this returns 5396 // { c, d }. 5397 // 5398 // It does this by inserting an insertvalue for each element in the resulting 5399 // struct, as opposed to just inserting a single struct. This will only work if 5400 // each of the elements of the substruct are known (ie, inserted into From by an 5401 // insertvalue instruction somewhere). 5402 // 5403 // All inserted insertvalue instructions are inserted before InsertBefore 5404 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range, 5405 Instruction *InsertBefore) { 5406 assert(InsertBefore && "Must have someplace to insert!"); 5407 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), 5408 idx_range); 5409 Value *To = PoisonValue::get(IndexedType); 5410 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end()); 5411 unsigned IdxSkip = Idxs.size(); 5412 5413 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore); 5414 } 5415 5416 /// Given an aggregate and a sequence of indices, see if the scalar value 5417 /// indexed is already around as a register, for example if it was inserted 5418 /// directly into the aggregate. 5419 /// 5420 /// If InsertBefore is not null, this function will duplicate (modified) 5421 /// insertvalues when a part of a nested struct is extracted. 5422 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, 5423 Instruction *InsertBefore) { 5424 // Nothing to index? Just return V then (this is useful at the end of our 5425 // recursion). 5426 if (idx_range.empty()) 5427 return V; 5428 // We have indices, so V should have an indexable type. 5429 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && 5430 "Not looking at a struct or array?"); 5431 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && 5432 "Invalid indices for type?"); 5433 5434 if (Constant *C = dyn_cast<Constant>(V)) { 5435 C = C->getAggregateElement(idx_range[0]); 5436 if (!C) return nullptr; 5437 return FindInsertedValue(C, idx_range.slice(1), InsertBefore); 5438 } 5439 5440 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { 5441 // Loop the indices for the insertvalue instruction in parallel with the 5442 // requested indices 5443 const unsigned *req_idx = idx_range.begin(); 5444 for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); 5445 i != e; ++i, ++req_idx) { 5446 if (req_idx == idx_range.end()) { 5447 // We can't handle this without inserting insertvalues 5448 if (!InsertBefore) 5449 return nullptr; 5450 5451 // The requested index identifies a part of a nested aggregate. Handle 5452 // this specially. 
// For example,
        //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
        //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
        //   %C = extractvalue {i32, { i32, i32 } } %B, 1
        // This can be changed into
        //   %A = insertvalue {i32, i32 } undef, i32 10, 0
        //   %C = insertvalue {i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, ArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insertvalue inserts something other than what we are looking
      // for. See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             ArrayRef(req_idx, idx_range.end()), InsertBefore);
  }

  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.

    // Calculate the number of indices required
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extract value instruction
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add requested indices
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (such as extracting from a function return value
  // or load instruction)
  return nullptr;
}

bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
                                       unsigned CharSize) {
  // Make sure the GEP has exactly three arguments.
  if (GEP->getNumOperands() != 3)
    return false;

  // Make sure the index-ee is a pointer to an array of \p CharSize integers.
  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
    return false;

  // Check to make sure that the first operand of the GEP is an integer and
  // has value 0 so that we are sure we're indexing into the initializer.
  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!FirstIdx || !FirstIdx->isZero())
    return false;

  return true;
}

// If V refers to an initialized global constant, set Slice either to
// its initializer if the size of its elements equals ElementSize, or,
// for ElementSize == 8, to its representation as an array of unsigned
// char. Return true on success.
// Offset is in the unit "number of ElementSize sized elements".
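// For illustration (hypothetical global): given
//   @s = private constant [4 x i8] c"abc\00"
// a query with ElementSize == 8 and Offset == 1 yields Slice.Array = the
// initializer, Slice.Offset = 1 and Slice.Length = 3, i.e. the bytes "bc\00".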
5531 bool llvm::getConstantDataArrayInfo(const Value *V, 5532 ConstantDataArraySlice &Slice, 5533 unsigned ElementSize, uint64_t Offset) { 5534 assert(V && "V should not be null."); 5535 assert((ElementSize % 8) == 0 && 5536 "ElementSize expected to be a multiple of the size of a byte."); 5537 unsigned ElementSizeInBytes = ElementSize / 8; 5538 5539 // Drill down into the pointer expression V, ignoring any intervening 5540 // casts, and determine the identity of the object it references along 5541 // with the cumulative byte offset into it. 5542 const GlobalVariable *GV = 5543 dyn_cast<GlobalVariable>(getUnderlyingObject(V)); 5544 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) 5545 // Fail if V is not based on constant global object. 5546 return false; 5547 5548 const DataLayout &DL = GV->getParent()->getDataLayout(); 5549 APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0); 5550 5551 if (GV != V->stripAndAccumulateConstantOffsets(DL, Off, 5552 /*AllowNonInbounds*/ true)) 5553 // Fail if a constant offset could not be determined. 5554 return false; 5555 5556 uint64_t StartIdx = Off.getLimitedValue(); 5557 if (StartIdx == UINT64_MAX) 5558 // Fail if the constant offset is excessive. 5559 return false; 5560 5561 // Off/StartIdx is in the unit of bytes. So we need to convert to number of 5562 // elements. Simply bail out if that isn't possible. 5563 if ((StartIdx % ElementSizeInBytes) != 0) 5564 return false; 5565 5566 Offset += StartIdx / ElementSizeInBytes; 5567 ConstantDataArray *Array = nullptr; 5568 ArrayType *ArrayTy = nullptr; 5569 5570 if (GV->getInitializer()->isNullValue()) { 5571 Type *GVTy = GV->getValueType(); 5572 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedValue(); 5573 uint64_t Length = SizeInBytes / ElementSizeInBytes; 5574 5575 Slice.Array = nullptr; 5576 Slice.Offset = 0; 5577 // Return an empty Slice for undersized constants to let callers 5578 // transform even undefined library calls into simpler, well-defined 5579 // expressions. This is preferable to making the calls although it 5580 // prevents sanitizers from detecting such calls. 5581 Slice.Length = Length < Offset ? 0 : Length - Offset; 5582 return true; 5583 } 5584 5585 auto *Init = const_cast<Constant *>(GV->getInitializer()); 5586 if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) { 5587 Type *InitElTy = ArrayInit->getElementType(); 5588 if (InitElTy->isIntegerTy(ElementSize)) { 5589 // If Init is an initializer for an array of the expected type 5590 // and size, use it as is. 5591 Array = ArrayInit; 5592 ArrayTy = ArrayInit->getType(); 5593 } 5594 } 5595 5596 if (!Array) { 5597 if (ElementSize != 8) 5598 // TODO: Handle conversions to larger integral types. 5599 return false; 5600 5601 // Otherwise extract the portion of the initializer starting 5602 // at Offset as an array of bytes, and reset Offset. 5603 Init = ReadByteArrayFromGlobal(GV, Offset); 5604 if (!Init) 5605 return false; 5606 5607 Offset = 0; 5608 Array = dyn_cast<ConstantDataArray>(Init); 5609 ArrayTy = dyn_cast<ArrayType>(Init->getType()); 5610 } 5611 5612 uint64_t NumElts = ArrayTy->getArrayNumElements(); 5613 if (Offset > NumElts) 5614 return false; 5615 5616 Slice.Array = Array; 5617 Slice.Offset = Offset; 5618 Slice.Length = NumElts - Offset; 5619 return true; 5620 } 5621 5622 /// Extract bytes from the initializer of the constant array V, which need 5623 /// not be a nul-terminated string. On success, store the bytes in Str and 5624 /// return true. 
When TrimAtNul is set, Str will contain only the bytes up 5625 /// to but not including the first nul. Return false on failure. 5626 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, 5627 bool TrimAtNul) { 5628 ConstantDataArraySlice Slice; 5629 if (!getConstantDataArrayInfo(V, Slice, 8)) 5630 return false; 5631 5632 if (Slice.Array == nullptr) { 5633 if (TrimAtNul) { 5634 // Return a nul-terminated string even for an empty Slice. This is 5635 // safe because all existing SimplifyLibcalls callers require string 5636 // arguments and the behavior of the functions they fold is undefined 5637 // otherwise. Folding the calls this way is preferable to making 5638 // the undefined library calls, even though it prevents sanitizers 5639 // from reporting such calls. 5640 Str = StringRef(); 5641 return true; 5642 } 5643 if (Slice.Length == 1) { 5644 Str = StringRef("", 1); 5645 return true; 5646 } 5647 // We cannot instantiate a StringRef as we do not have an appropriate string 5648 // of 0s at hand. 5649 return false; 5650 } 5651 5652 // Start out with the entire array in the StringRef. 5653 Str = Slice.Array->getAsString(); 5654 // Skip over 'offset' bytes. 5655 Str = Str.substr(Slice.Offset); 5656 5657 if (TrimAtNul) { 5658 // Trim off the \0 and anything after it. If the array is not nul 5659 // terminated, we just return the whole end of string. The client may know 5660 // some other way that the string is length-bound. 5661 Str = Str.substr(0, Str.find('\0')); 5662 } 5663 return true; 5664 } 5665 5666 // These next two are very similar to the above, but also look through PHI 5667 // nodes. 5668 // TODO: See if we can integrate these two together. 5669 5670 /// If we can compute the length of the string pointed to by 5671 /// the specified pointer, return 'len+1'. If we can't, return 0. 5672 static uint64_t GetStringLengthH(const Value *V, 5673 SmallPtrSetImpl<const PHINode*> &PHIs, 5674 unsigned CharSize) { 5675 // Look through noop bitcast instructions. 5676 V = V->stripPointerCasts(); 5677 5678 // If this is a PHI node, there are two cases: either we have already seen it 5679 // or we haven't. 5680 if (const PHINode *PN = dyn_cast<PHINode>(V)) { 5681 if (!PHIs.insert(PN).second) 5682 return ~0ULL; // already in the set. 5683 5684 // If it was new, see if all the input strings are the same length. 5685 uint64_t LenSoFar = ~0ULL; 5686 for (Value *IncValue : PN->incoming_values()) { 5687 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize); 5688 if (Len == 0) return 0; // Unknown length -> unknown. 5689 5690 if (Len == ~0ULL) continue; 5691 5692 if (Len != LenSoFar && LenSoFar != ~0ULL) 5693 return 0; // Disagree -> unknown. 5694 LenSoFar = Len; 5695 } 5696 5697 // Success, all agree. 5698 return LenSoFar; 5699 } 5700 5701 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) 5702 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 5703 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize); 5704 if (Len1 == 0) return 0; 5705 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize); 5706 if (Len2 == 0) return 0; 5707 if (Len1 == ~0ULL) return Len2; 5708 if (Len2 == ~0ULL) return Len1; 5709 if (Len1 != Len2) return 0; 5710 return Len1; 5711 } 5712 5713 // Otherwise, see if we can read the string. 5714 ConstantDataArraySlice Slice; 5715 if (!getConstantDataArrayInfo(V, Slice, CharSize)) 5716 return 0; 5717 5718 if (Slice.Array == nullptr) 5719 // Zeroinitializer (including an empty one). 5720 return 1; 5721 5722 // Search for the first nul character. 
// Return a conservative result even when there is no nul: if there is no
  // nul, the string function being folded (such as strlen) is undefined
  // anyway, and folding can be preferable to making the undefined library
  // call.
  unsigned NullIndex = 0;
  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
    if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
      break;
  }

  return NullIndex + 1;
}

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
  if (!V->getType()->isPointerTy())
    return 0;

  SmallPtrSet<const PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
  // return an empty string as a length.
  return Len == ~0ULL ? 1 : Len;
}

const Value *
llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                           bool MustPreserveNullness) {
  assert(Call &&
         "getArgumentAliasingToReturnedPointer only works on nonnull calls");
  if (const Value *RV = Call->getReturnedArgOperand())
    return RV;
  // This can be used only as an aliasing property.
  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
          Call, MustPreserveNullness))
    return Call->getArgOperand(0);
  return nullptr;
}

bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness) {
  switch (Call->getIntrinsicID()) {
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::aarch64_irg:
  case Intrinsic::aarch64_tagp:
  // The amdgcn_make_buffer_rsrc function does not alter the address of the
  // input pointer (and thus preserves null-ness for the purposes of escape
  // analysis, which is where the MustPreserveNullness flag comes into play).
  // However, it will not necessarily map ptr addrspace(N) null to ptr
  // addrspace(8) null, aka the "null descriptor", which has "all loads return
  // 0, all stores are dropped" semantics. Given the context of this intrinsic
  // list, no one should be relying on such a strict interpretation of
  // MustPreserveNullness (and, at time of writing, they are not), but we
  // document this fact out of an abundance of caution.
  case Intrinsic::amdgcn_make_buffer_rsrc:
    return true;
  case Intrinsic::ptrmask:
    return !MustPreserveNullness;
  default:
    return false;
  }
}

/// \p PN defines a loop-variant pointer to an object. Check if the
/// previous iteration of the loop was referring to the same object as \p PN.
static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
                                         const LoopInfo *LI) {
  // Find the loop-defined value.
  Loop *L = LI->getLoopFor(PN->getParent());
  if (PN->getNumIncomingValues() != 2)
    return true;

  // Find the value from the previous iteration.
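  // (That is, the incoming value that is defined inside loop L; the other
  // incoming value enters from outside the loop and is the initial pointer.)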
  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    return true;

  // If a new pointer is loaded in the loop, the pointer references a
  // different object in every iteration. E.g.:
  //   for (i)
  //     int *p = a[i];
  //     ...
  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
    if (!L->isLoopInvariant(Load->getPointerOperand()))
      return false;
  return true;
}

const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
  if (!V->getType()->isPointerTy())
    return V;
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
      if (!V->getType()->isPointerTy())
        return V;
    } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        return V;
      V = GA->getAliasee();
    } else {
      if (auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture'd
        // pointer is not returned from some special intrinsic (because the
        // function would have to be marked with the 'returned' attribute), it
        // is crucial to use this function, since it stays in sync with
        // CaptureTracking. Not using it may cause weird miscompilations where
        // two aliasing pointers are assumed not to alias each other.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}

void llvm::getUnderlyingObjects(const Value *V,
                                SmallVectorImpl<const Value *> &Objects,
                                LoopInfo *LI, unsigned MaxLookup) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = getUnderlyingObject(P, MaxLookup);

    if (!Visited.insert(P).second)
      continue;

    if (auto *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (auto *PN = dyn_cast<PHINode>(P)) {
      // If this PHI changes the underlying object in every iteration of the
      // loop, don't look through it. Consider:
      //   int **A;
      //   for (i) {
      //     Prev = Curr; // Prev = PHI (Prev_0, Curr)
      //     Curr = A[i];
      //     *Prev, *Curr;
      //   }
      //
      // Prev is tracking Curr one iteration behind so they refer to different
      // underlying objects.
      if (!LI || !LI->isLoopHeader(PN->getParent()) ||
          isSameUnderlyingObjectInLoop(PN, LI))
        append_range(Worklist, PN->incoming_values());
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}

/// This is the function that does the work of looking through basic
/// ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjects.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant, a multiplied value, or a phi, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
           !isa<PHINode>(U->getOperand(1))))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (true);
}

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
/// It returns false if an unidentified object is found in
/// getUnderlyingObjects.
bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
                                          SmallVectorImpl<Value *> &Objects) {
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 4> Working(1, V);
  do {
    V = Working.pop_back_val();

    SmallVector<const Value *, 4> Objs;
    getUnderlyingObjects(V, Objs);

    for (const Value *V : Objs) {
      if (!Visited.insert(V).second)
        continue;
      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
        const Value *O =
            getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
        if (O->getType()->isPointerTy()) {
          Working.push_back(O);
          continue;
        }
      }
      // If getUnderlyingObjects fails to find an identifiable object,
      // getUnderlyingObjectsForCodeGen also fails for safety.
5953 if (!isIdentifiedObject(V)) { 5954 Objects.clear(); 5955 return false; 5956 } 5957 Objects.push_back(const_cast<Value *>(V)); 5958 } 5959 } while (!Working.empty()); 5960 return true; 5961 } 5962 5963 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) { 5964 AllocaInst *Result = nullptr; 5965 SmallPtrSet<Value *, 4> Visited; 5966 SmallVector<Value *, 4> Worklist; 5967 5968 auto AddWork = [&](Value *V) { 5969 if (Visited.insert(V).second) 5970 Worklist.push_back(V); 5971 }; 5972 5973 AddWork(V); 5974 do { 5975 V = Worklist.pop_back_val(); 5976 assert(Visited.count(V)); 5977 5978 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 5979 if (Result && Result != AI) 5980 return nullptr; 5981 Result = AI; 5982 } else if (CastInst *CI = dyn_cast<CastInst>(V)) { 5983 AddWork(CI->getOperand(0)); 5984 } else if (PHINode *PN = dyn_cast<PHINode>(V)) { 5985 for (Value *IncValue : PN->incoming_values()) 5986 AddWork(IncValue); 5987 } else if (auto *SI = dyn_cast<SelectInst>(V)) { 5988 AddWork(SI->getTrueValue()); 5989 AddWork(SI->getFalseValue()); 5990 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) { 5991 if (OffsetZero && !GEP->hasAllZeroIndices()) 5992 return nullptr; 5993 AddWork(GEP->getPointerOperand()); 5994 } else if (CallBase *CB = dyn_cast<CallBase>(V)) { 5995 Value *Returned = CB->getReturnedArgOperand(); 5996 if (Returned) 5997 AddWork(Returned); 5998 else 5999 return nullptr; 6000 } else { 6001 return nullptr; 6002 } 6003 } while (!Worklist.empty()); 6004 6005 return Result; 6006 } 6007 6008 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper( 6009 const Value *V, bool AllowLifetime, bool AllowDroppable) { 6010 for (const User *U : V->users()) { 6011 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 6012 if (!II) 6013 return false; 6014 6015 if (AllowLifetime && II->isLifetimeStartOrEnd()) 6016 continue; 6017 6018 if (AllowDroppable && II->isDroppable()) 6019 continue; 6020 6021 return false; 6022 } 6023 return true; 6024 } 6025 6026 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 6027 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper( 6028 V, /* AllowLifetime */ true, /* AllowDroppable */ false); 6029 } 6030 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) { 6031 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper( 6032 V, /* AllowLifetime */ true, /* AllowDroppable */ true); 6033 } 6034 6035 bool llvm::mustSuppressSpeculation(const LoadInst &LI) { 6036 if (!LI.isUnordered()) 6037 return true; 6038 const Function &F = *LI.getFunction(); 6039 // Speculative load may create a race that did not exist in the source. 6040 return F.hasFnAttribute(Attribute::SanitizeThread) || 6041 // Speculative load may load data from dirty regions. 6042 F.hasFnAttribute(Attribute::SanitizeAddress) || 6043 F.hasFnAttribute(Attribute::SanitizeHWAddress); 6044 } 6045 6046 bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst, 6047 const Instruction *CtxI, 6048 AssumptionCache *AC, 6049 const DominatorTree *DT, 6050 const TargetLibraryInfo *TLI) { 6051 return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI, 6052 AC, DT, TLI); 6053 } 6054 6055 bool llvm::isSafeToSpeculativelyExecuteWithOpcode( 6056 unsigned Opcode, const Instruction *Inst, const Instruction *CtxI, 6057 AssumptionCache *AC, const DominatorTree *DT, 6058 const TargetLibraryInfo *TLI) { 6059 #ifndef NDEBUG 6060 if (Inst->getOpcode() != Opcode) { 6061 // Check that the operands are actually compatible with the Opcode override. 
6062 auto hasEqualReturnAndLeadingOperandTypes = 6063 [](const Instruction *Inst, unsigned NumLeadingOperands) { 6064 if (Inst->getNumOperands() < NumLeadingOperands) 6065 return false; 6066 const Type *ExpectedType = Inst->getType(); 6067 for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp) 6068 if (Inst->getOperand(ItOp)->getType() != ExpectedType) 6069 return false; 6070 return true; 6071 }; 6072 assert(!Instruction::isBinaryOp(Opcode) || 6073 hasEqualReturnAndLeadingOperandTypes(Inst, 2)); 6074 assert(!Instruction::isUnaryOp(Opcode) || 6075 hasEqualReturnAndLeadingOperandTypes(Inst, 1)); 6076 } 6077 #endif 6078 6079 switch (Opcode) { 6080 default: 6081 return true; 6082 case Instruction::UDiv: 6083 case Instruction::URem: { 6084 // x / y is undefined if y == 0. 6085 const APInt *V; 6086 if (match(Inst->getOperand(1), m_APInt(V))) 6087 return *V != 0; 6088 return false; 6089 } 6090 case Instruction::SDiv: 6091 case Instruction::SRem: { 6092 // x / y is undefined if y == 0 or x == INT_MIN and y == -1 6093 const APInt *Numerator, *Denominator; 6094 if (!match(Inst->getOperand(1), m_APInt(Denominator))) 6095 return false; 6096 // We cannot hoist this division if the denominator is 0. 6097 if (*Denominator == 0) 6098 return false; 6099 // It's safe to hoist if the denominator is not 0 or -1. 6100 if (!Denominator->isAllOnes()) 6101 return true; 6102 // At this point we know that the denominator is -1. It is safe to hoist as 6103 // long we know that the numerator is not INT_MIN. 6104 if (match(Inst->getOperand(0), m_APInt(Numerator))) 6105 return !Numerator->isMinSignedValue(); 6106 // The numerator *might* be MinSignedValue. 6107 return false; 6108 } 6109 case Instruction::Load: { 6110 const LoadInst *LI = dyn_cast<LoadInst>(Inst); 6111 if (!LI) 6112 return false; 6113 if (mustSuppressSpeculation(*LI)) 6114 return false; 6115 const DataLayout &DL = LI->getModule()->getDataLayout(); 6116 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(), 6117 LI->getType(), LI->getAlign(), DL, 6118 CtxI, AC, DT, TLI); 6119 } 6120 case Instruction::Call: { 6121 auto *CI = dyn_cast<const CallInst>(Inst); 6122 if (!CI) 6123 return false; 6124 const Function *Callee = CI->getCalledFunction(); 6125 6126 // The called function could have undefined behavior or side-effects, even 6127 // if marked readnone nounwind. 6128 return Callee && Callee->isSpeculatable(); 6129 } 6130 case Instruction::VAArg: 6131 case Instruction::Alloca: 6132 case Instruction::Invoke: 6133 case Instruction::CallBr: 6134 case Instruction::PHI: 6135 case Instruction::Store: 6136 case Instruction::Ret: 6137 case Instruction::Br: 6138 case Instruction::IndirectBr: 6139 case Instruction::Switch: 6140 case Instruction::Unreachable: 6141 case Instruction::Fence: 6142 case Instruction::AtomicRMW: 6143 case Instruction::AtomicCmpXchg: 6144 case Instruction::LandingPad: 6145 case Instruction::Resume: 6146 case Instruction::CatchSwitch: 6147 case Instruction::CatchPad: 6148 case Instruction::CatchRet: 6149 case Instruction::CleanupPad: 6150 case Instruction::CleanupRet: 6151 return false; // Misc instructions which have effects 6152 } 6153 } 6154 6155 bool llvm::mayHaveNonDefUseDependency(const Instruction &I) { 6156 if (I.mayReadOrWriteMemory()) 6157 // Memory dependency possible 6158 return true; 6159 if (!isSafeToSpeculativelyExecute(&I)) 6160 // Can't move above a maythrow call or infinite loop. Or if an 6161 // inalloca alloca, above a stacksave call. 
    return true;
  if (!isGuaranteedToTransferExecutionToSuccessor(&I))
    // 1) Can't reorder two inf-loop calls, even if readonly.
    // 2) Also can't reorder an inf-loop call below an instruction which isn't
    //    safe to speculatively execute. (Inverse of above.)
    return true;
  return false;
}

/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
  switch (OR) {
  case ConstantRange::OverflowResult::MayOverflow:
    return OverflowResult::MayOverflow;
  case ConstantRange::OverflowResult::AlwaysOverflowsLow:
    return OverflowResult::AlwaysOverflowsLow;
  case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
    return OverflowResult::AlwaysOverflowsHigh;
  case ConstantRange::OverflowResult::NeverOverflows:
    return OverflowResult::NeverOverflows;
  }
  llvm_unreachable("Unknown OverflowResult");
}

/// Combine constant ranges from computeConstantRange() and computeKnownBits().
static ConstantRange computeConstantRangeIncludingKnownBits(
    const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo = true) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
  ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
  ConstantRange CR2 = computeConstantRange(V, ForSigned, UseInstrInfo);
  ConstantRange::PreferredRangeType RangeType =
      ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
  return CR1.intersectWith(CR2, RangeType);
}

OverflowResult llvm::computeOverflowForUnsignedMul(
    const Value *LHS, const Value *RHS, const DataLayout &DL,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                        UseInstrInfo);
  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                        UseInstrInfo);
  ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
  ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
  return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
}

OverflowResult
llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                  const DataLayout &DL, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  // Multiplying n * m significant bits yields a result of n + m significant
  // bits. If the total number of significant bits does not exceed the
  // result bit width (minus 1), there is no overflow.
  // This means if we have enough leading sign bits in the operands
  // we can guarantee that the result does not overflow.
  // Ref: "Hacker's Delight" by Henry Warren
  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();

  // Note that underestimating the number of sign bits gives a more
  // conservative answer.
  unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
                      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);

  // First handle the easy case: if we have enough sign bits there's
  // definitely no overflow.
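  // For example, two i16 operands with 9 sign bits each lie in [-128, 127],
  // so the product's magnitude is at most 16384 < 2^15 and cannot overflow
  // (SignBits == 18 > BitWidth + 1 == 17).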
  if (SignBits > BitWidth + 1)
    return OverflowResult::NeverOverflows;

  // There are two ambiguous cases where there can be no overflow:
  //   SignBits == BitWidth + 1    and
  //   SignBits == BitWidth
  // The second case is difficult to check, therefore we only handle the
  // first case.
  if (SignBits == BitWidth + 1) {
    // It overflows only when both arguments are negative and the true
    // product is exactly the minimum negative number.
    // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
    // For simplicity we just check if at least one side is not negative.
    KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                          UseInstrInfo);
    KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                          UseInstrInfo);
    if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
      return OverflowResult::NeverOverflows;
  }
  return OverflowResult::MayOverflow;
}

OverflowResult llvm::computeOverflowForUnsignedAdd(
    const Value *LHS, const Value *RHS, const DataLayout &DL,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT, UseInstrInfo);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT, UseInstrInfo);
  return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
}

static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                                  const Value *RHS,
                                                  const AddOperator *Add,
                                                  const DataLayout &DL,
                                                  AssumptionCache *AC,
                                                  const Instruction *CxtI,
                                                  const DominatorTree *DT) {
  if (Add && Add->hasNoSignedWrap()) {
    return OverflowResult::NeverOverflows;
  }

  // If LHS and RHS each have at least two sign bits, the addition will look
  // like
  //
  //   XX..... +
  //   YY.....
  //
  // If the carry into the most significant position is 0, X and Y can't both
  // be 1 and therefore the carry out of the addition is also 0.
  //
  // If the carry into the most significant position is 1, X and Y can't both
  // be 0 and therefore the carry out of the addition is also 1.
  //
  // Since the carry into the most significant position is always equal to
  // the carry out of the addition, there is no signed overflow.
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
    return OverflowResult::NeverOverflows;

  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  OverflowResult OR =
      mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
  if (OR != OverflowResult::MayOverflow)
    return OR;

  // The remaining code needs Add to be available. Return early if it is not.
  if (!Add)
    return OverflowResult::MayOverflow;

  // If the sign of Add is the same as at least one of the operands, this add
  // CANNOT overflow. If this can be determined from the known bits of the
  // operands the above signedAddMayOverflow() check will have already done so.
  // The only other way to improve on the known bits is from an assumption, so
  // call computeKnownBitsFromAssume() directly.
  bool LHSOrRHSKnownNonNegative =
      (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
  bool LHSOrRHSKnownNegative =
      (LHSRange.isAllNegative() || RHSRange.isAllNegative());
  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
    KnownBits AddKnown(LHSRange.getBitWidth());
    computeKnownBitsFromAssume(
        Add, AddKnown, /*Depth=*/0,
        SimplifyQuery(DL, /*TLI*/ nullptr, DT, AC, CxtI, DT));
    if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
        (AddKnown.isNegative() && LHSOrRHSKnownNegative))
      return OverflowResult::NeverOverflows;
  }

  return OverflowResult::MayOverflow;
}

OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
  // X - (X % ?)
  // The remainder of a value can't have greater magnitude than itself,
  // so the subtraction can't overflow.

  // X - (X -nuw ?)
  // In the minimal case, this would simplify to "?", so there's no subtract
  // at all. But if this analysis is used to peek through casts, for example,
  // then determining no-overflow may allow other transforms.

  // TODO: There are other patterns like this.
  //       See simplifyICmpWithBinOpOnLHS() for candidates.
  if (match(RHS, m_URem(m_Specific(LHS), m_Value())) ||
      match(RHS, m_NUWSub(m_Specific(LHS), m_Value())))
    if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
      return OverflowResult::NeverOverflows;

  // Checking for conditions implied by dominating conditions may be expensive.
  // Limit it to usub_with_overflow calls for now.
  if (match(CxtI,
            m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
    if (auto C =
            isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
      if (*C)
        return OverflowResult::NeverOverflows;
      return OverflowResult::AlwaysOverflowsLow;
    }
  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
  return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
}

OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  // X - (X % ?)
  // The remainder of a value can't have greater magnitude than itself,
  // so the subtraction can't overflow.

  // X - (X -nsw ?)
  // In the minimal case, this would simplify to "?", so there's no subtract
  // at all. But if this analysis is used to peek through casts, for example,
  // then determining no-overflow may allow other transforms.
  if (match(RHS, m_SRem(m_Specific(LHS), m_Value())) ||
      match(RHS, m_NSWSub(m_Specific(LHS), m_Value())))
    if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
      return OverflowResult::NeverOverflows;

  // If LHS and RHS each have at least two sign bits, the subtraction
  // cannot overflow.
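  // (With two sign bits each, both operands lie within
  // [-2^(BitWidth-2), 2^(BitWidth-2) - 1], so their difference always stays
  // inside the signed range.)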
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
    return OverflowResult::NeverOverflows;

  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
}

bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                     const DominatorTree &DT) {
  SmallVector<const BranchInst *, 2> GuardingBranches;
  SmallVector<const ExtractValueInst *, 2> Results;

  for (const User *U : WO->users()) {
    if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");

      if (EVI->getIndices()[0] == 0)
        Results.push_back(EVI);
      else {
        assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");

        for (const auto *U : EVI->users())
          if (const auto *B = dyn_cast<BranchInst>(U)) {
            assert(B->isConditional() && "How else is it using an i1?");
            GuardingBranches.push_back(B);
          }
      }
    } else {
      // We are using the aggregate directly in a way we don't want to analyze
      // here (storing it to a global, say).
      return false;
    }
  }

  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
    BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
    if (!NoWrapEdge.isSingleEdge())
      return false;

    // Check if all users of the add are provably no-wrap.
    for (const auto *Result : Results) {
      // If the extractvalue itself is not executed on overflow, then we don't
      // need to check each use separately, since domination is transitive.
      if (DT.dominates(NoWrapEdge, Result->getParent()))
        continue;

      for (const auto &RU : Result->uses())
        if (!DT.dominates(NoWrapEdge, RU))
          return false;
    }

    return true;
  };

  return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
}

/// Shifts return poison if the shift amount is equal to or larger than the
/// bitwidth.
static bool shiftAmountKnownInRange(const Value *ShiftAmount) {
  auto *C = dyn_cast<Constant>(ShiftAmount);
  if (!C)
    return false;

  // Shifts return poison if the shift amount is equal to or larger than the
  // bitwidth.
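  // E.g. for `shl <2 x i8> %x, <i8 1, i8 9>` the second lane's shift amount
  // (9 >= 8) makes that lane poison, so the amount is not known in range.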
  SmallVector<const Constant *, 4> ShiftAmounts;
  if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
    unsigned NumElts = FVTy->getNumElements();
    for (unsigned i = 0; i < NumElts; ++i)
      ShiftAmounts.push_back(C->getAggregateElement(i));
  } else if (isa<ScalableVectorType>(C->getType()))
    return false; // Can't tell, just return false to be safe.
  else
    ShiftAmounts.push_back(C);

  bool Safe = llvm::all_of(ShiftAmounts, [](const Constant *C) {
    auto *CI = dyn_cast_or_null<ConstantInt>(C);
    return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
  });

  return Safe;
}

static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
                                   bool ConsiderFlagsAndMetadata) {

  if (ConsiderFlagsAndMetadata && Op->hasPoisonGeneratingFlagsOrMetadata())
    return true;

  unsigned Opcode = Op->getOpcode();

  // Check whether opcode is a poison/undef-generating operation.
  switch (Opcode) {
  case Instruction::Shl:
  case Instruction::AShr:
  case Instruction::LShr:
    return !shiftAmountKnownInRange(Op->getOperand(1));
  case Instruction::FPToSI:
  case Instruction::FPToUI:
    // fptosi/ui yields poison if the resulting value does not fit in the
    // destination type.
    return true;
  case Instruction::Call:
    if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
      switch (II->getIntrinsicID()) {
      // TODO: Add more intrinsics.
      case Intrinsic::ctlz:
      case Intrinsic::cttz:
      case Intrinsic::abs:
        if (cast<ConstantInt>(II->getArgOperand(1))->isNullValue())
          return false;
        break;
      case Intrinsic::ctpop:
      case Intrinsic::bswap:
      case Intrinsic::bitreverse:
      case Intrinsic::fshl:
      case Intrinsic::fshr:
      case Intrinsic::smax:
      case Intrinsic::smin:
      case Intrinsic::umax:
      case Intrinsic::umin:
      case Intrinsic::ptrmask:
      case Intrinsic::fptoui_sat:
      case Intrinsic::fptosi_sat:
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::usub_with_overflow:
      case Intrinsic::umul_with_overflow:
      case Intrinsic::sadd_sat:
      case Intrinsic::uadd_sat:
      case Intrinsic::ssub_sat:
      case Intrinsic::usub_sat:
        return false;
      case Intrinsic::sshl_sat:
      case Intrinsic::ushl_sat:
        return !shiftAmountKnownInRange(II->getArgOperand(1));
      case Intrinsic::fma:
      case Intrinsic::fmuladd:
      case Intrinsic::sqrt:
      case Intrinsic::powi:
      case Intrinsic::sin:
      case Intrinsic::cos:
      case Intrinsic::pow:
      case Intrinsic::log:
      case Intrinsic::log10:
      case Intrinsic::log2:
      case Intrinsic::exp:
      case Intrinsic::exp2:
      case Intrinsic::fabs:
      case Intrinsic::copysign:
      case Intrinsic::floor:
      case Intrinsic::ceil:
      case Intrinsic::trunc:
      case Intrinsic::rint:
      case Intrinsic::nearbyint:
      case Intrinsic::round:
      case Intrinsic::roundeven:
      case Intrinsic::fptrunc_round:
      case Intrinsic::canonicalize:
      case Intrinsic::arithmetic_fence:
      case Intrinsic::minnum:
      case Intrinsic::maxnum:
      case Intrinsic::minimum:
      case Intrinsic::maximum:
      case Intrinsic::is_fpclass:
        return false;
      case Intrinsic::lround:
      case Intrinsic::llround:
      case Intrinsic::lrint:
      case Intrinsic::llrint:
        // If the value doesn't fit, an unspecified value is returned (but
        // this is not poison).
        return false;
      }
    }
    [[fallthrough]];
  case Instruction::CallBr:
  case Instruction::Invoke: {
    const auto *CB = cast<CallBase>(Op);
    return !CB->hasRetAttr(Attribute::NoUndef);
  }
  case Instruction::InsertElement:
  case Instruction::ExtractElement: {
    // If the index exceeds the length of the vector, it returns poison.
    auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
    unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
    auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
    if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
      return true;
    return false;
  }
  case Instruction::ShuffleVector: {
    // shufflevector may return undef.
    if (PoisonOnly)
      return false;
    ArrayRef<int> Mask = isa<ConstantExpr>(Op)
                             ? cast<ConstantExpr>(Op)->getShuffleMask()
                             : cast<ShuffleVectorInst>(Op)->getShuffleMask();
    return is_contained(Mask, PoisonMaskElem);
  }
  case Instruction::FNeg:
  case Instruction::PHI:
  case Instruction::Select:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::ExtractValue:
  case Instruction::InsertValue:
  case Instruction::Freeze:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    return false;
  case Instruction::GetElementPtr:
    // inbounds is handled above.
    // TODO: what about inrange on constexpr?
    return false;
  default: {
    const auto *CE = dyn_cast<ConstantExpr>(Op);
    if (isa<CastInst>(Op) || (CE && CE->isCast()))
      return false;
    else if (Instruction::isBinaryOp(Opcode))
      return false;
    // Be conservative and return true.
    return true;
  }
  }
}

bool llvm::canCreateUndefOrPoison(const Operator *Op,
                                  bool ConsiderFlagsAndMetadata) {
  return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false,
                                  ConsiderFlagsAndMetadata);
}

bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata) {
  return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true,
                                  ConsiderFlagsAndMetadata);
}

static bool directlyImpliesPoison(const Value *ValAssumedPoison,
                                  const Value *V, unsigned Depth) {
  if (ValAssumedPoison == V)
    return true;

  const unsigned MaxDepth = 2;
  if (Depth >= MaxDepth)
    return false;

  if (const auto *I = dyn_cast<Instruction>(V)) {
    if (any_of(I->operands(), [=](const Use &Op) {
          return propagatesPoison(Op) &&
                 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
        }))
      return true;

    // V  = extractvalue V0, idx
    // V2 = extractvalue V0, idx2
    // V0's elements are all poison or not. (e.g., add_with_overflow)
    const WithOverflowInst *II;
    if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
        (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
         llvm::is_contained(II->args(), ValAssumedPoison)))
      return true;
  }
  return false;
}

static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
                          unsigned Depth) {
  if (isGuaranteedNotToBePoison(ValAssumedPoison))
    return true;

  if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
    return true;

  const unsigned MaxDepth = 2;
  if (Depth >= MaxDepth)
    return false;

  const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
  if (I && !canCreatePoison(cast<Operator>(I))) {
    return all_of(I->operands(), [=](const Value *Op) {
      return impliesPoison(Op, V, Depth + 1);
    });
  }
  return false;
}

bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
  return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
}

static bool programUndefinedIfUndefOrPoison(const Value *V,
                                            bool PoisonOnly);

static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                             AssumptionCache *AC,
                                             const Instruction *CtxI,
                                             const DominatorTree *DT,
                                             unsigned Depth, bool PoisonOnly) {
  if (Depth >= MaxAnalysisRecursionDepth)
    return false;

  if (isa<MetadataAsValue>(V))
    return false;

  if (const auto *A = dyn_cast<Argument>(V)) {
    if (A->hasAttribute(Attribute::NoUndef) ||
        A->hasAttribute(Attribute::Dereferenceable) ||
        A->hasAttribute(Attribute::DereferenceableOrNull))
      return true;
  }

  if (auto *C = dyn_cast<Constant>(V)) {
    if (isa<UndefValue>(C))
      return PoisonOnly && !isa<PoisonValue>(C);

    if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
        isa<ConstantPointerNull>(C) || isa<Function>(C))
      return true;

    if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
      return (PoisonOnly ? !C->containsPoisonElement()
                         : !C->containsUndefOrPoisonElement()) &&
             !C->containsConstantExpression();
  }

  // Strip cast operations from a pointer value.
  // Note that stripPointerCastsSameRepresentation can strip off getelementptr
  // inbounds with zero offset. To guarantee that the result isn't poison, the
  // stripped pointer is checked as it has to point into an allocated object
  // or be null; this ensures that `inbounds` getelementptrs with a zero
  // offset cannot produce poison.
  // It can also strip off addrspacecasts that do not change the bit
  // representation; we believe such an addrspacecast is equivalent to a no-op.
  auto *StrippedV = V->stripPointerCastsSameRepresentation();
  if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
      isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
    return true;

  auto OpCheck = [&](const Value *V) {
    return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
                                            PoisonOnly);
  };

  if (auto *Opr = dyn_cast<Operator>(V)) {
    // If the value is a freeze instruction, then it can never
    // be undef or poison.
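    // (freeze yields an arbitrary but fixed, well-defined value whenever its
    // operand is undef or poison, and is a no-op otherwise.)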
    if (isa<FreezeInst>(V))
      return true;

    if (const auto *CB = dyn_cast<CallBase>(V)) {
      if (CB->hasRetAttr(Attribute::NoUndef))
        return true;
    }

    if (const auto *PN = dyn_cast<PHINode>(V)) {
      unsigned Num = PN->getNumIncomingValues();
      bool IsWellDefined = true;
      for (unsigned i = 0; i < Num; ++i) {
        auto *TI = PN->getIncomingBlock(i)->getTerminator();
        if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
                                              DT, Depth + 1, PoisonOnly)) {
          IsWellDefined = false;
          break;
        }
      }
      if (IsWellDefined)
        return true;
    } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
      return true;
  }

  if (auto *I = dyn_cast<LoadInst>(V))
    if (I->hasMetadata(LLVMContext::MD_noundef) ||
        I->hasMetadata(LLVMContext::MD_dereferenceable) ||
        I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
      return true;

  if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
    return true;

  // CtxI may be null or a cloned instruction.
  if (!CtxI || !CtxI->getParent() || !DT)
    return false;

  auto *DNode = DT->getNode(CtxI->getParent());
  if (!DNode)
    // Unreachable block.
    return false;

  // If V is used as a branch condition before reaching CtxI, V cannot be
  // undef or poison.
  //   br V, BB1, BB2
  // BB1:
  //   CtxI ; V cannot be undef or poison here
  auto *Dominator = DNode->getIDom();
  while (Dominator) {
    auto *TI = Dominator->getBlock()->getTerminator();

    Value *Cond = nullptr;
    if (auto BI = dyn_cast_or_null<BranchInst>(TI)) {
      if (BI->isConditional())
        Cond = BI->getCondition();
    } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
      Cond = SI->getCondition();
    }

    if (Cond) {
      if (Cond == V)
        return true;
      else if (PoisonOnly && isa<Operator>(Cond)) {
        // For poison, we can analyze further.
        auto *Opr = cast<Operator>(Cond);
        if (any_of(Opr->operands(),
                   [V](const Use &U) { return V == U && propagatesPoison(U); }))
          return true;
      }
    }

    Dominator = Dominator->getIDom();
  }

  if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC))
    return true;

  return false;
}

bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
                                            const Instruction *CtxI,
                                            const DominatorTree *DT,
                                            unsigned Depth) {
  return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
}

bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
                                     const Instruction *CtxI,
                                     const DominatorTree *DT, unsigned Depth) {
  return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
}

/// Return true if undefined behavior would provably be executed on the path to
/// OnPathTo if Root produced a poison result. Note that this doesn't say
/// anything about whether OnPathTo is actually executed or whether Root is
/// actually poison. This can be used to assess whether a new use of Root can
/// be added at a location which is control equivalent with OnPathTo (such as
/// immediately before it) without introducing UB which didn't previously
/// exist. Note that a false result conveys no information.
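///
/// For example (hypothetical IR), with Root = %r and OnPathTo = %use:
///   %q = udiv i32 %a, %r      ; immediate UB if %r were poison
///   %use = add i32 %q, 1
/// Here %q dominates %use, so a poison %r would provably trigger UB before
/// execution could reach %use.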
bool llvm::mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
                                         Instruction *OnPathTo,
                                         DominatorTree *DT) {
  // Basic approach is to assume Root is poison, propagate poison forward
  // through all users we can easily track, and then check whether any of those
  // users are provably UB and must execute before our exiting block might
  // exit.

  // The set of all recursive users we've visited (which are assumed to all be
  // poison because of said visit).
  SmallSet<const Value *, 16> KnownPoison;
  SmallVector<const Instruction *, 16> Worklist;
  Worklist.push_back(Root);
  while (!Worklist.empty()) {
    const Instruction *I = Worklist.pop_back_val();

    // If we know this must trigger UB on a path leading to our target.
    if (mustTriggerUB(I, KnownPoison) && DT->dominates(I, OnPathTo))
      return true;

    // If we can't analyze propagation through this instruction, just skip it
    // and transitive users. Safe as false is a conservative result.
    if (I != Root && !any_of(I->operands(), [&KnownPoison](const Use &U) {
          return KnownPoison.contains(U) && propagatesPoison(U);
        }))
      continue;

    if (KnownPoison.insert(I).second)
      for (const User *User : I->users())
        Worklist.push_back(cast<Instruction>(User));
  }

  // Might be non-UB, or might have a path we couldn't prove must execute on
  // the way to the exiting bb.
  return false;
}

OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
                                       Add, DL, AC, CxtI, DT);
}

OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
  // Note: An atomic operation isn't guaranteed to return in a reasonable
  // amount of time because it's possible for another thread to interfere with
  // it for an arbitrary length of time, but programs aren't allowed to rely
  // on that.

  // If there is no successor, then execution can't transfer to it.
  if (isa<ReturnInst>(I))
    return false;
  if (isa<UnreachableInst>(I))
    return false;

  // Note: Do not add new checks here; instead, change Instruction::mayThrow or
  // Instruction::willReturn.
  //
  // FIXME: Move this check into Instruction::willReturn.
  if (isa<CatchPadInst>(I)) {
    switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
    default:
      // A catchpad may invoke exception object constructors and such, which
      // in some languages can be arbitrary code, so be conservative by
      // default.
      return false;
    case EHPersonality::CoreCLR:
      // For CoreCLR, it just involves a type test.
      return true;
    }
  }

  // An instruction that returns without throwing must transfer control flow
  // to a successor.
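  // (mayThrow() covers exceptional exits; willReturn() rules out infinite
  // loops and calls to functions that may never return.)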
  return !I->mayThrow() && I->willReturn();
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
  // TODO: This is slightly conservative for invoke instructions since exiting
  // via an exception *is* normal control flow for them.
  for (const Instruction &I : *BB)
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      return false;
  return true;
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(
    BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
    unsigned ScanLimit) {
  return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
                                                    ScanLimit);
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(
    iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
  assert(ScanLimit && "scan limit must be non-zero");
  for (const Instruction &I : Range) {
    if (isa<DbgInfoIntrinsic>(I))
      continue;
    if (--ScanLimit == 0)
      return false;
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      return false;
  }
  return true;
}

bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {
  // The loop header is guaranteed to be executed for every iteration.
  //
  // FIXME: Relax this constraint to cover all basic blocks that are
  // guaranteed to be executed at every iteration.
  if (I->getParent() != L->getHeader()) return false;

  for (const Instruction &LI : *L->getHeader()) {
    if (&LI == I) return true;
    if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
  }
  llvm_unreachable("Instruction not contained in its own parent basic block.");
}

bool llvm::propagatesPoison(const Use &PoisonOp) {
  const Operator *I = cast<Operator>(PoisonOp.getUser());
  switch (I->getOpcode()) {
  case Instruction::Freeze:
  case Instruction::PHI:
  case Instruction::Invoke:
    return false;
  case Instruction::Select:
    return PoisonOp.getOperandNo() == 0;
  case Instruction::Call:
    if (auto *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      // TODO: Add more intrinsics.
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::usub_with_overflow:
      case Intrinsic::umul_with_overflow:
        // If an input is a vector containing a poison element, the
        // corresponding lanes of both output vectors (the calculated results
        // and the overflow bits) are poison.
        return true;
      case Intrinsic::ctpop:
        return true;
      }
    }
    return false;
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::GetElementPtr:
    return true;
  default:
    if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
      return true;

    // Be conservative and return false.
    return false;
  }
}

void llvm::getGuaranteedWellDefinedOps(
    const Instruction *I, SmallVectorImpl<const Value *> &Operands) {
  switch (I->getOpcode()) {
  case Instruction::Store:
    Operands.push_back(cast<StoreInst>(I)->getPointerOperand());
    break;

  case Instruction::Load:
    Operands.push_back(cast<LoadInst>(I)->getPointerOperand());
    break;

  // Since the dereferenceable attribute implies noundef, atomic operations
  // also implicitly have noundef pointers.
  case Instruction::AtomicCmpXchg:
    Operands.push_back(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
    break;

  case Instruction::AtomicRMW:
    Operands.push_back(cast<AtomicRMWInst>(I)->getPointerOperand());
    break;

  case Instruction::Call:
  case Instruction::Invoke: {
    const CallBase *CB = cast<CallBase>(I);
    if (CB->isIndirectCall())
      Operands.push_back(CB->getCalledOperand());
    for (unsigned i = 0; i < CB->arg_size(); ++i) {
      if (CB->paramHasAttr(i, Attribute::NoUndef) ||
          CB->paramHasAttr(i, Attribute::Dereferenceable) ||
          CB->paramHasAttr(i, Attribute::DereferenceableOrNull))
        Operands.push_back(CB->getArgOperand(i));
    }
    break;
  }
  case Instruction::Ret:
    if (I->getFunction()->hasRetAttribute(Attribute::NoUndef))
      Operands.push_back(I->getOperand(0));
    break;
  case Instruction::Switch:
    Operands.push_back(cast<SwitchInst>(I)->getCondition());
    break;
  case Instruction::Br: {
    auto *BR = cast<BranchInst>(I);
    if (BR->isConditional())
      Operands.push_back(BR->getCondition());
    break;
  }
  default:
    break;
  }
}

void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
                                     SmallVectorImpl<const Value *> &Operands) {
  getGuaranteedWellDefinedOps(I, Operands);
  switch (I->getOpcode()) {
  // Divisors of these operations must not be poison, but they are allowed to
  // be partially undef (which is why they are listed here rather than in
  // getGuaranteedWellDefinedOps).
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    Operands.push_back(I->getOperand(1));
    break;
  default:
    break;
  }
}

bool llvm::mustTriggerUB(const Instruction *I,
                         const SmallPtrSetImpl<const Value *> &KnownPoison) {
  SmallVector<const Value *, 4> NonPoisonOps;
  getGuaranteedNonPoisonOps(I, NonPoisonOps);

  for (const auto *V : NonPoisonOps)
    if (KnownPoison.count(V))
      return true;

  return false;
}

static bool programUndefinedIfUndefOrPoison(const Value *V,
                                            bool PoisonOnly) {
  // We currently only look for uses of values within the same basic
  // block, as that makes it easier to guarantee that the uses will be
  // executed given that Inst is executed.
  //
  // FIXME: Expand this to consider uses beyond the same basic block. To do
  // this, look out for the distinction between post-dominance and strong
  // post-dominance.
  const BasicBlock *BB = nullptr;
  BasicBlock::const_iterator Begin;
  if (const auto *Inst = dyn_cast<Instruction>(V)) {
    BB = Inst->getParent();
    Begin = Inst->getIterator();
    Begin++;
  } else if (const auto *Arg = dyn_cast<Argument>(V)) {
    BB = &Arg->getParent()->getEntryBlock();
    Begin = BB->begin();
  } else {
    return false;
  }

  // Limit the number of instructions we look at, to avoid scanning through
  // large blocks. The current limit is chosen arbitrarily.
  unsigned ScanLimit = 32;
  BasicBlock::const_iterator End = BB->end();

  if (!PoisonOnly) {
    // Since undef does not propagate eagerly, be conservative and just check
    // whether a value is directly passed to an instruction that must take
    // well-defined operands.

    for (const auto &I : make_range(Begin, End)) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      if (--ScanLimit == 0)
        break;

      SmallVector<const Value *, 4> WellDefinedOps;
      getGuaranteedWellDefinedOps(&I, WellDefinedOps);
      if (is_contained(WellDefinedOps, V))
        return true;

      if (!isGuaranteedToTransferExecutionToSuccessor(&I))
        break;
    }
    return false;
  }

  // Set of instructions that we have proved will yield poison if Inst
  // does.
  SmallSet<const Value *, 16> YieldsPoison;
  SmallSet<const BasicBlock *, 4> Visited;

  YieldsPoison.insert(V);
  Visited.insert(BB);

  while (true) {
    for (const auto &I : make_range(Begin, End)) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      if (--ScanLimit == 0)
        return false;
      if (mustTriggerUB(&I, YieldsPoison))
        return true;
      if (!isGuaranteedToTransferExecutionToSuccessor(&I))
        return false;

      // If an operand is poison and propagates it, mark I as yielding poison.
      for (const Use &Op : I.operands()) {
        if (YieldsPoison.count(Op) && propagatesPoison(Op)) {
          YieldsPoison.insert(&I);
          break;
        }
      }

      // Special handling for select, which returns poison if its operand 0 is
      // poison (handled in the loop above) *or* if both its true/false
      // operands are poison (handled here).
      if (I.getOpcode() == Instruction::Select &&
          YieldsPoison.count(I.getOperand(1)) &&
          YieldsPoison.count(I.getOperand(2))) {
        YieldsPoison.insert(&I);
      }
    }

    BB = BB->getSingleSuccessor();
    if (!BB || !Visited.insert(BB).second)
      break;

    Begin = BB->getFirstNonPHI()->getIterator();
    End = BB->end();
  }
  return false;
}

bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
  return ::programUndefinedIfUndefOrPoison(Inst, false);
}

bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
  return ::programUndefinedIfUndefOrPoison(Inst, true);
}

static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
  if (FMF.noNaNs())
    return true;

  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isNaN();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isNaN())
        return false;
    }
    return true;
  }

  if (isa<ConstantAggregateZero>(V))
    return true;

  return false;
}

static bool isKnownNonZero(const Value *V) {
  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isZero();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isZero())
        return false;
    }
    return true;
  }

  return false;
}

/// Match clamp pattern for float types without caring about NaNs or signed
/// zeros.
/// Given a non-min/max outer cmp/select from the clamp pattern, this
/// function recognizes if it can be substituted by a "canonical" min/max
/// pattern.
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TrueVal, Value *FalseVal,
                                               Value *&LHS, Value *&RHS) {
  // Try to match
  //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
  //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
  // and return a description of the outer Max/Min.

  // First, check if select has inverse order:
  if (CmpRHS == FalseVal) {
    std::swap(TrueVal, FalseVal);
    Pred = CmpInst::getInversePredicate(Pred);
  }

  // Assume success now. If there's no match, callers should not use these
  // anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  const APFloat *FC1;
  if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
    return {SPF_UNKNOWN, SPNB_NA, false};

  const APFloat *FC2;
  switch (Pred) {
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULT:
  case CmpInst::FCMP_ULE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        *FC1 < *FC2)
      return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
    break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGT:
  case CmpInst::FCMP_UGE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        *FC1 > *FC2)
      return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
    break;
  default:
    break;
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// Recognize variations of:
///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
                                      Value *CmpLHS, Value *CmpRHS,
                                      Value *TrueVal, Value *FalseVal) {
  // Swap the select operands and predicate to match the patterns below.
  if (CmpRHS != TrueVal) {
    Pred = ICmpInst::getSwappedPredicate(Pred);
    std::swap(TrueVal, FalseVal);
  }
  const APInt *C1;
  if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
    const APInt *C2;
    // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
    if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
      return {SPF_SMAX, SPNB_NA, false};

    // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
    if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
      return {SPF_SMIN, SPNB_NA, false};

    // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
    if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
      return {SPF_UMAX, SPNB_NA, false};

    // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
    if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
      return {SPF_UMIN, SPNB_NA, false};
  }
  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// Recognize variations of:
///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b), min(b,c))
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TVal, Value *FVal,
                                               unsigned Depth) {
  // TODO: Allow FP min/max with nnan/nsz.
  assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");

  Value *A = nullptr, *B = nullptr;
  SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
  if (!SelectPatternResult::isMinOrMax(L.Flavor))
    return {SPF_UNKNOWN, SPNB_NA, false};

  Value *C = nullptr, *D = nullptr;
  SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
  if (L.Flavor != R.Flavor)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // We have something like: x Pred y ? min(a, b) : min(c, d).
  // Try to match the compare to the min/max operations of the select operands.
  // First, make sure we have the right compare predicate.
  switch (L.Flavor) {
  case SPF_SMIN:
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_SMAX:
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMIN:
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMAX:
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  default:
    return {SPF_UNKNOWN, SPNB_NA, false};
  }

  // If there is a common operand in the already matched min/max and the other
  // min/max operands match the compare operands (either directly or inverted),
  // then this is min/max of the same flavor.

  // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  if (D == B) {
    if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  if (C == B) {
    if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  if (D == A) {
    if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  if (C == A) {
    if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// If the input value is the result of a 'not' op, constant integer, or vector
/// splat of a constant integer, return the bitwise-not source value.
/// TODO: This could be extended to handle non-splat vector integer constants.
static Value *getNotValue(Value *V) {
  Value *NotV;
  if (match(V, m_Not(m_Value(NotV))))
    return NotV;

  const APInt *C;
  if (match(V, m_APInt(C)))
    return ConstantInt::get(V->getType(), ~(*C));

  return nullptr;
}

/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS,
                                       unsigned Depth) {
  // Assume success. If there's no match, callers should not use these anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  // Look through 'not' ops to find disguised min/max.
  // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
  // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
  if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
    switch (Pred) {
    case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
    case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
    case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
    case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
    default: break;
    }
  }

  // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
  // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
  if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
    switch (Pred) {
    case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
    case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
    case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
    case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
    default: break;
    }
  }

  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
    return {SPF_UNKNOWN, SPNB_NA, false};

  const APInt *C1;
  if (!match(CmpRHS, m_APInt(C1)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  // An unsigned min/max can be written with a signed compare.
  const APInt *C2;
  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
      (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
    // Is the sign bit set?
    // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
    // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
    if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
      return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};

    // Is the sign bit clear?
    // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
    // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
    if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
      return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
  assert(X && Y && "Invalid operand");

  // X = sub (0, Y) || X = sub nsw (0, Y)
  if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
      (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
    return true;

  // Y = sub (0, X) || Y = sub nsw (0, X)
  if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
      (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
    return true;

  // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
  Value *A, *B;
  return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
                       match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
         (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
                      match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
}

static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS,
                                              unsigned Depth) {
  bool HasMismatchedZeros = false;
  if (CmpInst::isFPPredicate(Pred)) {
    // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has
    // one 0.0 operand, set the compare's 0.0 operands to that same value for
    // the purpose of identifying min/max. Disregard vector constants with
    // undefined elements because those cannot be back-propagated for analysis.
    Value *OutputZeroVal = nullptr;
    if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
        !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
      OutputZeroVal = TrueVal;
    else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
             !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
      OutputZeroVal = FalseVal;

    if (OutputZeroVal) {
      if (match(CmpLHS, m_AnyZeroFP()) && CmpLHS != OutputZeroVal) {
        HasMismatchedZeros = true;
        CmpLHS = OutputZeroVal;
      }
      if (match(CmpRHS, m_AnyZeroFP()) && CmpRHS != OutputZeroVal) {
        HasMismatchedZeros = true;
        CmpRHS = OutputZeroVal;
      }
    }
  }

  LHS = CmpLHS;
  RHS = CmpRHS;

  // Signed zero may return inconsistent results between implementations.
  //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
  //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore, we behave conservatively and only proceed if at least one of
  // the operands is known to not be zero or if we don't care about signed
  // zero.
  switch (Pred) {
  default: break;
  case CmpInst::FCMP_OGT: case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_UGT: case CmpInst::FCMP_ULT:
    if (!HasMismatchedZeros)
      break;
    [[fallthrough]];
  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
        !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
  bool Ordered = false;

  // When given one NaN and one non-NaN input:
  //  - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //  - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //    ordered comparison fails), which could be NaN or non-NaN.
  // So here we discover exactly what NaN behavior is required/accepted.
  if (CmpInst::isFPPredicate(Pred)) {
    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);

    if (LHSSafe && RHSSafe) {
      // Both operands are known non-NaN.
      NaNBehavior = SPNB_RETURNS_ANY;
    } else if (CmpInst::isOrdered(Pred)) {
      // An ordered comparison will return false when given a NaN, so it
      // returns the RHS.
      Ordered = true;
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
        NaNBehavior = SPNB_RETURNS_NAN;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_OTHER;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    } else {
      Ordered = false;
      // An unordered comparison will return true when given a NaN, so it
      // returns the LHS.
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
        NaNBehavior = SPNB_RETURNS_OTHER;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_NAN;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    }
  }

  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (NaNBehavior == SPNB_RETURNS_NAN)
      NaNBehavior = SPNB_RETURNS_OTHER;
    else if (NaNBehavior == SPNB_RETURNS_OTHER)
      NaNBehavior = SPNB_RETURNS_NAN;
    Ordered = !Ordered;
  }

  // ([if]cmp X, Y) ? X : Y
  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
    switch (Pred) {
    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
    case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_UGE:
    case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
    case FCmpInst::FCMP_ULT:
    case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }

  if (isKnownNegation(TrueVal, FalseVal)) {
    // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
    // match against either LHS or sext(LHS).
    auto MaybeSExtCmpLHS =
        m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
    auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
    auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
    if (match(TrueVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value
      // (-X >s 0), swap the return values because the negated value is
      // always 'RHS'.
      LHS = TrueVal;
      RHS = FalseVal;
      if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
      // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_ABS, SPNB_NA, false};

      // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
        return {SPF_ABS, SPNB_NA, false};

      // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
      // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_NABS, SPNB_NA, false};
    } else if (match(FalseVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value (-X >s 0),
      // swap the return values because the negated value is always 'RHS'.
      LHS = FalseVal;
      RHS = TrueVal;
      if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
      // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_NABS, SPNB_NA, false};

      // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
      // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_ABS, SPNB_NA, false};
    }
  }

  if (CmpInst::isIntPredicate(Pred))
    return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS,
                       Depth);

  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
  // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
  // semantics than minNum. Be conservative in such cases.
  if (NaNBehavior != SPNB_RETURNS_ANY ||
      (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
       !isKnownNonZero(CmpRHS)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}

/// Helps to match a select pattern in case of a type mismatch.
///
/// The function handles the case where the types of the true and false values
/// of a select instruction differ from the types of the cmp instruction's
/// operands because of a cast instruction. The function checks whether it is
/// legal to move the cast operation after the "select". If so, it returns the
/// new second value of the "select" (with the assumption that the cast has
/// been moved):
/// 1. As the operand of the cast instruction when both values of the "select"
///    are the same cast instruction.
/// 2. As the restored constant (by applying the reverse cast operation) when
///    the first value of the "select" is a cast operation and the second value
///    is a constant.
/// NOTE: We return only the new second value because the first value can be
/// accessed as the operand of the cast instruction.
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
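    // For example, in "select i1 %c, (sext i8 %a to i32), (sext i8 %b to i32)"
    // the pattern can be analyzed on the narrow i8 values %a and %b instead.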
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move the trunc after the select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C could be extended in any way because we don't care about
      // the upper bits after truncation. It can't be an abs pattern, because
      // that would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only the min/max pattern can be matched. Such a match requires the
      // widened C to equal CmpConst, so we set the widened C = CmpConst here;
      // the condition trunc(CmpConst) == C is checked below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
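  // For example, with *CastOp == ZExt, SrcTy == i8, and C == i32 300:
  // trunc(300) is 44, and zext(44) == 44 != 300, so 300 is rejected here.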
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}

SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {
  if (Depth >= MaxAnalysisRecursionDepth)
    return {SPF_UNKNOWN, SPNB_NA, false};

  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();

  return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
                                            CastOp, Depth);
}

SelectPatternResult llvm::matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp, unsigned Depth) {
  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS, Depth);
}

CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
  if (SPF == SPF_FMINNUM)
    return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
  if (SPF == SPF_FMAXNUM)
    return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
  llvm_unreachable("unhandled!");
}

SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
  if (SPF == SPF_SMIN) return SPF_SMAX;
  if (SPF == SPF_UMIN) return SPF_UMAX;
  if (SPF == SPF_SMAX) return SPF_SMIN;
  if (SPF == SPF_UMAX) return SPF_UMIN;
  llvm_unreachable("unhandled!");
}

Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
  switch (MinMaxID) {
  case Intrinsic::smax: return Intrinsic::smin;
  case Intrinsic::smin: return Intrinsic::smax;
  case Intrinsic::umax: return Intrinsic::umin;
  case Intrinsic::umin: return Intrinsic::umax;
  // Note that the next four intrinsics may produce the same result for the
  // original and the inverted case even if X != Y, because NaN is handled
  // specially.
  case Intrinsic::maximum: return Intrinsic::minimum;
  case Intrinsic::minimum: return Intrinsic::maximum;
  case Intrinsic::maxnum: return Intrinsic::minnum;
  case Intrinsic::minnum: return Intrinsic::maxnum;
  default: llvm_unreachable("Unexpected intrinsic");
  }
}

APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
  switch (SPF) {
  case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
  case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
  case SPF_UMAX: return APInt::getMaxValue(BitWidth);
  case SPF_UMIN: return APInt::getMinValue(BitWidth);
  default: llvm_unreachable("Unexpected flavor");
  }
}

std::pair<Intrinsic::ID, bool>
llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
  // Check if VL contains select instructions that can be folded into a min/max
  // vector intrinsic and return the intrinsic if it is possible.
  // TODO: Support floating point min/max.
  bool AllCmpSingleUse = true;
  SelectPatternResult SelectPattern;
  SelectPattern.Flavor = SPF_UNKNOWN;
  if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
        Value *LHS, *RHS;
        auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
        if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
            CurrentPattern.Flavor == SPF_FMINNUM ||
            CurrentPattern.Flavor == SPF_FMAXNUM ||
            !I->getType()->isIntOrIntVectorTy())
          return false;
        if (SelectPattern.Flavor != SPF_UNKNOWN &&
            SelectPattern.Flavor != CurrentPattern.Flavor)
          return false;
        SelectPattern = CurrentPattern;
        AllCmpSingleUse &=
            match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
        return true;
      })) {
    switch (SelectPattern.Flavor) {
    case SPF_SMIN:
      return {Intrinsic::smin, AllCmpSingleUse};
    case SPF_UMIN:
      return {Intrinsic::umin, AllCmpSingleUse};
    case SPF_SMAX:
      return {Intrinsic::smax, AllCmpSingleUse};
    case SPF_UMAX:
      return {Intrinsic::umax, AllCmpSingleUse};
    default:
      llvm_unreachable("unexpected select pattern flavor");
    }
  }
  return {Intrinsic::not_intrinsic, false};
}

bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
                                 Value *&Start, Value *&Step) {
  // Handle the case of a simple two-predecessor recurrence PHI.
  // There's a lot more that could theoretically be done here, but
  // this is sufficient to catch some interesting cases.
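  // For example, in the loop below the phi %iv is matched with Start = %n and
  // Step = i32 1:
  //
  //   loop:
  //     %iv = phi i32 [ %n, %entry ], [ %iv.next, %loop ]
  //     %iv.next = add i32 %iv, 1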
  if (P->getNumIncomingValues() != 2)
    return false;

  for (unsigned i = 0; i != 2; ++i) {
    Value *L = P->getIncomingValue(i);
    Value *R = P->getIncomingValue(!i);
    Operator *LU = dyn_cast<Operator>(L);
    if (!LU)
      continue;
    unsigned Opcode = LU->getOpcode();

    switch (Opcode) {
    default:
      continue;
    // TODO: Expand list -- xor, div, gep, uaddo, etc.
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::Shl:
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Mul:
    case Instruction::FMul: {
      Value *LL = LU->getOperand(0);
      Value *LR = LU->getOperand(1);
      // Find a recurrence.
      if (LL == P)
        L = LR;
      else if (LR == P)
        L = LL;
      else
        continue; // Check for recurrence with L and R flipped.

      break; // Match!
    }
    }

    // We have matched a recurrence of the form:
    //   %iv = [R, %entry], [%iv.next, %backedge]
    //   %iv.next = binop %iv, L
    // OR
    //   %iv = [R, %entry], [%iv.next, %backedge]
    //   %iv.next = binop L, %iv
    BO = cast<BinaryOperator>(LU);
    Start = R;
    Step = L;
    return true;
  }
  return false;
}

bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
                                 Value *&Start, Value *&Step) {
  BinaryOperator *BO = nullptr;
  P = dyn_cast<PHINode>(I->getOperand(0));
  if (!P)
    P = dyn_cast<PHINode>(I->getOperand(1));
  return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
}

/// Return true if "icmp Pred LHS RHS" is always true.
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // RHS >> V u<= RHS for any V
    if (match(LHS, m_LShr(m_Specific(RHS), m_Value())))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return std::nullopt.
static std::optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return std::nullopt;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return std::nullopt;

  case CmpInst::ICMP_SGT:
  case CmpInst::ICMP_SGE:
    if (isTruePredicate(CmpInst::ICMP_SLE, ALHS, BLHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, BRHS, ARHS, DL, Depth))
      return true;
    return std::nullopt;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return std::nullopt;

  case CmpInst::ICMP_UGT:
  case CmpInst::ICMP_UGE:
    if (isTruePredicate(CmpInst::ICMP_ULE, ALHS, BLHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, BRHS, ARHS, DL, Depth))
      return true;
    return std::nullopt;
  }
}

/// Return true if the operands of two compares (expanded as "L0 pred L1" and
/// "R0 pred R1") match. AreSwappedOps is set to true when the operands match,
/// but are swapped.
static bool areMatchingOperands(const Value *L0, const Value *L1,
                                const Value *R0, const Value *R1,
                                bool &AreSwappedOps) {
  bool AreMatchingOps = (L0 == R0 && L1 == R1);
  AreSwappedOps = (L0 == R1 && L1 == R0);
  return AreMatchingOps || AreSwappedOps;
}

/// Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true.
/// Return false if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is false.
/// Otherwise, return std::nullopt if we can't infer anything.
static std::optional<bool>
isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
                              CmpInst::Predicate RPred, bool AreSwappedOps) {
  // Canonicalize the predicate as if the operands were not commuted.
  if (AreSwappedOps)
    RPred = ICmpInst::getSwappedPredicate(RPred);

  if (CmpInst::isImpliedTrueByMatchingCmp(LPred, RPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(LPred, RPred))
    return false;

  return std::nullopt;
}

/// Return true if "icmp LPred X, LC" implies "icmp RPred X, RC" is true.
/// Return false if "icmp LPred X, LC" implies "icmp RPred X, RC" is false.
/// Otherwise, return std::nullopt if we can't infer anything.
static std::optional<bool> isImpliedCondCommonOperandWithConstants(
    CmpInst::Predicate LPred, const APInt &LC, CmpInst::Predicate RPred,
    const APInt &RC) {
  ConstantRange DomCR = ConstantRange::makeExactICmpRegion(LPred, LC);
  ConstantRange CR = ConstantRange::makeExactICmpRegion(RPred, RC);
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return std::nullopt;
}

/// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1")
/// is true. Return false if LHS implies RHS is false. Otherwise, return
/// std::nullopt if we can't infer anything.
static std::optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                              CmpInst::Predicate RPred,
                                              const Value *R0, const Value *R1,
                                              const DataLayout &DL,
                                              bool LHSIsTrue, unsigned Depth) {
  Value *L0 = LHS->getOperand(0);
  Value *L1 = LHS->getOperand(1);

  // The rest of the logic assumes the LHS condition is true. If that's not the
  // case, invert the predicate to make it so.
  CmpInst::Predicate LPred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  // Can we infer anything when the two compares have matching operands?
  bool AreSwappedOps;
  if (areMatchingOperands(L0, L1, R0, R1, AreSwappedOps))
    return isImpliedCondMatchingOperands(LPred, RPred, AreSwappedOps);

  // Can we infer anything when the 0-operands match and the 1-operands are
  // constants (not necessarily matching)?
  const APInt *LC, *RC;
  if (L0 == R0 && match(L1, m_APInt(LC)) && match(R1, m_APInt(RC)))
    return isImpliedCondCommonOperandWithConstants(LPred, *LC, RPred, *RC);

  if (LPred == RPred)
    return isImpliedCondOperands(LPred, L0, L1, R0, R1, DL, Depth);

  return std::nullopt;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return std::nullopt if we can't infer anything. We
/// expect the RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select'
/// instruction.
static std::optional<bool>
isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
                   const Value *RHSOp0, const Value *RHSOp1,
                   const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
  // The LHS must be an 'or', 'and', or a 'select' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or ||
          LHS->getOpcode() == Instruction::Select) &&
         "Expected LHS to be 'and', 'or', or 'select'.");

  assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
  const Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (std::optional<bool> Implication = isImpliedCondition(
            ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (std::optional<bool> Implication = isImpliedCondition(
            ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return std::nullopt;
  }
  return std::nullopt;
}

std::optional<bool>
llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
                         const Value *RHSOp0, const Value *RHSOp1,
                         const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxAnalysisRecursionDepth)
    return std::nullopt;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
    return std::nullopt;

  assert(LHS->getType()->isIntOrIntVectorTy(1) &&
         "Expected integer type only!");

  // Both LHS and RHS are icmps.
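  // For example, "x u< 5" implies "x u< 10" is true and implies "x u> 20" is
  // false; both facts fall out of the constant-range intersection/difference
  // logic in isImpliedCondCommonOperandWithConstants above.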
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  if (LHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
                              Depth);

  // The LHS should be an 'or', 'and', or a 'select' instruction. We expect
  // the RHS to be an icmp.
  // FIXME: Add support for and/or/select on the RHS.
  if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
    if ((LHSI->getOpcode() == Instruction::And ||
         LHSI->getOpcode() == Instruction::Or ||
         LHSI->getOpcode() == Instruction::Select))
      return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
                                Depth);
  }
  return std::nullopt;
}

std::optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                             const DataLayout &DL,
                                             bool LHSIsTrue, unsigned Depth) {
  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  if (const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS))
    return isImpliedCondition(LHS, RHSCmp->getPredicate(),
                              RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
                              LHSIsTrue, Depth);

  if (Depth == MaxAnalysisRecursionDepth)
    return std::nullopt;

  // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2
  // LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
  const Value *RHS1, *RHS2;
  if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) {
    if (std::optional<bool> Imp =
            isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
      if (*Imp == true)
        return true;
    if (std::optional<bool> Imp =
            isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
      if (*Imp == true)
        return true;
  }
  if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) {
    if (std::optional<bool> Imp =
            isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
      if (*Imp == false)
        return false;
    if (std::optional<bool> Imp =
            isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
      if (*Imp == false)
        return false;
  }

  return std::nullopt;
}

// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
// condition dominating ContextI, or nullptr if no condition is found.
static std::pair<Value *, bool>
getDomPredecessorCondition(const Instruction *ContextI) {
  if (!ContextI || !ContextI->getParent())
    return {nullptr, false};

  // TODO: This is a poor/cheap way to determine dominance. Should we use a
  // dominator tree (e.g., from a SimplifyQuery) instead?
  const BasicBlock *ContextBB = ContextI->getParent();
  const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
  if (!PredBB)
    return {nullptr, false};

  // We need a conditional branch in the predecessor.
  Value *PredCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
    return {nullptr, false};

  // The branch should get simplified. Don't bother simplifying this condition.
  if (TrueBB == FalseBB)
    return {nullptr, false};

  assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
         "Predecessor block does not point to successor?");

  // Is this condition implied by the predecessor condition?
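  // For example, if the single predecessor ends in
  //   br i1 %cond, label %ContextBB, label %other
  // then %cond is known to be true on entry to ContextBB.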
  return {PredCond, TrueBB == ContextBB};
}

std::optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
                                                  const Instruction *ContextI,
                                                  const DataLayout &DL) {
  assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
  auto PredCond = getDomPredecessorCondition(ContextI);
  if (PredCond.first)
    return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
  return std::nullopt;
}

std::optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
                                                  const Value *LHS,
                                                  const Value *RHS,
                                                  const Instruction *ContextI,
                                                  const DataLayout &DL) {
  auto PredCond = getDomPredecessorCondition(ContextI);
  if (PredCond.first)
    return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
                              PredCond.second);
  return std::nullopt;
}

static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
                              APInt &Upper, const InstrInfoQuery &IIQ,
                              bool PreferSignedRange) {
  unsigned Width = Lower.getBitWidth();
  const APInt *C;
  switch (BO.getOpcode()) {
  case Instruction::Add:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
      bool HasNSW = IIQ.hasNoSignedWrap(&BO);
      bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);

      // If the caller expects a signed compare, then try to use a signed
      // range. Otherwise if both no-wraps are set, use the unsigned range
      // because it is never larger than the signed range. Example:
      // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
      if (PreferSignedRange && HasNSW && HasNUW)
        HasNUW = false;

      if (HasNUW) {
        // 'add nuw x, C' produces [C, UINT_MAX].
        Lower = *C;
      } else if (HasNSW) {
        if (C->isNegative()) {
          // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
          Lower = APInt::getSignedMinValue(Width);
          Upper = APInt::getSignedMaxValue(Width) + *C + 1;
        } else {
          // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
          Lower = APInt::getSignedMinValue(Width) + *C;
          Upper = APInt::getSignedMaxValue(Width) + 1;
        }
      }
    }
    break;

  case Instruction::And:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'and x, C' produces [0, C].
      Upper = *C + 1;
    break;

  case Instruction::Or:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'or x, C' produces [C, UINT_MAX].
      Lower = *C;
    break;

  case Instruction::AShr:
    if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
      Lower = APInt::getSignedMinValue(Width).ashr(*C);
      Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      unsigned ShiftAmount = Width - 1;
      if (!C->isZero() && IIQ.isExact(&BO))
        ShiftAmount = C->countr_zero();
      if (C->isNegative()) {
        // 'ashr C, x' produces [C, C >> (Width-1)]
        Lower = *C;
        Upper = C->ashr(ShiftAmount) + 1;
      } else {
        // 'ashr C, x' produces [C >> (Width-1), C]
        Lower = C->ashr(ShiftAmount);
        Upper = *C + 1;
      }
    }
    break;

  case Instruction::LShr:
    if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'lshr x, C' produces [0, UINT_MAX >> C].
      Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'lshr C, x' produces [C >> (Width-1), C].
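      // For example, 'lshr exact i8 12, x' can only produce 12, 6, or 3, so
      // the exact flag tightens the range from [0, 12] to [3, 12].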
      unsigned ShiftAmount = Width - 1;
      if (!C->isZero() && IIQ.isExact(&BO))
        ShiftAmount = C->countr_zero();
      Lower = C->lshr(ShiftAmount);
      Upper = *C + 1;
    }
    break;

  case Instruction::Shl:
    if (match(BO.getOperand(0), m_APInt(C))) {
      if (IIQ.hasNoUnsignedWrap(&BO)) {
        // 'shl nuw C, x' produces [C, C << CLZ(C)]
        Lower = *C;
        Upper = Lower.shl(Lower.countl_zero()) + 1;
      } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
        if (C->isNegative()) {
          // 'shl nsw C, x' produces [C << CLO(C)-1, C]
          unsigned ShiftAmount = C->countl_one() - 1;
          Lower = C->shl(ShiftAmount);
          Upper = *C + 1;
        } else {
          // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
          unsigned ShiftAmount = C->countl_zero() - 1;
          Lower = *C;
          Upper = C->shl(ShiftAmount) + 1;
        }
      }
    }
    break;

  case Instruction::SDiv:
    if (match(BO.getOperand(1), m_APInt(C))) {
      APInt IntMin = APInt::getSignedMinValue(Width);
      APInt IntMax = APInt::getSignedMaxValue(Width);
      if (C->isAllOnes()) {
        // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
        // (INT_MIN / -1 overflows, so INT_MIN is excluded).
        Lower = IntMin + 1;
        Upper = IntMax + 1;
      } else if (C->countl_zero() < Width - 1) {
        // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
        // where C != -1 and C != 0 and C != 1
        Lower = IntMin.sdiv(*C);
        Upper = IntMax.sdiv(*C);
        if (Lower.sgt(Upper))
          std::swap(Lower, Upper);
        Upper = Upper + 1;
        assert(Upper != Lower && "Upper part of range has wrapped!");
      }
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      if (C->isMinSignedValue()) {
        // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
        Lower = *C;
        Upper = Lower.lshr(1) + 1;
      } else {
        // 'sdiv C, x' produces [-|C|, |C|].
        Upper = C->abs() + 1;
        Lower = (-Upper) + 1;
      }
    }
    break;

  case Instruction::UDiv:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
      // 'udiv x, C' produces [0, UINT_MAX / C].
      Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'udiv C, x' produces [0, C].
      Upper = *C + 1;
    }
    break;

  case Instruction::SRem:
    if (match(BO.getOperand(1), m_APInt(C))) {
      // 'srem x, C' produces (-|C|, |C|).
      Upper = C->abs();
      Lower = (-Upper) + 1;
    }
    break;

  case Instruction::URem:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'urem x, C' produces [0, C).
      Upper = *C;
    break;

  default:
    break;
  }
}

static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II) {
  unsigned Width = II.getType()->getScalarSizeInBits();
  const APInt *C;
  switch (II.getIntrinsicID()) {
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
    // Maximum of set/clear bits is the bit width.
    return ConstantRange::getNonEmpty(APInt::getZero(Width),
                                      APInt(Width, Width + 1));
  case Intrinsic::uadd_sat:
    // uadd.sat(x, C) produces [C, UINT_MAX].
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C)))
      return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
    break;
  case Intrinsic::sadd_sat:
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative())
        // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
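        // For example, i8 sadd.sat(x, -16) produces [-128, 111]: the result
        // can never exceed 127 - 16 and saturates at -128 below.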
        return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
                                          APInt::getSignedMaxValue(Width) +
                                              *C + 1);

      // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
      return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) + *C,
                                        APInt::getSignedMaxValue(Width) + 1);
    }
    break;
  case Intrinsic::usub_sat:
    // usub.sat(C, x) produces [0, C].
    if (match(II.getOperand(0), m_APInt(C)))
      return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);

    // usub.sat(x, C) produces [0, UINT_MAX - C].
    if (match(II.getOperand(1), m_APInt(C)))
      return ConstantRange::getNonEmpty(APInt::getZero(Width),
                                        APInt::getMaxValue(Width) - *C + 1);
    break;
  case Intrinsic::ssub_sat:
    if (match(II.getOperand(0), m_APInt(C))) {
      if (C->isNegative())
        // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
        return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
                                          *C - APInt::getSignedMinValue(Width) +
                                              1);

      // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
      return ConstantRange::getNonEmpty(*C - APInt::getSignedMaxValue(Width),
                                        APInt::getSignedMaxValue(Width) + 1);
    } else if (match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative())
        // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX].
        return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) - *C,
                                          APInt::getSignedMaxValue(Width) + 1);

      // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
      return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
                                        APInt::getSignedMaxValue(Width) - *C +
                                            1);
    }
    break;
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax:
    if (!match(II.getOperand(0), m_APInt(C)) &&
        !match(II.getOperand(1), m_APInt(C)))
      break;

    switch (II.getIntrinsicID()) {
    case Intrinsic::umin:
      return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);
    case Intrinsic::umax:
      return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
    case Intrinsic::smin:
      return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
                                        *C + 1);
    case Intrinsic::smax:
      return ConstantRange::getNonEmpty(*C,
                                        APInt::getSignedMaxValue(Width) + 1);
    default:
      llvm_unreachable("Must be min/max intrinsic");
    }
    break;
  case Intrinsic::abs:
    // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
    // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
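    // E.g. for i8: with the INT_MIN-is-poison operand set to true, the result
    // is in [0, 127]; without it, abs(-128) stays -128 (bit pattern 0x80), so
    // that value must be included as well.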
    if (match(II.getOperand(1), m_One()))
      return ConstantRange::getNonEmpty(APInt::getZero(Width),
                                        APInt::getSignedMaxValue(Width) + 1);

    return ConstantRange::getNonEmpty(APInt::getZero(Width),
                                      APInt::getSignedMinValue(Width) + 1);
  case Intrinsic::vscale:
    if (!II.getParent() || !II.getFunction())
      break;
    return getVScaleRange(II.getFunction(), Width);
  default:
    break;
  }

  return ConstantRange::getFull(Width);
}

static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
                                      APInt &Upper, const InstrInfoQuery &IIQ) {
  const Value *LHS = nullptr, *RHS = nullptr;
  SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
  if (R.Flavor == SPF_UNKNOWN)
    return;

  unsigned BitWidth = SI.getType()->getScalarSizeInBits();

  if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
    // If the negation part of the abs (in RHS) has the NSW flag,
    // then the result of abs(X) is [0..SIGNED_MAX],
    // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
    Lower = APInt::getZero(BitWidth);
    if (match(RHS, m_Neg(m_Specific(LHS))) &&
        IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
      Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    else
      Upper = APInt::getSignedMinValue(BitWidth) + 1;
    return;
  }

  if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
    // The result of -abs(X) is <= 0.
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = APInt(BitWidth, 1);
    return;
  }

  const APInt *C;
  if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
    return;

  switch (R.Flavor) {
  case SPF_UMIN:
    Upper = *C + 1;
    break;
  case SPF_UMAX:
    Lower = *C;
    break;
  case SPF_SMIN:
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = *C + 1;
    break;
  case SPF_SMAX:
    Lower = *C;
    Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    break;
  default:
    break;
  }
}

static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
  // The maximum representable value of a half is 65504. For floats the maximum
  // value is 3.4e38, which requires roughly 129 bits.
  unsigned BitWidth = I->getType()->getScalarSizeInBits();
  if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
    return;
  if (isa<FPToSIInst>(I) && BitWidth >= 17) {
    Lower = APInt(BitWidth, -65504);
    Upper = APInt(BitWidth, 65505);
  }

  if (isa<FPToUIInst>(I) && BitWidth >= 16) {
    // For a fptoui the lower limit is left as 0.
    Upper = APInt(BitWidth, 65505);
  }
}

ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
                                         bool UseInstrInfo, AssumptionCache *AC,
                                         const Instruction *CtxI,
                                         const DominatorTree *DT,
                                         unsigned Depth) {
  assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");

  if (Depth == MaxAnalysisRecursionDepth)
    return ConstantRange::getFull(V->getType()->getScalarSizeInBits());

  const APInt *C;
  if (match(V, m_APInt(C)))
    return ConstantRange(*C);

  InstrInfoQuery IIQ(UseInstrInfo);
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  ConstantRange CR = ConstantRange::getFull(BitWidth);
  if (auto *BO = dyn_cast<BinaryOperator>(V)) {
    APInt Lower = APInt(BitWidth, 0);
    APInt Upper = APInt(BitWidth, 0);
    // TODO: Return ConstantRange.
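    // (Leaving Lower == Upper == 0 means "no information": getNonEmpty treats
    // equal bounds as the full set.)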
    setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
    CR = ConstantRange::getNonEmpty(Lower, Upper);
  } else if (auto *II = dyn_cast<IntrinsicInst>(V))
    CR = getRangeForIntrinsic(*II);
  else if (auto *SI = dyn_cast<SelectInst>(V)) {
    APInt Lower = APInt(BitWidth, 0);
    APInt Upper = APInt(BitWidth, 0);
    // TODO: Return ConstantRange.
    setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
    CR = ConstantRange::getNonEmpty(Lower, Upper);
  } else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V)) {
    APInt Lower = APInt(BitWidth, 0);
    APInt Upper = APInt(BitWidth, 0);
    // TODO: Return ConstantRange.
    setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
    CR = ConstantRange::getNonEmpty(Lower, Upper);
  }

  if (auto *I = dyn_cast<Instruction>(V))
    if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
      CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));

  if (CtxI && AC) {
    // Try to restrict the range based on information from assumptions.
    for (auto &AssumeVH : AC->assumptionsFor(V)) {
      if (!AssumeVH)
        continue;
      CallInst *I = cast<CallInst>(AssumeVH);
      assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
             "Got assumption for the wrong function!");
      assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
             "must be an assume intrinsic");

      if (!isValidAssumeForContext(I, CtxI, DT))
        continue;
      Value *Arg = I->getArgOperand(0);
      ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
      // Currently we just use information from comparisons.
      if (!Cmp || Cmp->getOperand(0) != V)
        continue;
      // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
      ConstantRange RHS =
          computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
                               UseInstrInfo, AC, I, DT, Depth + 1);
      CR = CR.intersectWith(
          ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
    }
  }

  return CR;
}
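
// Illustrative usage sketch (not called by the analyses above; the helper
// name `foldCmpWithKnownRange` is hypothetical). A typical client combines
// computeConstantRange() with ConstantRange::makeExactICmpRegion() to decide
// whether a comparison is statically true or false, much as the assume
// handling above intersects ranges.
LLVM_ATTRIBUTE_UNUSED
static std::optional<bool> foldCmpWithKnownRange(const ICmpInst *Cmp) {
  const APInt *RC;
  if (!match(Cmp->getOperand(1), m_APInt(RC)))
    return std::nullopt;

  // Range of the left-hand side, preferring the signed view for signed
  // predicates.
  ConstantRange LCR = computeConstantRange(Cmp->getOperand(0), Cmp->isSigned());
  // Exact set of values for which the predicate holds against RC.
  ConstantRange RCR =
      ConstantRange::makeExactICmpRegion(Cmp->getPredicate(), *RC);

  if (RCR.contains(LCR))
    return true;  // The compare is always true.
  if (RCR.inverse().contains(LCR))
    return false; // The compare is always false.
  return std::nullopt; // The ranges overlap; nothing can be concluded.
}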