//===- InstCombineMulDivRem.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
// srem, urem, frem.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// The specific integer value is used in a context where it is known to be
/// non-zero. If this allows us to simplify the computation, do so and return
/// the new operand, otherwise return null.
static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
                                        Instruction &CxtI) {
  // If V has multiple uses, then we would have to do more analysis to
  // determine if this is safe. For example, the use could be in dynamically
  // unreachable code.
  if (!V->hasOneUse()) return nullptr;

  bool MadeChange = false;

  // ((1 << A) >>u B) --> (1 << (A-B))
  // Because V cannot be zero, we know that B is less than A.
  Value *A = nullptr, *B = nullptr, *One = nullptr;
  if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
      match(One, m_One())) {
    A = IC.Builder.CreateSub(A, B);
    return IC.Builder.CreateShl(One, A);
  }

  // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
  // inexact. Similarly for <<.
  BinaryOperator *I = dyn_cast<BinaryOperator>(V);
  if (I && I->isLogicalShift() &&
      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
    // We know that this is an exact/nuw shift and that the input is a
    // non-zero context as well.
    if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
      I->setOperand(0, V2);
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
      I->setIsExact();
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
      I->setHasNoUnsignedWrap();
      MadeChange = true;
    }
  }

  // TODO: Lots more we could do here:
  //    If V is a phi node, we can call this on each of its operands.
  //    "select cond, X, 0" can simplify to "X".

  return MadeChange ? V : nullptr;
}

/// A helper routine of InstCombiner::visitMul().
///
/// If C is a scalar/vector of known powers of 2, then this function returns
/// a new scalar/vector obtained from logBase2 of C.
/// Return a null pointer otherwise.
static Constant *getLogBase2(Type *Ty, Constant *C) {
  const APInt *IVal;
  if (match(C, m_APInt(IVal)) && IVal->isPowerOf2())
    return ConstantInt::get(Ty, IVal->logBase2());

  if (!Ty->isVectorTy())
    return nullptr;

  SmallVector<Constant *, 4> Elts;
  for (unsigned I = 0, E = Ty->getVectorNumElements(); I != E; ++I) {
    Constant *Elt = C->getAggregateElement(I);
    if (!Elt)
      return nullptr;
    if (isa<UndefValue>(Elt)) {
      Elts.push_back(UndefValue::get(Ty->getScalarType()));
      continue;
    }
    if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2())
      return nullptr;
    Elts.push_back(ConstantInt::get(Ty->getScalarType(), IVal->logBase2()));
  }

  return ConstantVector::get(Elts);
}

Instruction *InstCombiner::visitMul(BinaryOperator &I) {
  if (Value *V = SimplifyMulInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // X * -1 == 0 - X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_AllOnes())) {
    BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
    if (I.hasNoSignedWrap())
      BO->setHasNoSignedWrap();
    return BO;
  }

  // Also allow combining multiply instructions on vectors.
  {
    Value *NewOp;
    Constant *C1, *C2;
    const APInt *IVal;
    if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
                        m_Constant(C1))) &&
        match(C1, m_APInt(IVal))) {
      // ((X << C2)*C1) == (X * (C1 << C2))
      Constant *Shl = ConstantExpr::getShl(C1, C2);
      BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
      BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
      if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
          Shl->isNotMinSignedValue())
        BO->setHasNoSignedWrap();
      return BO;
    }

    if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
      // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
      if (Constant *NewCst = getLogBase2(NewOp->getType(), C1)) {
        BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);

        if (I.hasNoUnsignedWrap())
          Shl->setHasNoUnsignedWrap();
        if (I.hasNoSignedWrap()) {
          const APInt *V;
          if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
            Shl->setHasNoSignedWrap();
        }

        return Shl;
      }
    }
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    // (Y - X) * (-(2**n)) -> (X - Y) * (2**n), for positive nonzero n
    // (Y + const) * (-(2**n)) -> (-const - Y) * (2**n), for positive nonzero n
    // The "* (2**n)" thus becomes a potential shifting opportunity.
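    // Illustrative example (hypothetical values): (y - x) * -4 is rewritten
    // below as (x - y) * 4, and a later fold can turn the "* 4" into "<< 2".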
    {
      const APInt &Val = CI->getValue();
      const APInt &PosVal = Val.abs();
      if (Val.isNegative() && PosVal.isPowerOf2()) {
        Value *X = nullptr, *Y = nullptr;
        if (Op0->hasOneUse()) {
          ConstantInt *C1;
          Value *Sub = nullptr;
          if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
            Sub = Builder.CreateSub(X, Y, "suba");
          else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
            Sub = Builder.CreateSub(Builder.CreateNeg(C1), Y, "subc");
          if (Sub)
            return
              BinaryOperator::CreateMul(Sub,
                                        ConstantInt::get(Y->getType(), PosVal));
        }
      }
    }
  }

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  // Simplify mul instructions with a constant RHS.
  if (isa<Constant>(Op1)) {
    // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
    Value *X;
    Constant *C1;
    if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
      Value *Mul = Builder.CreateMul(C1, Op1);
      // Only go forward with the transform if C1*CI simplifies to a tidier
      // constant.
      if (!match(Mul, m_Mul(m_Value(), m_Value())))
        return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
    }
  }

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *Op1C;
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
    return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));

  // -X * -Y --> X * Y
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
    auto *NewMul = BinaryOperator::CreateMul(X, Y);
    if (I.hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
      NewMul->setHasNoSignedWrap();
    return NewMul;
  }

  // -X * Y --> -(X * Y)
  // X * -Y --> -(X * Y)
  if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));

  // (X / Y) * Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
  {
    Value *Y = Op1;
    BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
                 Div->getOpcode() != Instruction::SDiv)) {
      Y = Op0;
      Div = dyn_cast<BinaryOperator>(Op1);
    }
    Value *Neg = dyn_castNegVal(Y);
    if (Div && Div->hasOneUse() &&
        (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
        (Div->getOpcode() == Instruction::UDiv ||
         Div->getOpcode() == Instruction::SDiv)) {
      Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);

      // If the division is exact, X % Y is zero, so we end up with X or -X.
      if (Div->isExact()) {
        if (DivOp1 == Y)
          return replaceInstUsesWith(I, X);
        return BinaryOperator::CreateNeg(X);
      }

      auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
                                                          : Instruction::SRem;
      Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
      if (DivOp1 == Y)
        return BinaryOperator::CreateSub(X, Rem);
      return BinaryOperator::CreateSub(Rem, X);
    }
  }

  /// i1 mul -> i1 and.
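  // For i1 values, multiplication and logical AND agree on every input
  // (1*1 == 1 and anything times 0 is 0), so the AND below is equivalent.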
  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateAnd(Op0, Op1);

  // X*(1 << Y) --> X << Y
  // (1 << Y)*X --> X << Y
  {
    Value *Y;
    BinaryOperator *BO = nullptr;
    bool ShlNSW = false;
    if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op1, Y);
      ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
    } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op0, Y);
      ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
    }
    if (BO) {
      if (I.hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && ShlNSW)
        BO->setHasNoSignedWrap();
      return BO;
    }
  }

  // (bool X) * Y --> X ? Y : 0
  // Y * (bool X) --> X ? Y : 0
  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
  if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));

  // (lshr X, 31) * Y --> (ashr X, 31) & Y
  // Y * (lshr X, 31) --> (ashr X, 31) & Y
  // TODO: We are not checking one-use because the elimination of the multiply
  //       is better for analysis?
  // TODO: Should we canonicalize to '(X < 0) ? Y : 0' instead? That would be
  //       more similar to what we're doing above.
  const APInt *C;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op1);
  if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }

  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
  if (Value *V = SimplifyFMulInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  // X * -1.0 --> -X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_SpecificFP(-1.0)))
    return BinaryOperator::CreateFNegFMF(Op0, &I);

  // -X * -Y --> X * Y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateFMulFMF(X, Y, &I);

  // -X * C --> X * -C
  Constant *C;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
    return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);

  // Sink negation: -X * Y --> -(X * Y)
  // But don't transform constant expressions because there's an inverse fold.
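  // (Presumably to avoid ping-ponging with the inverse fold above, which
  // could make the combiner loop.)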
  if (match(Op0, m_OneUse(m_FNeg(m_Value(X)))) && !isa<ConstantExpr>(Op0))
    return BinaryOperator::CreateFNegFMF(Builder.CreateFMulFMF(X, Op1, &I), &I);

  // Sink negation: Y * -X --> -(X * Y)
  // But don't transform constant expressions because there's an inverse fold.
  if (match(Op1, m_OneUse(m_FNeg(m_Value(X)))) && !isa<ConstantExpr>(Op1))
    return BinaryOperator::CreateFNegFMF(Builder.CreateFMulFMF(X, Op0, &I), &I);

  // fabs(X) * fabs(X) -> X * X
  if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::fabs>(m_Value(X))))
    return BinaryOperator::CreateFMulFMF(X, X, &I);

  // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc()) {
    // Reassociate constant RHS with another constant to form constant
    // expression.
    if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
      Constant *C1;
      if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
        // (C1 / X) * C --> (C * C1) / X
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        if (CC1->isNormalFP())
          return BinaryOperator::CreateFDivFMF(CC1, X, &I);
      }
      if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
        // (X / C1) * C --> X * (C / C1)
        Constant *CDivC1 = ConstantExpr::getFDiv(C, C1);
        if (CDivC1->isNormalFP())
          return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);

        // If the constant was a denormal, try reassociating differently.
        // (X / C1) * C --> X / (C1 / C)
        Constant *C1DivC = ConstantExpr::getFDiv(C1, C);
        if (Op0->hasOneUse() && C1DivC->isNormalFP())
          return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
      }

      // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
      // canonicalized to 'fadd X, C'. Distributing the multiply may allow
      // further folds and (X * C) + C2 is 'fma'.
      if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
        // (X + C1) * C --> (X * C) + (C * C1)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
      }
      if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
        // (C1 - X) * C --> (C * C1) - (X * C)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
      }
    }

    Value *Z;
    if (match(&I, m_c_FMul(m_OneUse(m_FDiv(m_Value(X), m_Value(Y))),
                           m_Value(Z)))) {
      // Sink division: (X / Y) * Z --> (X * Z) / Y
      Value *NewFMul = Builder.CreateFMulFMF(X, Z, &I);
      return BinaryOperator::CreateFDivFMF(NewFMul, Y, &I);
    }

    // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
    // nnan disallows the possibility of returning a number if both operands
    // are negative (in that case, we should return NaN).
    if (I.hasNoNaNs() &&
        match(Op0, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(X)))) &&
        match(Op1, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
      Value *XY = Builder.CreateFMulFMF(X, Y, &I);
      Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
      return replaceInstUsesWith(I, Sqrt);
    }

    // Like the similar transform in instsimplify, this requires 'nsz' because
    // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
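    // Requiring exactly two uses of Op0 (it appears as both operands of this
    // fmul) means the fdiv matched below has no other users and can simply
    // die after the rewrite.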
    if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 &&
        Op0->hasNUses(2)) {
      // Peek through fdiv to find squaring of square root:
      // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
      if (match(Op0, m_FDiv(m_Value(X),
                            m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
        Value *XX = Builder.CreateFMulFMF(X, X, &I);
        return BinaryOperator::CreateFDivFMF(XX, Y, &I);
      }
      // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
      if (match(Op0, m_FDiv(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y)),
                            m_Value(X)))) {
        Value *XX = Builder.CreateFMulFMF(X, X, &I);
        return BinaryOperator::CreateFDivFMF(Y, XX, &I);
      }
    }

    // exp(X) * exp(Y) -> exp(X + Y)
    // Match as long as at least one of exp has only one use.
    if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
        match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y))) &&
        (Op0->hasOneUse() || Op1->hasOneUse())) {
      Value *XY = Builder.CreateFAddFMF(X, Y, &I);
      Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
      return replaceInstUsesWith(I, Exp);
    }

    // exp2(X) * exp2(Y) -> exp2(X + Y)
    // Match as long as at least one of exp2 has only one use.
    if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
        match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y))) &&
        (Op0->hasOneUse() || Op1->hasOneUse())) {
      Value *XY = Builder.CreateFAddFMF(X, Y, &I);
      Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
      return replaceInstUsesWith(I, Exp2);
    }

    // (X*Y) * X => (X*X) * Y where Y != X
    // The purpose is two-fold:
    //   1) to form a power expression (of X).
    //   2) potentially shorten the critical path: After transformation, the
    //      latency of the instruction Y is amortized by the expression of
    //      X*X, and therefore Y is in a "less critical" position compared to
    //      what it was before the transformation.
    if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
        Op1 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
    if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
        Op0 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
  }

  // log2(X * 0.5) * Y = log2(X) * Y - Y
  if (I.isFast()) {
    IntrinsicInst *Log2 = nullptr;
    if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op0);
      Y = Op1;
    }
    if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op1);
      Y = Op0;
    }
    if (Log2) {
      Log2->setArgOperand(0, X);
      Log2->copyFastMathFlags(&I);
      Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
      return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
    }
  }

  return nullptr;
}

/// Fold a divide or remainder with a select instruction divisor when one of
/// the select operands is zero. In that case, we can use the other select
/// operand because div/rem by zero is undefined.
bool InstCombiner::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
  SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
  if (!SI)
    return false;

  int NonNullOperand;
  if (match(SI->getTrueValue(), m_Zero()))
    // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
    NonNullOperand = 2;
  else if (match(SI->getFalseValue(), m_Zero()))
    // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
    NonNullOperand = 1;
  else
    return false;

  // Change the div/rem to use 'Y' instead of the select.
  I.setOperand(1, SI->getOperand(NonNullOperand));

  // Okay, we know we can replace the operand of the div/rem with 'Y' with no
  // problem. However, the select, or the condition of the select may have
  // multiple uses. Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.

  // If the select and condition only have a single use, don't bother with
  // this; exit early.
  Value *SelectCond = SI->getCondition();
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;

  // Scan the current block backward, looking for other uses of SI.
  BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
  Type *CondTy = SelectCond->getType();
  while (BBI != BBFront) {
    --BBI;
    // If we find an instruction that we can't assume will return, then
    // information from below it cannot be propagated above it.
    if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
      break;

    // Replace uses of the select or its condition with the known values.
    for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
         I != E; ++I) {
      if (*I == SI) {
        *I = SI->getOperand(NonNullOperand);
        Worklist.Add(&*BBI);
      } else if (*I == SelectCond) {
        *I = NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
                                 : ConstantInt::getFalse(CondTy);
        Worklist.Add(&*BBI);
      }
    }

    // If we passed the instruction, quit looking for it.
    if (&*BBI == SI)
      SI = nullptr;
    if (&*BBI == SelectCond)
      SelectCond = nullptr;

    // If we ran out of things to eliminate, break out of the loop.
    if (!SelectCond && !SI)
      break;
  }
  return true;
}

/// True if the multiply cannot be expressed in an int this size.
static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
                              bool IsSigned) {
  bool Overflow;
  Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
  return Overflow;
}

/// True if C1 is a multiple of C2. Quotient contains C1/C2.
static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
                       bool IsSigned) {
  assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");

  // Bail if we will divide by zero.
  if (C2.isNullValue())
    return false;

  // Bail if we would divide INT_MIN by -1.
  if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue())
    return false;

  APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
  if (IsSigned)
    APInt::sdivrem(C1, C2, Quotient, Remainder);
  else
    APInt::udivrem(C1, C2, Quotient, Remainder);

  return Remainder.isMinValue();
}

/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
/// Common integer divide transforms
Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Type *Ty = I.getType();

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
    I.setOperand(1, V);
    return &I;
  }

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  const APInt *C2;
  if (match(Op1, m_APInt(C2))) {
    Value *X;
    const APInt *C1;

    // (X / C1) / C2  -> X / (C1*C2)
    if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
      APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
        return BinaryOperator::Create(I.getOpcode(), X,
                                      ConstantInt::get(Ty, Product));
    }

    if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);

      // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
      if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
        auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
                                              ConstantInt::get(Ty, Quotient));
        NewDiv->setIsExact(I.isExact());
        return NewDiv;
      }

      // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
      if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
         *C1 != C1->getBitWidth() - 1) ||
        (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      APInt C1Shifted = APInt::getOneBitSet(
          C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue()));

      // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
      if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
        auto *BO = BinaryOperator::Create(I.getOpcode(), X,
                                          ConstantInt::get(Ty, Quotient));
        BO->setIsExact(I.isExact());
        return BO;
      }

      // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
      if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if (!C2->isNullValue()) // avoid X udiv 0
      if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
        return FoldedDiv;
  }

  if (match(Op0, m_One())) {
    assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
    if (IsSigned) {
      // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the
      // result is one, if Op1 is -1 then the result is minus one, otherwise
      // it's zero.
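      // The select below works because for Op1 in {-1, 0, 1}, Op1 + 1 lands
      // in {0, 1, 2}, so the unsigned "< 3" test isolates exactly those
      // cases (and Op1 == 0 is UB, so any result is acceptable there).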
      Value *Inc = Builder.CreateAdd(Op1, Op0);
      Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
      return SelectInst::Create(Cmp, Op1, ConstantInt::get(Ty, 0));
    } else {
      // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
      // result is one, otherwise it's zero.
      return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
    }
  }

  // See if we can fold away this div instruction.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
  Value *X, *Z;
  if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
    if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
        (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
      return BinaryOperator::Create(I.getOpcode(), X, Op1);

  // (X << Y) / X -> 1 << Y
  Value *Y;
  if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
  if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);

  // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
  if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
    bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
    bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
    if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
      I.setOperand(0, ConstantInt::get(Ty, 1));
      I.setOperand(1, Y);
      return &I;
    }
  }

  return nullptr;
}

static const unsigned MaxDepth = 6;

namespace {

using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1,
                                           const BinaryOperator &I,
                                           InstCombiner &IC);

/// Used to maintain state for visitUDivOperand().
struct UDivFoldAction {
  /// Informs visitUDiv() how to fold this operand. This can be zero if this
  /// action joins two actions together.
  FoldUDivOperandCb FoldAction;

  /// Which operand to fold.
  Value *OperandToFold;

  union {
    /// The instruction returned when FoldAction is invoked.
    Instruction *FoldResult;

    /// Stores the LHS action index if this action joins two actions together.
    size_t SelectLHSIdx;
  };

  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
      : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {}
  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
      : FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};

} // end anonymous namespace

// X udiv 2^C -> X >> C
static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
                                    const BinaryOperator &I, InstCombiner &IC) {
  Constant *C1 = getLogBase2(Op0->getType(), cast<Constant>(Op1));
  if (!C1)
    llvm_unreachable("Failed to constant fold udiv -> logbase2");
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, C1);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
// X udiv (zext (C1 << N)), where C1 is "1<<C2" --> X >> (N+C2)
static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I,
                                InstCombiner &IC) {
  Value *ShiftLeft;
  if (!match(Op1, m_ZExt(m_Value(ShiftLeft))))
    ShiftLeft = Op1;

  Constant *CI;
  Value *N;
  if (!match(ShiftLeft, m_Shl(m_Constant(CI), m_Value(N))))
    llvm_unreachable("match should never fail here!");
  Constant *Log2Base = getLogBase2(N->getType(), CI);
  if (!Log2Base)
    llvm_unreachable("getLogBase2 should never fail here!");
  N = IC.Builder.CreateAdd(N, Log2Base);
  if (Op1 != ShiftLeft)
    N = IC.Builder.CreateZExt(N, Op1->getType());
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// Recursively visits the possible right hand operands of a udiv
// instruction, seeing through select instructions, to determine if we can
// replace the udiv with something simpler. If we find that an operand is not
// able to simplify the udiv, we abort the entire transformation.
static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
                               SmallVectorImpl<UDivFoldAction> &Actions,
                               unsigned Depth = 0) {
  // Check to see if this is an unsigned division with an exact power of 2,
  // if so, convert to a right shift.
  if (match(Op1, m_Power2())) {
    Actions.push_back(UDivFoldAction(foldUDivPow2Cst, Op1));
    return Actions.size();
  }

  // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
  if (match(Op1, m_Shl(m_Power2(), m_Value())) ||
      match(Op1, m_ZExt(m_Shl(m_Power2(), m_Value())))) {
    Actions.push_back(UDivFoldAction(foldUDivShl, Op1));
    return Actions.size();
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return 0;

  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
    if (size_t LHSIdx =
            visitUDivOperand(Op0, SI->getOperand(1), I, Actions, Depth))
      if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions, Depth)) {
        Actions.push_back(UDivFoldAction(nullptr, Op1, LHSIdx - 1));
        return Actions.size();
      }

  return 0;
}

/// If we have zero-extended operands of an unsigned div or rem, we may be able
/// to narrow the operation (sink the zext below the math).
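/// For example (illustrative IR):
///   udiv (zext i8 %x to i32), (zext i8 %y to i32)
///     --> zext (udiv i8 %x, %y) to i32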
static Instruction *narrowUDivURem(BinaryOperator &I,
                                   InstCombiner::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  Value *N = I.getOperand(0);
  Value *D = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X, *Y;
  if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
      X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
    // udiv (zext X), (zext Y) --> zext (udiv X, Y)
    // urem (zext X), (zext Y) --> zext (urem X, Y)
    Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
    return new ZExtInst(NarrowOp, Ty);
  }

  Constant *C;
  if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) ||
      (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) {
    // If the constant is the same in the smaller type, use the narrow version.
    Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
    if (ConstantExpr::getZExt(TruncC, Ty) != C)
      return nullptr;

    // udiv (zext X), C --> zext (udiv X, C')
    // urem (zext X), C --> zext (urem X, C')
    // udiv C, (zext X) --> zext (udiv C', X)
    // urem C, (zext X) --> zext (urem C', X)
    Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC)
                                       : Builder.CreateBinOp(Opcode, TruncC, X);
    return new ZExtInst(NarrowOp, Ty);
  }

  return nullptr;
}

Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
  if (Value *V = SimplifyUDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  const APInt *C1, *C2;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
    // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
    bool Overflow;
    APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
    if (!Overflow) {
      bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
      BinaryOperator *BO = BinaryOperator::CreateUDiv(
          X, ConstantInt::get(X->getType(), C2ShlC1));
      if (IsExact)
        BO->setIsExact();
      return BO;
    }
  }

  // Op0 / C where C is large (negative) --> zext (Op0 >= C)
  // TODO: Could use isKnownNegative() to handle non-constant values.
  Type *Ty = I.getType();
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }
  // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
    return NarrowDiv;

  // If the udiv operands are non-overflowing multiplies with a common operand,
  // then eliminate the common factor:
  // (A * B) / (A * X) --> B / X (and commuted variants)
  // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
  // TODO: If -reassociation handled this generally, we could remove this.
  Value *A, *B;
  if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
    if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
      return BinaryOperator::CreateUDiv(B, X);
    if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
      return BinaryOperator::CreateUDiv(A, X);
  }

  // (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...))))
  SmallVector<UDivFoldAction, 6> UDivActions;
  if (visitUDivOperand(Op0, Op1, I, UDivActions))
    for (unsigned i = 0, e = UDivActions.size(); i != e; ++i) {
      FoldUDivOperandCb Action = UDivActions[i].FoldAction;
      Value *ActionOp1 = UDivActions[i].OperandToFold;
      Instruction *Inst;
      if (Action)
        Inst = Action(Op0, ActionOp1, I, *this);
      else {
        // This action joins two actions together. The RHS of this action is
        // simply the last action we processed; we saved the LHS action index
        // in the joining action.
        size_t SelectRHSIdx = i - 1;
        Value *SelectRHS = UDivActions[SelectRHSIdx].FoldResult;
        size_t SelectLHSIdx = UDivActions[i].SelectLHSIdx;
        Value *SelectLHS = UDivActions[SelectLHSIdx].FoldResult;
        Inst = SelectInst::Create(cast<SelectInst>(ActionOp1)->getCondition(),
                                  SelectLHS, SelectRHS);
      }

      // If this is the last action to process, return it to the InstCombiner.
      // Otherwise, we insert it before the UDiv and record it so that we may
      // use it as part of a joining action (i.e., a SelectInst).
      if (e - i != 1) {
        Inst->insertBefore(&I);
        UDivActions[i].FoldResult = Inst;
      } else
        return Inst;
    }

  return nullptr;
}

Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
  if (Value *V = SimplifySDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  // sdiv Op0, -1 --> -Op0
  // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
  if (match(Op1, m_AllOnes()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return BinaryOperator::CreateNeg(Op0);

  // X / INT_MIN --> X == INT_MIN
  if (match(Op1, m_SignMask()))
    return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), I.getType());

  const APInt *Op1C;
  if (match(Op1, m_APInt(Op1C))) {
    // sdiv exact X, C --> ashr exact X, log2(C)
    if (I.isExact() && Op1C->isNonNegative() && Op1C->isPowerOf2()) {
      Value *ShAmt = ConstantInt::get(Op1->getType(), Op1C->exactLogBase2());
      return BinaryOperator::CreateExactAShr(Op0, ShAmt, I.getName());
    }

    // If the dividend is sign-extended and the constant divisor is small
    // enough to fit in the source type, shrink the division to the narrower
    // type:
    // (sext X) sdiv C --> sext (X sdiv C)
    Value *Op0Src;
    if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
        Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {

      // In the general case, we need to make sure that the dividend is not the
      // minimum signed value because dividing that by -1 is UB. But here, we
      // know that the -1 divisor case is already handled above.
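      // e.g. (sext i8 %x to i32) sdiv 3 --> sext (sdiv i8 %x, 3) to i32
      // (illustrative; the guard above ensures 3 fits in the narrow type).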
      Constant *NarrowDivisor =
          ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
      Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
      return new SExtInst(NarrowOp, Op0->getType());
    }

    // -X / C --> X / -C (if the negation doesn't overflow).
    // TODO: This could be enhanced to handle arbitrary vector constants by
    //       checking if all elements are not the min-signed-val.
    if (!Op1C->isMinSignedValue() &&
        match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
      Constant *NegC = ConstantInt::get(I.getType(), -(*Op1C));
      Instruction *BO = BinaryOperator::CreateSDiv(X, NegC);
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  // -X / Y --> -(X / Y)
  Value *Y;
  if (match(&I, m_SDiv(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(
        Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a udiv.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
    if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
      // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }

    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
      // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
      // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
      // the sign bit set.
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  return nullptr;
}

/// Remove negation and try to convert division into multiplication.
static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(1), m_Constant(C)))
    return nullptr;

  // -X / C --> X / -C
  Value *X;
  if (match(I.getOperand(0), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);

  // If the constant divisor has an exact inverse, this is always safe. If not,
  // then we can still create a reciprocal if fast-math-flags allow it and the
  // constant is a regular number (not zero, infinite, or denormal).
  if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
    return nullptr;

  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  auto *RecipC = ConstantExpr::getFDiv(ConstantFP::get(I.getType(), 1.0), C);
  if (!RecipC->isNormalFP())
    return nullptr;

  // X / C --> X * (1 / C)
  return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
}

/// Remove negation and try to reassociate constant math.
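/// Illustrative examples (the reassociation cases additionally require the
/// appropriate fast-math flags):
///   C / -X       --> -C / X
///   C / (X * C2) --> (C / C2) / X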
static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(0), m_Constant(C)))
    return nullptr;

  // C / -X --> -C / X
  Value *X;
  if (match(I.getOperand(1), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);

  if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
    return nullptr;

  // Try to reassociate C / X expressions where X includes another constant.
  Constant *C2, *NewC = nullptr;
  if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
    // C / (X * C2) --> (C / C2) / X
    NewC = ConstantExpr::getFDiv(C, C2);
  } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
    // C / (X / C2) --> (C * C2) / X
    NewC = ConstantExpr::getFMul(C, C2);
  }
  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  if (!NewC || !NewC->isNormalFP())
    return nullptr;

  return BinaryOperator::CreateFDivFMF(NewC, X, &I);
}

Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
  if (Value *V = SimplifyFDivInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *R = foldFDivConstantDivisor(I))
    return R;

  if (Instruction *R = foldFDivConstantDividend(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (isa<Constant>(Op1))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
      // (X / Y) / Z => X / (Y * Z)
      Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
      return BinaryOperator::CreateFDivFMF(X, YZ, &I);
    }
    if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
      // Z / (X / Y) => (Y * Z) / X
      Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
      return BinaryOperator::CreateFDivFMF(YZ, X, &I);
    }
  }

  if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
    // sin(X) / cos(X) -> tan(X)
    // cos(X) / sin(X) -> 1/tan(X) (cotangent)
    Value *X;
    bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
                 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
    bool IsCot =
        !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
                  match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));

    if ((IsTan || IsCot) && hasUnaryFloatFn(&TLI, I.getType(), LibFunc_tan,
                                            LibFunc_tanf, LibFunc_tanl)) {
      IRBuilder<> B(&I);
      IRBuilder<>::FastMathFlagGuard FMFGuard(B);
      B.setFastMathFlags(I.getFastMathFlags());
      AttributeList Attrs =
          cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
      Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
                                        LibFunc_tanl, B, Attrs);
      if (IsCot)
        Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
      return replaceInstUsesWith(I, Res);
    }
  }

  // -X / -Y -> X / Y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y)))) {
    I.setOperand(0, X);
    I.setOperand(1, Y);
    return &I;
  }

  // X / (X * Y) --> 1.0 / Y
  // Reassociating to (X / X -> 1.0) is legal when NaNs are not allowed.
  // We can ignore the possibility that X is infinity because INF/INF is NaN.
  if (I.hasNoNaNs() && I.hasAllowReassoc() &&
      match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
    I.setOperand(0, ConstantFP::get(I.getType(), 1.0));
    I.setOperand(1, Y);
    return &I;
  }

  return nullptr;
}

/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
/// Common integer remainder transforms
Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
    I.setOperand(1, V);
    return &I;
  }

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  if (isa<Constant>(Op1)) {
    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI))
          return R;
      } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
        const APInt *Op1Int;
        if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
            (I.getOpcode() == Instruction::URem ||
             !Op1Int->isMinSignedValue())) {
          // foldOpIntoPhi will speculate instructions to the end of the PHI's
          // predecessor blocks, so do this only if we know the srem or urem
          // will not fault.
          if (Instruction *NV = foldOpIntoPhi(I, PN))
            return NV;
        }
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitURem(BinaryOperator &I) {
  if (Value *V = SimplifyURemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
    return NarrowRem;

  // X urem Y -> X and Y-1, where Y is a power of 2.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
    Constant *N1 = Constant::getAllOnesValue(Ty);
    Value *Add = Builder.CreateAdd(Op1, N1);
    return BinaryOperator::CreateAnd(Op0, Add);
  }

  // 1 urem X -> zext(X != 1)
  if (match(Op0, m_One()))
    return CastInst::CreateZExtOrBitCast(Builder.CreateICmpNE(Op1, Op0), Ty);

  // X urem C -> X < C ? X : X - C, where C >= signbit.
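  // When C has its sign bit set, the unsigned quotient can only be 0 or 1,
  // so the remainder is either X itself or X - C.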
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
    Value *Sub = Builder.CreateSub(Op0, Op1);
    return SelectInst::Create(Cmp, Op0, Sub);
  }

  // If the divisor is a sext of a boolean, then the divisor must be max
  // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
  // max unsigned value. In that case, the remainder is 0:
  // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
  }

  return nullptr;
}

Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
  if (Value *V = SimplifySRemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer rem common cases
  if (Instruction *Common = commonIRemTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  {
    const APInt *Y;
    // X % -Y -> X % Y
    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue()) {
      Worklist.AddValue(I.getOperand(1));
      I.setOperand(1, ConstantInt::get(I.getType(), -*Y));
      return &I;
    }
  }

  // -X srem Y --> -(X srem Y)
  Value *X, *Y;
  if (match(&I, m_SRem(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
      MaskedValueIsZero(Op0, Mask, 0, &I)) {
    // X srem Y -> X urem Y, iff X and Y don't have sign bit set
    return BinaryOperator::CreateURem(Op0, Op1, I.getName());
  }

  // If it's a constant vector, flip any negative values positive.
  if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
    Constant *C = cast<Constant>(Op1);
    unsigned VWidth = C->getType()->getVectorNumElements();

    bool hasNegative = false;
    bool hasMissing = false;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) {
        hasMissing = true;
        break;
      }

      if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
        if (RHS->isNegative())
          hasNegative = true;
    }

    if (hasNegative && !hasMissing) {
      SmallVector<Constant *, 16> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
          if (RHS->isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != C) { // Don't loop on -MININT
        Worklist.AddValue(I.getOperand(1));
        I.setOperand(1, NewRHSV);
        return &I;
      }
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
  if (Value *V = SimplifyFRemInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  return nullptr;
}