//===- InstCombineMulDivRem.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
// srem, urem, frem.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// The specific integer value is used in a context where it is known to be
/// non-zero. If this allows us to simplify the computation, do so and return
/// the new operand, otherwise return null.
static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
                                        Instruction &CxtI) {
  // If V has multiple uses, then we would have to do more analysis to
  // determine if this is safe. For example, the use could be in dynamically
  // unreachable code.
  if (!V->hasOneUse()) return nullptr;

  bool MadeChange = false;

  // ((1 << A) >>u B) --> (1 << (A-B))
  // Because V cannot be zero, we know that B is less than A.
  Value *A = nullptr, *B = nullptr, *One = nullptr;
  if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
      match(One, m_One())) {
    A = IC.Builder.CreateSub(A, B);
    return IC.Builder.CreateShl(One, A);
  }

  // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
  // inexact. Similarly for <<.
  BinaryOperator *I = dyn_cast<BinaryOperator>(V);
  if (I && I->isLogicalShift() &&
      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
    // We know that this is an exact/nuw shift and that the input is non-zero
    // in this context as well.
    if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
      IC.replaceOperand(*I, 0, V2);
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
      I->setIsExact();
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
      I->setHasNoUnsignedWrap();
      MadeChange = true;
    }
  }

  // TODO: Lots more we could do here:
  //    If V is a phi node, we can call this on each of its operands.
  //    "select cond, X, 0" can simplify to "X".

  return MadeChange ? V : nullptr;
}

/// A helper routine of InstCombiner::visitMul().
///
/// If C is a scalar/fixed width vector of known powers of 2, then this
/// function returns a new scalar/fixed width vector obtained from logBase2
/// of C.
/// Return a null pointer otherwise.
static Constant *getLogBase2(Type *Ty, Constant *C) {
  const APInt *IVal;
  if (match(C, m_APInt(IVal)) && IVal->isPowerOf2())
    return ConstantInt::get(Ty, IVal->logBase2());

  // FIXME: We can extract pow of 2 of splat constant for scalable vectors.
  if (!isa<FixedVectorType>(Ty))
    return nullptr;

  SmallVector<Constant *, 4> Elts;
  for (unsigned I = 0, E = cast<FixedVectorType>(Ty)->getNumElements(); I != E;
       ++I) {
    Constant *Elt = C->getAggregateElement(I);
    if (!Elt)
      return nullptr;
    if (isa<UndefValue>(Elt)) {
      Elts.push_back(UndefValue::get(Ty->getScalarType()));
      continue;
    }
    if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2())
      return nullptr;
    Elts.push_back(ConstantInt::get(Ty->getScalarType(), IVal->logBase2()));
  }

  return ConstantVector::get(Elts);
}

// TODO: This is a specific form of a much more general pattern.
//       We could detect a select with any binop identity constant, or we
//       could use SimplifyBinOp to see if either arm of the select reduces.
//       But that needs to be done carefully and/or while removing potential
//       reverse canonicalizations as in InstCombiner::foldSelectIntoOp().
static Value *foldMulSelectToNegate(BinaryOperator &I,
                                    InstCombiner::BuilderTy &Builder) {
  Value *Cond, *OtherOp;

  // mul (select Cond, 1, -1), OtherOp --> select Cond, OtherOp, -OtherOp
  // mul OtherOp, (select Cond, 1, -1) --> select Cond, OtherOp, -OtherOp
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_One(), m_AllOnes())),
                        m_Value(OtherOp))))
    return Builder.CreateSelect(Cond, OtherOp, Builder.CreateNeg(OtherOp));

  // mul (select Cond, -1, 1), OtherOp --> select Cond, -OtherOp, OtherOp
  // mul OtherOp, (select Cond, -1, 1) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_AllOnes(), m_One())),
                        m_Value(OtherOp))))
    return Builder.CreateSelect(Cond, Builder.CreateNeg(OtherOp), OtherOp);

  // fmul (select Cond, 1.0, -1.0), OtherOp --> select Cond, OtherOp, -OtherOp
  // fmul OtherOp, (select Cond, 1.0, -1.0) --> select Cond, OtherOp, -OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(1.0),
                                           m_SpecificFP(-1.0))),
                         m_Value(OtherOp)))) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, OtherOp, Builder.CreateFNeg(OtherOp));
  }

  // fmul (select Cond, -1.0, 1.0), OtherOp --> select Cond, -OtherOp, OtherOp
  // fmul OtherOp, (select Cond, -1.0, 1.0) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(-1.0),
                                           m_SpecificFP(1.0))),
                         m_Value(OtherOp)))) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, Builder.CreateFNeg(OtherOp), OtherOp);
  }

  return nullptr;
}

Instruction *InstCombiner::visitMul(BinaryOperator &I) {
  if (Value *V = SimplifyMulInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X =
          foldVectorBinop(I))
    return X;

  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // X * -1 == 0 - X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_AllOnes())) {
    BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
    if (I.hasNoSignedWrap())
      BO->setHasNoSignedWrap();
    return BO;
  }

  // Also allow combining multiply instructions on vectors.
  {
    Value *NewOp;
    Constant *C1, *C2;
    const APInt *IVal;
    if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
                        m_Constant(C1))) &&
        match(C1, m_APInt(IVal))) {
      // ((X << C2)*C1) == (X * (C1 << C2))
      Constant *Shl = ConstantExpr::getShl(C1, C2);
      BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
      BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
      if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
          Shl->isNotMinSignedValue())
        BO->setHasNoSignedWrap();
      return BO;
    }

    if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
      // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
      // Note that we need to sanitize undef multipliers to 1,
      // to avoid introducing poison.
      Constant *SafeC1 = Constant::replaceUndefsWith(
          C1, ConstantInt::get(C1->getType()->getScalarType(), 1));
      if (Constant *NewCst = getLogBase2(NewOp->getType(), SafeC1)) {
        BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);

        if (I.hasNoUnsignedWrap())
          Shl->setHasNoUnsignedWrap();
        if (I.hasNoSignedWrap()) {
          const APInt *V;
          if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
            Shl->setHasNoSignedWrap();
        }

        return Shl;
      }
    }
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    // (Y - X) * (-(2**n)) -> (X - Y) * (2**n), for positive nonzero n
    // (Y + const) * (-(2**n)) -> (-const - Y) * (2**n), for positive nonzero n
    // The "* (2**n)" thus becomes a potential shifting opportunity.
    {
      const APInt &Val = CI->getValue();
      const APInt &PosVal = Val.abs();
      if (Val.isNegative() && PosVal.isPowerOf2()) {
        Value *X = nullptr, *Y = nullptr;
        if (Op0->hasOneUse()) {
          ConstantInt *C1;
          Value *Sub = nullptr;
          if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
            Sub = Builder.CreateSub(X, Y, "suba");
          else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
            Sub = Builder.CreateSub(Builder.CreateNeg(C1), Y, "subc");
          if (Sub)
            return
              BinaryOperator::CreateMul(Sub,
                                        ConstantInt::get(Y->getType(), PosVal));
        }
      }
    }
  }

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
    return replaceInstUsesWith(I, FoldedMul);

  // Simplify mul instructions with a constant RHS.
  if (isa<Constant>(Op1)) {
    // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
    Value *X;
    Constant *C1;
    if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
      Value *Mul = Builder.CreateMul(C1, Op1);
      // Only go forward with the transform if C1*CI simplifies to a tidier
      // constant.
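      // If the builder folded the product down to a plain constant, 'Mul' is
      // no longer a mul expression; the negative match below checks exactly
      // that.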
      if (!match(Mul, m_Mul(m_Value(), m_Value())))
        return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
    }
  }

  // abs(X) * abs(X) -> X * X
  // nabs(X) * nabs(X) -> X * X
  if (Op0 == Op1) {
    Value *X, *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return BinaryOperator::CreateMul(X, X);
  }

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *Op1C;
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
    return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));

  // -X * -Y --> X * Y
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
    auto *NewMul = BinaryOperator::CreateMul(X, Y);
    if (I.hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
      NewMul->setHasNoSignedWrap();
    return NewMul;
  }

  // -X * Y --> -(X * Y)
  // X * -Y --> -(X * Y)
  if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));

  // (X / Y) * Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
  {
    Value *Y = Op1;
    BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
                 Div->getOpcode() != Instruction::SDiv)) {
      Y = Op0;
      Div = dyn_cast<BinaryOperator>(Op1);
    }
    Value *Neg = dyn_castNegVal(Y);
    if (Div && Div->hasOneUse() &&
        (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
        (Div->getOpcode() == Instruction::UDiv ||
         Div->getOpcode() == Instruction::SDiv)) {
      Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);

      // If the division is exact, X % Y is zero, so we end up with X or -X.
      if (Div->isExact()) {
        if (DivOp1 == Y)
          return replaceInstUsesWith(I, X);
        return BinaryOperator::CreateNeg(X);
      }

      auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
                                                          : Instruction::SRem;
      Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
      if (DivOp1 == Y)
        return BinaryOperator::CreateSub(X, Rem);
      return BinaryOperator::CreateSub(Rem, X);
    }
  }

  // i1 mul -> i1 and.
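  // With a single bit, multiplication and bitwise AND coincide:
  // 1 * 1 == 1, and any product involving 0 is 0.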
  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateAnd(Op0, Op1);

  // X*(1 << Y) --> X << Y
  // (1 << Y)*X --> X << Y
  {
    Value *Y;
    BinaryOperator *BO = nullptr;
    bool ShlNSW = false;
    if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op1, Y);
      ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
    } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op0, Y);
      ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
    }
    if (BO) {
      if (I.hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && ShlNSW)
        BO->setHasNoSignedWrap();
      return BO;
    }
  }

  // (zext bool X) * (zext bool Y) --> zext (and X, Y)
  // (sext bool X) * (sext bool Y) --> zext (and X, Y)
  // Note: -1 * -1 == 1 * 1 == 1 (if the extends match, the result is the same)
  if (((match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_SExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::ZExt, And, I.getType());
  }
  // (sext bool X) * (zext bool Y) --> sext (and X, Y)
  // (zext bool X) * (sext bool Y) --> sext (and X, Y)
  // Note: -1 * 1 == 1 * -1 == -1
  if (((match(Op0, m_SExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::SExt, And, I.getType());
  }

  // (bool X) * Y --> X ? Y : 0
  // Y * (bool X) --> X ? Y : 0
  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
  if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));

  // (lshr X, 31) * Y --> (ashr X, 31) & Y
  // Y * (lshr X, 31) --> (ashr X, 31) & Y
  // TODO: We are not checking one-use because the elimination of the multiply
  //       is better for analysis?
  // TODO: Should we canonicalize to '(X < 0) ? Y : 0' instead? That would be
  //       more similar to what we're doing above.
  const APInt *C;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op1);
  if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }

  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ?
             &I : nullptr;
}

Instruction *InstCombiner::foldFPSignBitOps(BinaryOperator &I) {
  BinaryOperator::BinaryOps Opcode = I.getOpcode();
  assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
         "Expected fmul or fdiv");

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X, *Y;

  // -X * -Y --> X * Y
  // -X / -Y --> X / Y
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateWithCopiedFlags(Opcode, X, Y, &I);

  // fabs(X) * fabs(X) -> X * X
  // fabs(X) / fabs(X) -> X / X
  if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::fabs>(m_Value(X))))
    return BinaryOperator::CreateWithCopiedFlags(Opcode, X, X, &I);

  // fabs(X) * fabs(Y) --> fabs(X * Y)
  // fabs(X) / fabs(Y) --> fabs(X / Y)
  if (match(Op0, m_Intrinsic<Intrinsic::fabs>(m_Value(X))) &&
      match(Op1, m_Intrinsic<Intrinsic::fabs>(m_Value(Y))) &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    Value *XY = Builder.CreateBinOp(Opcode, X, Y);
    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY);
    Fabs->takeName(&I);
    return replaceInstUsesWith(I, Fabs);
  }

  return nullptr;
}

Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
  if (Value *V = SimplifyFMulInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
    return replaceInstUsesWith(I, FoldedMul);

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  // X * -1.0 --> -X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_SpecificFP(-1.0)))
    return UnaryOperator::CreateFNegFMF(Op0, &I);

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *C;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
    return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);

  // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc()) {
    // Reassociate constant RHS with another constant to form constant
    // expression.
    if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
      Constant *C1;
      if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
        // (C1 / X) * C --> (C * C1) / X
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        if (CC1->isNormalFP())
          return BinaryOperator::CreateFDivFMF(CC1, X, &I);
      }
      if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
        // (X / C1) * C --> X * (C / C1)
        Constant *CDivC1 = ConstantExpr::getFDiv(C, C1);
        if (CDivC1->isNormalFP())
          return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);

        // If the constant was a denormal, try reassociating differently.
        // (X / C1) * C --> X / (C1 / C)
        Constant *C1DivC = ConstantExpr::getFDiv(C1, C);
        if (Op0->hasOneUse() && C1DivC->isNormalFP())
          return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
      }

      // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
      // canonicalized to 'fadd X, C'. Distributing the multiply may allow
      // further folds and (X * C) + C2 is 'fma'.
      if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
        // (X + C1) * C --> (X * C) + (C * C1)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
      }
      if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
        // (C1 - X) * C --> (C * C1) - (X * C)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
      }
    }

    Value *Z;
    if (match(&I, m_c_FMul(m_OneUse(m_FDiv(m_Value(X), m_Value(Y))),
                           m_Value(Z)))) {
      // Sink division: (X / Y) * Z --> (X * Z) / Y
      Value *NewFMul = Builder.CreateFMulFMF(X, Z, &I);
      return BinaryOperator::CreateFDivFMF(NewFMul, Y, &I);
    }

    // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
    // nnan disallows the possibility of returning a number if both operands
    // are negative (in that case, we should return NaN).
    if (I.hasNoNaNs() &&
        match(Op0, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(X)))) &&
        match(Op1, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
      Value *XY = Builder.CreateFMulFMF(X, Y, &I);
      Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
      return replaceInstUsesWith(I, Sqrt);
    }

    // Like the similar transform in instsimplify, this requires 'nsz' because
    // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
    if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 &&
        Op0->hasNUses(2)) {
      // Peek through fdiv to find squaring of square root:
      // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
      if (match(Op0, m_FDiv(m_Value(X),
                            m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
        Value *XX = Builder.CreateFMulFMF(X, X, &I);
        return BinaryOperator::CreateFDivFMF(XX, Y, &I);
      }
      // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
      if (match(Op0, m_FDiv(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y)),
                            m_Value(X)))) {
        Value *XX = Builder.CreateFMulFMF(X, X, &I);
        return BinaryOperator::CreateFDivFMF(Y, XX, &I);
      }
    }

    // exp(X) * exp(Y) -> exp(X + Y)
    // Match as long as at least one of the exp calls has only one use.
    if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
        match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y))) &&
        (Op0->hasOneUse() || Op1->hasOneUse())) {
      Value *XY = Builder.CreateFAddFMF(X, Y, &I);
      Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
      return replaceInstUsesWith(I, Exp);
    }

    // exp2(X) * exp2(Y) -> exp2(X + Y)
    // Match as long as at least one of the exp2 calls has only one use.
    if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
        match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y))) &&
        (Op0->hasOneUse() || Op1->hasOneUse())) {
      Value *XY = Builder.CreateFAddFMF(X, Y, &I);
      Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
      return replaceInstUsesWith(I, Exp2);
    }

    // (X*Y) * X => (X*X) * Y where Y != X
    // The purpose is two-fold:
    //   1) to form a power expression (of X).
    //   2) potentially shorten the critical path: After transformation, the
    //      latency of the instruction Y is amortized by the expression of X*X,
    //      and therefore Y is in a "less critical" position compared to what
    //      it was before the transformation.
    if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
        Op1 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
    if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
        Op0 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
  }

  // log2(X * 0.5) * Y = log2(X) * Y - Y
  if (I.isFast()) {
    IntrinsicInst *Log2 = nullptr;
    if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op0);
      Y = Op1;
    }
    if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op1);
      Y = Op0;
    }
    if (Log2) {
      Value *Log2 = Builder.CreateUnaryIntrinsic(Intrinsic::log2, X, &I);
      Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
      return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
    }
  }

  return nullptr;
}

/// Fold a divide or remainder with a select instruction divisor when one of
/// the select operands is zero. In that case, we can use the other select
/// operand because div/rem by zero is undefined.
bool InstCombiner::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
  SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
  if (!SI)
    return false;

  int NonNullOperand;
  if (match(SI->getTrueValue(), m_Zero()))
    // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
    NonNullOperand = 2;
  else if (match(SI->getFalseValue(), m_Zero()))
    // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
    NonNullOperand = 1;
  else
    return false;

  // Change the div/rem to use 'Y' instead of the select.
  replaceOperand(I, 1, SI->getOperand(NonNullOperand));

  // Okay, we know we can replace the operand of the div/rem with 'Y' with no
  // problem. However, the select, or the condition of the select may have
  // multiple uses. Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.

  // If the select and condition only have a single use, don't bother with
  // this; exit early.
  Value *SelectCond = SI->getCondition();
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;

  // Scan the current block backward, looking for other uses of SI.
  BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
  Type *CondTy = SelectCond->getType();
  while (BBI != BBFront) {
    --BBI;
    // If we find an instruction that we can't assume will transfer execution
    // to its successor, then information from below it cannot be propagated
    // above it.
    if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
      break;

    // Replace uses of the select or its condition with the known values.
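    // The divisor is known non-zero here (division by zero is UB), so the
    // select must have produced its non-zero arm, which also fixes the value
    // of its condition.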
    for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
         I != E; ++I) {
      if (*I == SI) {
        replaceUse(*I, SI->getOperand(NonNullOperand));
        Worklist.push(&*BBI);
      } else if (*I == SelectCond) {
        replaceUse(*I, NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
                                           : ConstantInt::getFalse(CondTy));
        Worklist.push(&*BBI);
      }
    }

    // Once we pass the instruction, stop looking for it.
    if (&*BBI == SI)
      SI = nullptr;
    if (&*BBI == SelectCond)
      SelectCond = nullptr;

    // If we ran out of things to eliminate, break out of the loop.
    if (!SelectCond && !SI)
      break;
  }
  return true;
}

/// True if the multiply cannot be expressed in an int this size.
static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
                              bool IsSigned) {
  bool Overflow;
  Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
  return Overflow;
}

/// True if C1 is a multiple of C2. Quotient contains C1/C2.
static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
                       bool IsSigned) {
  assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");

  // Bail if we will divide by zero.
  if (C2.isNullValue())
    return false;

  // Bail if we would divide INT_MIN by -1.
  if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue())
    return false;

  APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
  if (IsSigned)
    APInt::sdivrem(C1, C2, Quotient, Remainder);
  else
    APInt::udivrem(C1, C2, Quotient, Remainder);

  return Remainder.isMinValue();
}

/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
/// Common integer divide transforms
Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Type *Ty = I.getType();

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  const APInt *C2;
  if (match(Op1, m_APInt(C2))) {
    Value *X;
    const APInt *C1;

    // (X / C1) / C2 -> X / (C1*C2)
    if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
      APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
        return BinaryOperator::Create(I.getOpcode(), X,
                                      ConstantInt::get(Ty, Product));
    }

    if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);

      // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
      if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
        auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
                                              ConstantInt::get(Ty, Quotient));
        NewDiv->setIsExact(I.isExact());
        return NewDiv;
      }

      // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
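      // The nsw/nuw flag matched above guarantees that the multiply did not
      // wrap, so dividing out the common constant factor is exact.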
      if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
         *C1 != C1->getBitWidth() - 1) ||
        (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      APInt C1Shifted = APInt::getOneBitSet(
          C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue()));

      // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
      if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
        auto *BO = BinaryOperator::Create(I.getOpcode(), X,
                                          ConstantInt::get(Ty, Quotient));
        BO->setIsExact(I.isExact());
        return BO;
      }

      // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
      if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if (!C2->isNullValue()) // avoid X udiv 0
      if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
        return FoldedDiv;
  }

  if (match(Op0, m_One())) {
    assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
    if (IsSigned) {
      // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the
      // result is one, if Op1 is -1 then the result is minus one, otherwise
      // it's zero.
      Value *Inc = Builder.CreateAdd(Op1, Op0);
      Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
      return SelectInst::Create(Cmp, Op1, ConstantInt::get(Ty, 0));
    } else {
      // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
      // result is one, otherwise it's zero.
      return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
    }
  }

  // See if we can fold away this div instruction.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
  Value *X, *Z;
  if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
    if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
        (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
      return BinaryOperator::Create(I.getOpcode(), X, Op1);

  // (X << Y) / X -> 1 << Y
  Value *Y;
  if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
  if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);

  // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
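  // The no-wrap check below is required: if X * Y wrapped, Op1 would no
  // longer be a true multiple of Op0 and the quotient would not be 1 / Y.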
  if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
    bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
    bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
    if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
      replaceOperand(I, 0, ConstantInt::get(Ty, 1));
      replaceOperand(I, 1, Y);
      return &I;
    }
  }

  return nullptr;
}

static const unsigned MaxDepth = 6;

namespace {

using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1,
                                           const BinaryOperator &I,
                                           InstCombiner &IC);

/// Used to maintain state for visitUDivOperand().
struct UDivFoldAction {
  /// Informs visitUDiv() how to fold this operand. This can be zero if this
  /// action joins two actions together.
  FoldUDivOperandCb FoldAction;

  /// Which operand to fold.
  Value *OperandToFold;

  union {
    /// The instruction returned when FoldAction is invoked.
    Instruction *FoldResult;

    /// Stores the LHS action index if this action joins two actions together.
    size_t SelectLHSIdx;
  };

  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
      : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {}
  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
      : FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};

} // end anonymous namespace

// X udiv 2^C -> X >> C
static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
                                    const BinaryOperator &I, InstCombiner &IC) {
  Constant *C1 = getLogBase2(Op0->getType(), cast<Constant>(Op1));
  if (!C1)
    llvm_unreachable("Failed to constant fold udiv -> logbase2");
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, C1);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
// X udiv (zext (C1 << N)), where C1 is "1<<C2" --> X >> (N+C2)
static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I,
                                InstCombiner &IC) {
  Value *ShiftLeft;
  if (!match(Op1, m_ZExt(m_Value(ShiftLeft))))
    ShiftLeft = Op1;

  Constant *CI;
  Value *N;
  if (!match(ShiftLeft, m_Shl(m_Constant(CI), m_Value(N))))
    llvm_unreachable("match should never fail here!");
  Constant *Log2Base = getLogBase2(N->getType(), CI);
  if (!Log2Base)
    llvm_unreachable("getLogBase2 should never fail here!");
  N = IC.Builder.CreateAdd(N, Log2Base);
  if (Op1 != ShiftLeft)
    N = IC.Builder.CreateZExt(N, Op1->getType());
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// Recursively visits the possible right hand operands of a udiv
// instruction, seeing through select instructions, to determine if we can
// replace the udiv with something simpler. If we find that an operand is not
// able to simplify the udiv, we abort the entire transformation.
static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
                               SmallVectorImpl<UDivFoldAction> &Actions,
                               unsigned Depth = 0) {
  // Check to see if this is an unsigned division with an exact power of 2,
  // if so, convert to a right shift.
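  // For example, 'udiv i32 %x, 16' becomes 'lshr i32 %x, 4'.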
  if (match(Op1, m_Power2())) {
    Actions.push_back(UDivFoldAction(foldUDivPow2Cst, Op1));
    return Actions.size();
  }

  // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
  if (match(Op1, m_Shl(m_Power2(), m_Value())) ||
      match(Op1, m_ZExt(m_Shl(m_Power2(), m_Value())))) {
    Actions.push_back(UDivFoldAction(foldUDivShl, Op1));
    return Actions.size();
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return 0;

  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
    if (size_t LHSIdx =
            visitUDivOperand(Op0, SI->getOperand(1), I, Actions, Depth))
      if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions, Depth)) {
        Actions.push_back(UDivFoldAction(nullptr, Op1, LHSIdx - 1));
        return Actions.size();
      }

  return 0;
}

/// If we have zero-extended operands of an unsigned div or rem, we may be able
/// to narrow the operation (sink the zext below the math).
static Instruction *narrowUDivURem(BinaryOperator &I,
                                   InstCombiner::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  Value *N = I.getOperand(0);
  Value *D = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X, *Y;
  if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
      X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
    // udiv (zext X), (zext Y) --> zext (udiv X, Y)
    // urem (zext X), (zext Y) --> zext (urem X, Y)
    Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
    return new ZExtInst(NarrowOp, Ty);
  }

  Constant *C;
  if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) ||
      (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) {
    // If the constant is the same in the smaller type, use the narrow version.
    Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
    if (ConstantExpr::getZExt(TruncC, Ty) != C)
      return nullptr;

    // udiv (zext X), C --> zext (udiv X, C')
    // urem (zext X), C --> zext (urem X, C')
    // udiv C, (zext X) --> zext (udiv C', X)
    // urem C, (zext X) --> zext (urem C', X)
    Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC)
                                       : Builder.CreateBinOp(Opcode, TruncC, X);
    return new ZExtInst(NarrowOp, Ty);
  }

  return nullptr;
}

Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
  if (Value *V = SimplifyUDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  const APInt *C1, *C2;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
    // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
    bool Overflow;
    APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
    if (!Overflow) {
      bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
      BinaryOperator *BO = BinaryOperator::CreateUDiv(
          X, ConstantInt::get(X->getType(), C2ShlC1));
      if (IsExact)
        BO->setIsExact();
      return BO;
    }
  }

  // Op0 / C where C is large (negative) --> zext (Op0 >= C)
  // TODO: Could use isKnownNegative() to handle non-constant values.
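  // When the divisor has its sign bit set, it is larger than half of the
  // unsigned range, so the quotient can only be 0 or 1.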
  Type *Ty = I.getType();
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }
  // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
    return NarrowDiv;

  // If the udiv operands are non-overflowing multiplies with a common operand,
  // then eliminate the common factor:
  // (A * B) / (A * X) --> B / X (and commuted variants)
  // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
  // TODO: If -reassociation handled this generally, we could remove this.
  Value *A, *B;
  if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
    if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
      return BinaryOperator::CreateUDiv(B, X);
    if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
      return BinaryOperator::CreateUDiv(A, X);
  }

  // (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...))))
  SmallVector<UDivFoldAction, 6> UDivActions;
  if (visitUDivOperand(Op0, Op1, I, UDivActions))
    for (unsigned i = 0, e = UDivActions.size(); i != e; ++i) {
      FoldUDivOperandCb Action = UDivActions[i].FoldAction;
      Value *ActionOp1 = UDivActions[i].OperandToFold;
      Instruction *Inst;
      if (Action)
        Inst = Action(Op0, ActionOp1, I, *this);
      else {
        // This action joins two actions together. The RHS of this action is
        // simply the last action we processed; we saved the LHS action index
        // in the joining action.
        size_t SelectRHSIdx = i - 1;
        Value *SelectRHS = UDivActions[SelectRHSIdx].FoldResult;
        size_t SelectLHSIdx = UDivActions[i].SelectLHSIdx;
        Value *SelectLHS = UDivActions[SelectLHSIdx].FoldResult;
        Inst = SelectInst::Create(cast<SelectInst>(ActionOp1)->getCondition(),
                                  SelectLHS, SelectRHS);
      }

      // If this is the last action to process, return it to the InstCombiner.
      // Otherwise, we insert it before the UDiv and record it so that we may
      // use it as part of a joining action (i.e., a SelectInst).
      if (e - i != 1) {
        Inst->insertBefore(&I);
        UDivActions[i].FoldResult = Inst;
      } else
        return Inst;
    }

  return nullptr;
}

Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
  if (Value *V = SimplifySDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  // sdiv Op0, -1 --> -Op0
  // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
  if (match(Op1, m_AllOnes()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return BinaryOperator::CreateNeg(Op0);

  // X / INT_MIN --> X == INT_MIN
  if (match(Op1, m_SignMask()))
    return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), I.getType());

  const APInt *Op1C;
  if (match(Op1, m_APInt(Op1C))) {
    // sdiv exact X, C --> ashr exact X, log2(C)
    if (I.isExact() && Op1C->isNonNegative() && Op1C->isPowerOf2()) {
      Value *ShAmt = ConstantInt::get(Op1->getType(), Op1C->exactLogBase2());
      return BinaryOperator::CreateExactAShr(Op0, ShAmt, I.getName());
    }

    // If the dividend is sign-extended and the constant divisor is small
    // enough to fit in the source type, shrink the division to the narrower
    // type:
    // (sext X) sdiv C --> sext (X sdiv C)
    Value *Op0Src;
    if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
        Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {

      // In the general case, we need to make sure that the dividend is not the
      // minimum signed value because dividing that by -1 is UB. But here, we
      // know that the -1 divisor case is already handled above.

      Constant *NarrowDivisor =
          ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
      Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
      return new SExtInst(NarrowOp, Op0->getType());
    }

    // -X / C --> X / -C (if the negation doesn't overflow).
    // TODO: This could be enhanced to handle arbitrary vector constants by
    // checking if all elements are not the min-signed-val.
    if (!Op1C->isMinSignedValue() &&
        match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
      Constant *NegC = ConstantInt::get(I.getType(), -(*Op1C));
      Instruction *BO = BinaryOperator::CreateSDiv(X, NegC);
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  // -X / Y --> -(X / Y)
  Value *Y;
  if (match(&I, m_SDiv(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(
        Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a udiv.
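  // For example, 'sdiv i32 %x, %y' where both operands are known to be less
  // than 2^31 computes the same result as 'udiv i32 %x, %y'.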
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
    if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
      // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }

    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
      // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
      // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
      // the sign bit set.
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  return nullptr;
}

/// Remove negation and try to convert division into multiplication.
static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(1), m_Constant(C)))
    return nullptr;

  // -X / C --> X / -C
  Value *X;
  if (match(I.getOperand(0), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);

  // If the constant divisor has an exact inverse, this is always safe. If not,
  // then we can still create a reciprocal if fast-math-flags allow it and the
  // constant is a regular number (not zero, infinite, or denormal).
  if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
    return nullptr;

  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  auto *RecipC = ConstantExpr::getFDiv(ConstantFP::get(I.getType(), 1.0), C);
  if (!RecipC->isNormalFP())
    return nullptr;

  // X / C --> X * (1 / C)
  return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
}

/// Remove negation and try to reassociate constant math.
static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(0), m_Constant(C)))
    return nullptr;

  // C / -X --> -C / X
  Value *X;
  if (match(I.getOperand(1), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);

  if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
    return nullptr;

  // Try to reassociate C / X expressions where X includes another constant.
  Constant *C2, *NewC = nullptr;
  if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
    // C / (X * C2) --> (C / C2) / X
    NewC = ConstantExpr::getFDiv(C, C2);
  } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
    // C / (X / C2) --> (C * C2) / X
    NewC = ConstantExpr::getFMul(C, C2);
  }
  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  if (!NewC || !NewC->isNormalFP())
    return nullptr;

  return BinaryOperator::CreateFDivFMF(NewC, X, &I);
}

Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
  if (Value *V = SimplifyFDivInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *R = foldFDivConstantDivisor(I))
    return R;

  if (Instruction *R = foldFDivConstantDividend(I))
    return R;

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (isa<Constant>(Op1))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
      // (X / Y) / Z => X / (Y * Z)
      Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
      return BinaryOperator::CreateFDivFMF(X, YZ, &I);
    }
    if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
      // Z / (X / Y) => (Y * Z) / X
      Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
      return BinaryOperator::CreateFDivFMF(YZ, X, &I);
    }
    // Z / (1.0 / Y) => (Y * Z)
    //
    // This is a special case of Z / (X / Y) => (Y * Z) / X, with X = 1.0. The
    // m_OneUse check is avoided because even in the case of multiple uses of
    // 1.0/Y, the number of instructions remains the same and a division is
    // replaced by a multiplication.
    if (match(Op1, m_FDiv(m_SpecificFP(1.0), m_Value(Y))))
      return BinaryOperator::CreateFMulFMF(Y, Op0, &I);
  }

  if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
    // sin(X) / cos(X) -> tan(X)
    // cos(X) / sin(X) -> 1/tan(X) (cotangent)
    Value *X;
    bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
                 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
    bool IsCot =
        !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
                  match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));

    if ((IsTan || IsCot) &&
        hasFloatFn(&TLI, I.getType(), LibFunc_tan, LibFunc_tanf,
                   LibFunc_tanl)) {
      IRBuilder<> B(&I);
      IRBuilder<>::FastMathFlagGuard FMFGuard(B);
      B.setFastMathFlags(I.getFastMathFlags());
      AttributeList Attrs =
          cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
      Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
                                        LibFunc_tanl, B, Attrs);
      if (IsCot)
        Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
      return replaceInstUsesWith(I, Res);
    }
  }

  // X / (X * Y) --> 1.0 / Y
  // Reassociating to (X / X -> 1.0) is legal when NaNs are not allowed.
  // We can ignore the possibility that X is infinity because INF/INF is NaN.
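  // The nnan flag also covers X == 0: 0.0 / (0.0 * Y) would be NaN, which
  // 'nnan' lets us ignore.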
  Value *X, *Y;
  if (I.hasNoNaNs() && I.hasAllowReassoc() &&
      match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
    replaceOperand(I, 0, ConstantFP::get(I.getType(), 1.0));
    replaceOperand(I, 1, Y);
    return &I;
  }

  // X / fabs(X) -> copysign(1.0, X)
  // fabs(X) / X -> copysign(1.0, X)
  if (I.hasNoNaNs() && I.hasNoInfs() &&
      (match(&I,
             m_FDiv(m_Value(X), m_Intrinsic<Intrinsic::fabs>(m_Deferred(X)))) ||
       match(&I, m_FDiv(m_Intrinsic<Intrinsic::fabs>(m_Value(X)),
                        m_Deferred(X))))) {
    Value *V = Builder.CreateBinaryIntrinsic(
        Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
    return replaceInstUsesWith(I, V);
  }
  return nullptr;
}

/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
/// Common integer remainder transforms
Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  if (isa<Constant>(Op1)) {
    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI))
          return R;
      } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
        const APInt *Op1Int;
        if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
            (I.getOpcode() == Instruction::URem ||
             !Op1Int->isMinSignedValue())) {
          // foldOpIntoPhi will speculate instructions to the end of the PHI's
          // predecessor blocks, so do this only if we know the srem or urem
          // will not fault.
          if (Instruction *NV = foldOpIntoPhi(I, PN))
            return NV;
        }
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitURem(BinaryOperator &I) {
  if (Value *V = SimplifyURemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
    return NarrowRem;

  // X urem Y -> X and Y-1, where Y is a power of 2.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
    // This may increase instruction count; we don't enforce that Y is a
    // constant.
    Constant *N1 = Constant::getAllOnesValue(Ty);
    Value *Add = Builder.CreateAdd(Op1, N1);
    return BinaryOperator::CreateAnd(Op0, Add);
  }

  // 1 urem X -> zext(X != 1)
  if (match(Op0, m_One())) {
    Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  // X urem C -> X < C ? X : X - C, where C >= signbit.
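  // Because C has its sign bit set, X can contain at most one multiple of C,
  // so a single conditional subtraction produces the remainder.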
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
    Value *Sub = Builder.CreateSub(Op0, Op1);
    return SelectInst::Create(Cmp, Op0, Sub);
  }

  // If the divisor is a sext of a boolean, then the divisor must be max
  // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
  // max unsigned value. In that case, the remainder is 0:
  // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
  }

  return nullptr;
}

Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
  if (Value *V = SimplifySRemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer rem common cases
  if (Instruction *Common = commonIRemTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  {
    const APInt *Y;
    // X % -Y -> X % Y
    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue())
      return replaceOperand(I, 1, ConstantInt::get(I.getType(), -*Y));
  }

  // -X srem Y --> -(X srem Y)
  Value *X, *Y;
  if (match(&I, m_SRem(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
      MaskedValueIsZero(Op0, Mask, 0, &I)) {
    // X srem Y -> X urem Y, iff X and Y don't have sign bit set
    return BinaryOperator::CreateURem(Op0, Op1, I.getName());
  }

  // If it's a constant vector, flip any negative values positive.
  if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
    Constant *C = cast<Constant>(Op1);
    unsigned VWidth = cast<VectorType>(C->getType())->getNumElements();

    bool hasNegative = false;
    bool hasMissing = false;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) {
        hasMissing = true;
        break;
      }

      if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
        if (RHS->isNegative())
          hasNegative = true;
    }

    if (hasNegative && !hasMissing) {
      SmallVector<Constant *, 16> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
          if (RHS->isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != C) // Don't loop on -MININT
        return replaceOperand(I, 1, NewRHSV);
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
  if (Value *V = SimplifyFRemInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  return nullptr;
}