1 //===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visit functions for add, fadd, sub, and fsub.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APFloat.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/Analysis/ValueTracking.h"
20 #include "llvm/IR/Constant.h"
21 #include "llvm/IR/Constants.h"
22 #include "llvm/IR/InstrTypes.h"
23 #include "llvm/IR/Instruction.h"
24 #include "llvm/IR/Instructions.h"
25 #include "llvm/IR/Operator.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/IR/Value.h"
29 #include "llvm/Support/AlignOf.h"
30 #include "llvm/Support/Casting.h"
31 #include "llvm/Support/KnownBits.h"
32 #include "llvm/Transforms/InstCombine/InstCombiner.h"
33 #include <cassert>
34 #include <utility>
35
36 using namespace llvm;
37 using namespace PatternMatch;
38
39 #define DEBUG_TYPE "instcombine"
40
41 namespace {
42
43 /// Class representing coefficient of floating-point addend.
44 /// This class needs to be highly efficient, which is especially true for
45 /// the constructor. As of I write this comment, the cost of the default
46 /// constructor is merely 4-byte-store-zero (Assuming compiler is able to
47 /// perform write-merging).
48 ///
49 class FAddendCoef {
50 public:
51 // The constructor has to initialize a APFloat, which is unnecessary for
52 // most addends which have coefficient either 1 or -1. So, the constructor
53 // is expensive. In order to avoid the cost of the constructor, we should
54 // reuse some instances whenever possible. The pre-created instances
55 // FAddCombine::Add[0-5] embodies this idea.
56 FAddendCoef() = default;
57 ~FAddendCoef();
58
59 // If possible, don't define operator+/operator- etc because these
60 // operators inevitably call FAddendCoef's constructor which is not cheap.
61 void operator=(const FAddendCoef &A);
62 void operator+=(const FAddendCoef &A);
63 void operator*=(const FAddendCoef &S);
64
set(short C)65 void set(short C) {
66 assert(!insaneIntVal(C) && "Insane coefficient");
67 IsFp = false; IntVal = C;
68 }
69
70 void set(const APFloat& C);
71
72 void negate();
73
isZero() const74 bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
75 Value *getValue(Type *) const;
76
isOne() const77 bool isOne() const { return isInt() && IntVal == 1; }
isTwo() const78 bool isTwo() const { return isInt() && IntVal == 2; }
isMinusOne() const79 bool isMinusOne() const { return isInt() && IntVal == -1; }
isMinusTwo() const80 bool isMinusTwo() const { return isInt() && IntVal == -2; }
81
82 private:
insaneIntVal(int V)83 bool insaneIntVal(int V) { return V > 4 || V < -4; }
84
getFpValPtr()85 APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); }
86
getFpValPtr() const87 const APFloat *getFpValPtr() const {
88 return reinterpret_cast<const APFloat *>(&FpValBuf);
89 }
90
getFpVal() const91 const APFloat &getFpVal() const {
92 assert(IsFp && BufHasFpVal && "Incorret state");
93 return *getFpValPtr();
94 }
95
getFpVal()96 APFloat &getFpVal() {
97 assert(IsFp && BufHasFpVal && "Incorret state");
98 return *getFpValPtr();
99 }
100
isInt() const101 bool isInt() const { return !IsFp; }
102
103 // If the coefficient is represented by an integer, promote it to a
104 // floating point.
105 void convertToFpType(const fltSemantics &Sem);
106
107 // Construct an APFloat from a signed integer.
108 // TODO: We should get rid of this function when APFloat can be constructed
109 // from an *SIGNED* integer.
110 APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);
111
112 bool IsFp = false;
113
114 // True iff FpValBuf contains an instance of APFloat.
115 bool BufHasFpVal = false;
116
117 // The integer coefficient of an individual addend is either 1 or -1,
118 // and we try to simplify at most 4 addends from neighboring at most
119 // two instructions. So the range of <IntVal> falls in [-4, 4]. APInt
120 // is overkill of this end.
121 short IntVal = 0;
122
123 AlignedCharArrayUnion<APFloat> FpValBuf;
124 };
125
126 /// FAddend is used to represent floating-point addend. An addend is
127 /// represented as <C, V>, where the V is a symbolic value, and C is a
128 /// constant coefficient. A constant addend is represented as <C, 0>.
129 class FAddend {
130 public:
131 FAddend() = default;
132
operator +=(const FAddend & T)133 void operator+=(const FAddend &T) {
134 assert((Val == T.Val) && "Symbolic-values disagree");
135 Coeff += T.Coeff;
136 }
137
getSymVal() const138 Value *getSymVal() const { return Val; }
getCoef() const139 const FAddendCoef &getCoef() const { return Coeff; }
140
isConstant() const141 bool isConstant() const { return Val == nullptr; }
isZero() const142 bool isZero() const { return Coeff.isZero(); }
143
set(short Coefficient,Value * V)144 void set(short Coefficient, Value *V) {
145 Coeff.set(Coefficient);
146 Val = V;
147 }
set(const APFloat & Coefficient,Value * V)148 void set(const APFloat &Coefficient, Value *V) {
149 Coeff.set(Coefficient);
150 Val = V;
151 }
set(const ConstantFP * Coefficient,Value * V)152 void set(const ConstantFP *Coefficient, Value *V) {
153 Coeff.set(Coefficient->getValueAPF());
154 Val = V;
155 }
156
negate()157 void negate() { Coeff.negate(); }
158
159 /// Drill down the U-D chain one step to find the definition of V, and
160 /// try to break the definition into one or two addends.
161 static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);
162
163 /// Similar to FAddend::drillDownOneStep() except that the value being
164 /// splitted is the addend itself.
165 unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;
166
167 private:
Scale(const FAddendCoef & ScaleAmt)168 void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }
169
170 // This addend has the value of "Coeff * Val".
171 Value *Val = nullptr;
172 FAddendCoef Coeff;
173 };
174
175 /// FAddCombine is the class for optimizing an unsafe fadd/fsub along
176 /// with its neighboring at most two instructions.
177 ///
178 class FAddCombine {
179 public:
FAddCombine(InstCombiner::BuilderTy & B)180 FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}
181
182 Value *simplify(Instruction *FAdd);
183
184 private:
185 using AddendVect = SmallVector<const FAddend *, 4>;
186
187 Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);
188
189 /// Convert given addend to a Value
190 Value *createAddendVal(const FAddend &A, bool& NeedNeg);
191
192 /// Return the number of instructions needed to emit the N-ary addition.
193 unsigned calcInstrNumber(const AddendVect& Vect);
194
195 Value *createFSub(Value *Opnd0, Value *Opnd1);
196 Value *createFAdd(Value *Opnd0, Value *Opnd1);
197 Value *createFMul(Value *Opnd0, Value *Opnd1);
198 Value *createFNeg(Value *V);
199 Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
200 void createInstPostProc(Instruction *NewInst, bool NoNumber = false);
201
202 // Debugging stuff are clustered here.
203 #ifndef NDEBUG
204 unsigned CreateInstrNum;
initCreateInstNum()205 void initCreateInstNum() { CreateInstrNum = 0; }
incCreateInstNum()206 void incCreateInstNum() { CreateInstrNum++; }
207 #else
initCreateInstNum()208 void initCreateInstNum() {}
incCreateInstNum()209 void incCreateInstNum() {}
210 #endif
211
212 InstCombiner::BuilderTy &Builder;
213 Instruction *Instr = nullptr;
214 };
215
216 } // end anonymous namespace
217
218 //===----------------------------------------------------------------------===//
219 //
220 // Implementation of
221 // {FAddendCoef, FAddend, FAddition, FAddCombine}.
222 //
223 //===----------------------------------------------------------------------===//
~FAddendCoef()224 FAddendCoef::~FAddendCoef() {
225 if (BufHasFpVal)
226 getFpValPtr()->~APFloat();
227 }
228
set(const APFloat & C)229 void FAddendCoef::set(const APFloat& C) {
230 APFloat *P = getFpValPtr();
231
232 if (isInt()) {
233 // As the buffer is meanless byte stream, we cannot call
234 // APFloat::operator=().
235 new(P) APFloat(C);
236 } else
237 *P = C;
238
239 IsFp = BufHasFpVal = true;
240 }
241
convertToFpType(const fltSemantics & Sem)242 void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
243 if (!isInt())
244 return;
245
246 APFloat *P = getFpValPtr();
247 if (IntVal > 0)
248 new(P) APFloat(Sem, IntVal);
249 else {
250 new(P) APFloat(Sem, 0 - IntVal);
251 P->changeSign();
252 }
253 IsFp = BufHasFpVal = true;
254 }
255
createAPFloatFromInt(const fltSemantics & Sem,int Val)256 APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
257 if (Val >= 0)
258 return APFloat(Sem, Val);
259
260 APFloat T(Sem, 0 - Val);
261 T.changeSign();
262
263 return T;
264 }
265
operator =(const FAddendCoef & That)266 void FAddendCoef::operator=(const FAddendCoef &That) {
267 if (That.isInt())
268 set(That.IntVal);
269 else
270 set(That.getFpVal());
271 }
272
operator +=(const FAddendCoef & That)273 void FAddendCoef::operator+=(const FAddendCoef &That) {
274 RoundingMode RndMode = RoundingMode::NearestTiesToEven;
275 if (isInt() == That.isInt()) {
276 if (isInt())
277 IntVal += That.IntVal;
278 else
279 getFpVal().add(That.getFpVal(), RndMode);
280 return;
281 }
282
283 if (isInt()) {
284 const APFloat &T = That.getFpVal();
285 convertToFpType(T.getSemantics());
286 getFpVal().add(T, RndMode);
287 return;
288 }
289
290 APFloat &T = getFpVal();
291 T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
292 }
293
operator *=(const FAddendCoef & That)294 void FAddendCoef::operator*=(const FAddendCoef &That) {
295 if (That.isOne())
296 return;
297
298 if (That.isMinusOne()) {
299 negate();
300 return;
301 }
302
303 if (isInt() && That.isInt()) {
304 int Res = IntVal * (int)That.IntVal;
305 assert(!insaneIntVal(Res) && "Insane int value");
306 IntVal = Res;
307 return;
308 }
309
310 const fltSemantics &Semantic =
311 isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();
312
313 if (isInt())
314 convertToFpType(Semantic);
315 APFloat &F0 = getFpVal();
316
317 if (That.isInt())
318 F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
319 APFloat::rmNearestTiesToEven);
320 else
321 F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
322 }
323
negate()324 void FAddendCoef::negate() {
325 if (isInt())
326 IntVal = 0 - IntVal;
327 else
328 getFpVal().changeSign();
329 }
330
getValue(Type * Ty) const331 Value *FAddendCoef::getValue(Type *Ty) const {
332 return isInt() ?
333 ConstantFP::get(Ty, float(IntVal)) :
334 ConstantFP::get(Ty->getContext(), getFpVal());
335 }
336
337 // The definition of <Val> Addends
338 // =========================================
339 // A + B <1, A>, <1,B>
340 // A - B <1, A>, <1,B>
341 // 0 - B <-1, B>
342 // C * A, <C, A>
343 // A + C <1, A> <C, NULL>
344 // 0 +/- 0 <0, NULL> (corner case)
345 //
346 // Legend: A and B are not constant, C is constant
drillValueDownOneStep(Value * Val,FAddend & Addend0,FAddend & Addend1)347 unsigned FAddend::drillValueDownOneStep
348 (Value *Val, FAddend &Addend0, FAddend &Addend1) {
349 Instruction *I = nullptr;
350 if (!Val || !(I = dyn_cast<Instruction>(Val)))
351 return 0;
352
353 unsigned Opcode = I->getOpcode();
354
355 if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
356 ConstantFP *C0, *C1;
357 Value *Opnd0 = I->getOperand(0);
358 Value *Opnd1 = I->getOperand(1);
359 if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
360 Opnd0 = nullptr;
361
362 if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
363 Opnd1 = nullptr;
364
365 if (Opnd0) {
366 if (!C0)
367 Addend0.set(1, Opnd0);
368 else
369 Addend0.set(C0, nullptr);
370 }
371
372 if (Opnd1) {
373 FAddend &Addend = Opnd0 ? Addend1 : Addend0;
374 if (!C1)
375 Addend.set(1, Opnd1);
376 else
377 Addend.set(C1, nullptr);
378 if (Opcode == Instruction::FSub)
379 Addend.negate();
380 }
381
382 if (Opnd0 || Opnd1)
383 return Opnd0 && Opnd1 ? 2 : 1;
384
385 // Both operands are zero. Weird!
386 Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
387 return 1;
388 }
389
390 if (I->getOpcode() == Instruction::FMul) {
391 Value *V0 = I->getOperand(0);
392 Value *V1 = I->getOperand(1);
393 if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
394 Addend0.set(C, V1);
395 return 1;
396 }
397
398 if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
399 Addend0.set(C, V0);
400 return 1;
401 }
402 }
403
404 return 0;
405 }
406
407 // Try to break *this* addend into two addends. e.g. Suppose this addend is
408 // <2.3, V>, and V = X + Y, by calling this function, we obtain two addends,
409 // i.e. <2.3, X> and <2.3, Y>.
drillAddendDownOneStep(FAddend & Addend0,FAddend & Addend1) const410 unsigned FAddend::drillAddendDownOneStep
411 (FAddend &Addend0, FAddend &Addend1) const {
412 if (isConstant())
413 return 0;
414
415 unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
416 if (!BreakNum || Coeff.isOne())
417 return BreakNum;
418
419 Addend0.Scale(Coeff);
420
421 if (BreakNum == 2)
422 Addend1.Scale(Coeff);
423
424 return BreakNum;
425 }
426
simplify(Instruction * I)427 Value *FAddCombine::simplify(Instruction *I) {
428 assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&
429 "Expected 'reassoc'+'nsz' instruction");
430
431 // Currently we are not able to handle vector type.
432 if (I->getType()->isVectorTy())
433 return nullptr;
434
435 assert((I->getOpcode() == Instruction::FAdd ||
436 I->getOpcode() == Instruction::FSub) && "Expect add/sub");
437
438 // Save the instruction before calling other member-functions.
439 Instr = I;
440
441 FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;
442
443 unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);
444
445 // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
446 unsigned Opnd0_ExpNum = 0;
447 unsigned Opnd1_ExpNum = 0;
448
449 if (!Opnd0.isConstant())
450 Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);
451
452 // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
453 if (OpndNum == 2 && !Opnd1.isConstant())
454 Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);
455
456 // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
457 if (Opnd0_ExpNum && Opnd1_ExpNum) {
458 AddendVect AllOpnds;
459 AllOpnds.push_back(&Opnd0_0);
460 AllOpnds.push_back(&Opnd1_0);
461 if (Opnd0_ExpNum == 2)
462 AllOpnds.push_back(&Opnd0_1);
463 if (Opnd1_ExpNum == 2)
464 AllOpnds.push_back(&Opnd1_1);
465
466 // Compute instruction quota. We should save at least one instruction.
467 unsigned InstQuota = 0;
468
469 Value *V0 = I->getOperand(0);
470 Value *V1 = I->getOperand(1);
471 InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
472 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;
473
474 if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
475 return R;
476 }
477
478 if (OpndNum != 2) {
479 // The input instruction is : "I=0.0 +/- V". If the "V" were able to be
480 // splitted into two addends, say "V = X - Y", the instruction would have
481 // been optimized into "I = Y - X" in the previous steps.
482 //
483 const FAddendCoef &CE = Opnd0.getCoef();
484 return CE.isOne() ? Opnd0.getSymVal() : nullptr;
485 }
486
487 // step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
488 if (Opnd1_ExpNum) {
489 AddendVect AllOpnds;
490 AllOpnds.push_back(&Opnd0);
491 AllOpnds.push_back(&Opnd1_0);
492 if (Opnd1_ExpNum == 2)
493 AllOpnds.push_back(&Opnd1_1);
494
495 if (Value *R = simplifyFAdd(AllOpnds, 1))
496 return R;
497 }
498
499 // step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
500 if (Opnd0_ExpNum) {
501 AddendVect AllOpnds;
502 AllOpnds.push_back(&Opnd1);
503 AllOpnds.push_back(&Opnd0_0);
504 if (Opnd0_ExpNum == 2)
505 AllOpnds.push_back(&Opnd0_1);
506
507 if (Value *R = simplifyFAdd(AllOpnds, 1))
508 return R;
509 }
510
511 return nullptr;
512 }
513
simplifyFAdd(AddendVect & Addends,unsigned InstrQuota)514 Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
515 unsigned AddendNum = Addends.size();
516 assert(AddendNum <= 4 && "Too many addends");
517
518 // For saving intermediate results;
519 unsigned NextTmpIdx = 0;
520 FAddend TmpResult[3];
521
522 // Simplified addends are placed <SimpVect>.
523 AddendVect SimpVect;
524
525 // The outer loop works on one symbolic-value at a time. Suppose the input
526 // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
527 // The symbolic-values will be processed in this order: x, y, z.
528 for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {
529
530 const FAddend *ThisAddend = Addends[SymIdx];
531 if (!ThisAddend) {
532 // This addend was processed before.
533 continue;
534 }
535
536 Value *Val = ThisAddend->getSymVal();
537
538 // If the resulting expr has constant-addend, this constant-addend is
539 // desirable to reside at the top of the resulting expression tree. Placing
540 // constant close to super-expr(s) will potentially reveal some
541 // optimization opportunities in super-expr(s). Here we do not implement
542 // this logic intentionally and rely on SimplifyAssociativeOrCommutative
543 // call later.
544
545 unsigned StartIdx = SimpVect.size();
546 SimpVect.push_back(ThisAddend);
547
548 // The inner loop collects addends sharing same symbolic-value, and these
549 // addends will be later on folded into a single addend. Following above
550 // example, if the symbolic value "y" is being processed, the inner loop
551 // will collect two addends "<b1,y>" and "<b2,Y>". These two addends will
552 // be later on folded into "<b1+b2, y>".
553 for (unsigned SameSymIdx = SymIdx + 1;
554 SameSymIdx < AddendNum; SameSymIdx++) {
555 const FAddend *T = Addends[SameSymIdx];
556 if (T && T->getSymVal() == Val) {
557 // Set null such that next iteration of the outer loop will not process
558 // this addend again.
559 Addends[SameSymIdx] = nullptr;
560 SimpVect.push_back(T);
561 }
562 }
563
564 // If multiple addends share same symbolic value, fold them together.
565 if (StartIdx + 1 != SimpVect.size()) {
566 FAddend &R = TmpResult[NextTmpIdx ++];
567 R = *SimpVect[StartIdx];
568 for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
569 R += *SimpVect[Idx];
570
571 // Pop all addends being folded and push the resulting folded addend.
572 SimpVect.resize(StartIdx);
573 if (!R.isZero()) {
574 SimpVect.push_back(&R);
575 }
576 }
577 }
578
579 assert((NextTmpIdx <= std::size(TmpResult) + 1) && "out-of-bound access");
580
581 Value *Result;
582 if (!SimpVect.empty())
583 Result = createNaryFAdd(SimpVect, InstrQuota);
584 else {
585 // The addition is folded to 0.0.
586 Result = ConstantFP::get(Instr->getType(), 0.0);
587 }
588
589 return Result;
590 }
591
createNaryFAdd(const AddendVect & Opnds,unsigned InstrQuota)592 Value *FAddCombine::createNaryFAdd
593 (const AddendVect &Opnds, unsigned InstrQuota) {
594 assert(!Opnds.empty() && "Expect at least one addend");
595
596 // Step 1: Check if the # of instructions needed exceeds the quota.
597
598 unsigned InstrNeeded = calcInstrNumber(Opnds);
599 if (InstrNeeded > InstrQuota)
600 return nullptr;
601
602 initCreateInstNum();
603
604 // step 2: Emit the N-ary addition.
605 // Note that at most three instructions are involved in Fadd-InstCombine: the
606 // addition in question, and at most two neighboring instructions.
607 // The resulting optimized addition should have at least one less instruction
608 // than the original addition expression tree. This implies that the resulting
609 // N-ary addition has at most two instructions, and we don't need to worry
610 // about tree-height when constructing the N-ary addition.
611
612 Value *LastVal = nullptr;
613 bool LastValNeedNeg = false;
614
615 // Iterate the addends, creating fadd/fsub using adjacent two addends.
616 for (const FAddend *Opnd : Opnds) {
617 bool NeedNeg;
618 Value *V = createAddendVal(*Opnd, NeedNeg);
619 if (!LastVal) {
620 LastVal = V;
621 LastValNeedNeg = NeedNeg;
622 continue;
623 }
624
625 if (LastValNeedNeg == NeedNeg) {
626 LastVal = createFAdd(LastVal, V);
627 continue;
628 }
629
630 if (LastValNeedNeg)
631 LastVal = createFSub(V, LastVal);
632 else
633 LastVal = createFSub(LastVal, V);
634
635 LastValNeedNeg = false;
636 }
637
638 if (LastValNeedNeg) {
639 LastVal = createFNeg(LastVal);
640 }
641
642 #ifndef NDEBUG
643 assert(CreateInstrNum == InstrNeeded &&
644 "Inconsistent in instruction numbers");
645 #endif
646
647 return LastVal;
648 }
649
createFSub(Value * Opnd0,Value * Opnd1)650 Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
651 Value *V = Builder.CreateFSub(Opnd0, Opnd1);
652 if (Instruction *I = dyn_cast<Instruction>(V))
653 createInstPostProc(I);
654 return V;
655 }
656
createFNeg(Value * V)657 Value *FAddCombine::createFNeg(Value *V) {
658 Value *NewV = Builder.CreateFNeg(V);
659 if (Instruction *I = dyn_cast<Instruction>(NewV))
660 createInstPostProc(I, true); // fneg's don't receive instruction numbers.
661 return NewV;
662 }
663
createFAdd(Value * Opnd0,Value * Opnd1)664 Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
665 Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
666 if (Instruction *I = dyn_cast<Instruction>(V))
667 createInstPostProc(I);
668 return V;
669 }
670
createFMul(Value * Opnd0,Value * Opnd1)671 Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
672 Value *V = Builder.CreateFMul(Opnd0, Opnd1);
673 if (Instruction *I = dyn_cast<Instruction>(V))
674 createInstPostProc(I);
675 return V;
676 }
677
createInstPostProc(Instruction * NewInstr,bool NoNumber)678 void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
679 NewInstr->setDebugLoc(Instr->getDebugLoc());
680
681 // Keep track of the number of instruction created.
682 if (!NoNumber)
683 incCreateInstNum();
684
685 // Propagate fast-math flags
686 NewInstr->setFastMathFlags(Instr->getFastMathFlags());
687 }
688
689 // Return the number of instruction needed to emit the N-ary addition.
690 // NOTE: Keep this function in sync with createAddendVal().
calcInstrNumber(const AddendVect & Opnds)691 unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
692 unsigned OpndNum = Opnds.size();
693 unsigned InstrNeeded = OpndNum - 1;
694
695 // Adjust the number of instructions needed to emit the N-ary add.
696 for (const FAddend *Opnd : Opnds) {
697 if (Opnd->isConstant())
698 continue;
699
700 // The constant check above is really for a few special constant
701 // coefficients.
702 if (isa<UndefValue>(Opnd->getSymVal()))
703 continue;
704
705 const FAddendCoef &CE = Opnd->getCoef();
706 // Let the addend be "c * x". If "c == +/-1", the value of the addend
707 // is immediately available; otherwise, it needs exactly one instruction
708 // to evaluate the value.
709 if (!CE.isMinusOne() && !CE.isOne())
710 InstrNeeded++;
711 }
712 return InstrNeeded;
713 }
714
715 // Input Addend Value NeedNeg(output)
716 // ================================================================
717 // Constant C C false
718 // <+/-1, V> V coefficient is -1
719 // <2/-2, V> "fadd V, V" coefficient is -2
720 // <C, V> "fmul V, C" false
721 //
722 // NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
createAddendVal(const FAddend & Opnd,bool & NeedNeg)723 Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
724 const FAddendCoef &Coeff = Opnd.getCoef();
725
726 if (Opnd.isConstant()) {
727 NeedNeg = false;
728 return Coeff.getValue(Instr->getType());
729 }
730
731 Value *OpndVal = Opnd.getSymVal();
732
733 if (Coeff.isMinusOne() || Coeff.isOne()) {
734 NeedNeg = Coeff.isMinusOne();
735 return OpndVal;
736 }
737
738 if (Coeff.isTwo() || Coeff.isMinusTwo()) {
739 NeedNeg = Coeff.isMinusTwo();
740 return createFAdd(OpndVal, OpndVal);
741 }
742
743 NeedNeg = false;
744 return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
745 }
746
747 // Checks if any operand is negative and we can convert add to sub.
748 // This function checks for following negative patterns
749 // ADD(XOR(OR(Z, NOT(C)), C)), 1) == NEG(AND(Z, C))
750 // ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
751 // XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
checkForNegativeOperand(BinaryOperator & I,InstCombiner::BuilderTy & Builder)752 static Value *checkForNegativeOperand(BinaryOperator &I,
753 InstCombiner::BuilderTy &Builder) {
754 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
755
756 // This function creates 2 instructions to replace ADD, we need at least one
757 // of LHS or RHS to have one use to ensure benefit in transform.
758 if (!LHS->hasOneUse() && !RHS->hasOneUse())
759 return nullptr;
760
761 Value *X = nullptr, *Y = nullptr, *Z = nullptr;
762 const APInt *C1 = nullptr, *C2 = nullptr;
763
764 // if ONE is on other side, swap
765 if (match(RHS, m_Add(m_Value(X), m_One())))
766 std::swap(LHS, RHS);
767
768 if (match(LHS, m_Add(m_Value(X), m_One()))) {
769 // if XOR on other side, swap
770 if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
771 std::swap(X, RHS);
772
773 if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
774 // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
775 // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
776 if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
777 Value *NewAnd = Builder.CreateAnd(Z, *C1);
778 return Builder.CreateSub(RHS, NewAnd, "sub");
779 } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
780 // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
781 // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
782 Value *NewOr = Builder.CreateOr(Z, ~(*C1));
783 return Builder.CreateSub(RHS, NewOr, "sub");
784 }
785 }
786 }
787
788 // Restore LHS and RHS
789 LHS = I.getOperand(0);
790 RHS = I.getOperand(1);
791
792 // if XOR is on other side, swap
793 if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
794 std::swap(LHS, RHS);
795
796 // C2 is ODD
797 // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
798 // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
799 if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
800 if (C1->countr_zero() == 0)
801 if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
802 Value *NewOr = Builder.CreateOr(Z, ~(*C2));
803 return Builder.CreateSub(RHS, NewOr, "sub");
804 }
805 return nullptr;
806 }
807
808 /// Wrapping flags may allow combining constants separated by an extend.
foldNoWrapAdd(BinaryOperator & Add,InstCombiner::BuilderTy & Builder)809 static Instruction *foldNoWrapAdd(BinaryOperator &Add,
810 InstCombiner::BuilderTy &Builder) {
811 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
812 Type *Ty = Add.getType();
813 Constant *Op1C;
814 if (!match(Op1, m_Constant(Op1C)))
815 return nullptr;
816
817 // Try this match first because it results in an add in the narrow type.
818 // (zext (X +nuw C2)) + C1 --> zext (X + (C2 + trunc(C1)))
819 Value *X;
820 const APInt *C1, *C2;
821 if (match(Op1, m_APInt(C1)) &&
822 match(Op0, m_ZExt(m_NUWAddLike(m_Value(X), m_APInt(C2)))) &&
823 C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth()))) {
824 APInt NewC = *C2 + C1->trunc(C2->getBitWidth());
825 // If the smaller add will fold to zero, we don't need to check one use.
826 if (NewC.isZero())
827 return new ZExtInst(X, Ty);
828 // Otherwise only do this if the existing zero extend will be removed.
829 if (Op0->hasOneUse())
830 return new ZExtInst(
831 Builder.CreateNUWAdd(X, ConstantInt::get(X->getType(), NewC)), Ty);
832 }
833
834 // More general combining of constants in the wide type.
835 // (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
836 // or (zext nneg (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
837 Constant *NarrowC;
838 if (match(Op0, m_OneUse(m_SExtLike(
839 m_NSWAddLike(m_Value(X), m_Constant(NarrowC)))))) {
840 Value *WideC = Builder.CreateSExt(NarrowC, Ty);
841 Value *NewC = Builder.CreateAdd(WideC, Op1C);
842 Value *WideX = Builder.CreateSExt(X, Ty);
843 return BinaryOperator::CreateAdd(WideX, NewC);
844 }
845 // (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C)
846 if (match(Op0,
847 m_OneUse(m_ZExt(m_NUWAddLike(m_Value(X), m_Constant(NarrowC)))))) {
848 Value *WideC = Builder.CreateZExt(NarrowC, Ty);
849 Value *NewC = Builder.CreateAdd(WideC, Op1C);
850 Value *WideX = Builder.CreateZExt(X, Ty);
851 return BinaryOperator::CreateAdd(WideX, NewC);
852 }
853 return nullptr;
854 }
855
foldAddWithConstant(BinaryOperator & Add)856 Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
857 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
858 Type *Ty = Add.getType();
859 Constant *Op1C;
860 if (!match(Op1, m_ImmConstant(Op1C)))
861 return nullptr;
862
863 if (Instruction *NV = foldBinOpIntoSelectOrPhi(Add))
864 return NV;
865
866 Value *X;
867 Constant *Op00C;
868
869 // add (sub C1, X), C2 --> sub (add C1, C2), X
870 if (match(Op0, m_Sub(m_Constant(Op00C), m_Value(X))))
871 return BinaryOperator::CreateSub(ConstantExpr::getAdd(Op00C, Op1C), X);
872
873 Value *Y;
874
875 // add (sub X, Y), -1 --> add (not Y), X
876 if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y)))) &&
877 match(Op1, m_AllOnes()))
878 return BinaryOperator::CreateAdd(Builder.CreateNot(Y), X);
879
880 // zext(bool) + C -> bool ? C + 1 : C
881 if (match(Op0, m_ZExt(m_Value(X))) &&
882 X->getType()->getScalarSizeInBits() == 1)
883 return SelectInst::Create(X, InstCombiner::AddOne(Op1C), Op1);
884 // sext(bool) + C -> bool ? C - 1 : C
885 if (match(Op0, m_SExt(m_Value(X))) &&
886 X->getType()->getScalarSizeInBits() == 1)
887 return SelectInst::Create(X, InstCombiner::SubOne(Op1C), Op1);
888
889 // ~X + C --> (C-1) - X
890 if (match(Op0, m_Not(m_Value(X)))) {
891 // ~X + C has NSW and (C-1) won't oveflow => (C-1)-X can have NSW
892 auto *COne = ConstantInt::get(Op1C->getType(), 1);
893 bool WillNotSOV = willNotOverflowSignedSub(Op1C, COne, Add);
894 BinaryOperator *Res =
895 BinaryOperator::CreateSub(ConstantExpr::getSub(Op1C, COne), X);
896 Res->setHasNoSignedWrap(Add.hasNoSignedWrap() && WillNotSOV);
897 return Res;
898 }
899
900 // (iN X s>> (N - 1)) + 1 --> zext (X > -1)
901 const APInt *C;
902 unsigned BitWidth = Ty->getScalarSizeInBits();
903 if (match(Op0, m_OneUse(m_AShr(m_Value(X),
904 m_SpecificIntAllowPoison(BitWidth - 1)))) &&
905 match(Op1, m_One()))
906 return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);
907
908 if (!match(Op1, m_APInt(C)))
909 return nullptr;
910
911 // (X | Op01C) + Op1C --> X + (Op01C + Op1C) iff the `or` is actually an `add`
912 Constant *Op01C;
913 if (match(Op0, m_DisjointOr(m_Value(X), m_ImmConstant(Op01C)))) {
914 BinaryOperator *NewAdd =
915 BinaryOperator::CreateAdd(X, ConstantExpr::getAdd(Op01C, Op1C));
916 NewAdd->setHasNoSignedWrap(Add.hasNoSignedWrap() &&
917 willNotOverflowSignedAdd(Op01C, Op1C, Add));
918 NewAdd->setHasNoUnsignedWrap(Add.hasNoUnsignedWrap());
919 return NewAdd;
920 }
921
922 // (X | C2) + C --> (X | C2) ^ C2 iff (C2 == -C)
923 const APInt *C2;
924 if (match(Op0, m_Or(m_Value(), m_APInt(C2))) && *C2 == -*C)
925 return BinaryOperator::CreateXor(Op0, ConstantInt::get(Add.getType(), *C2));
926
927 if (C->isSignMask()) {
928 // If wrapping is not allowed, then the addition must set the sign bit:
929 // X + (signmask) --> X | signmask
930 if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
931 return BinaryOperator::CreateOr(Op0, Op1);
932
933 // If wrapping is allowed, then the addition flips the sign bit of LHS:
934 // X + (signmask) --> X ^ signmask
935 return BinaryOperator::CreateXor(Op0, Op1);
936 }
937
938 // Is this add the last step in a convoluted sext?
939 // add(zext(xor i16 X, -32768), -32768) --> sext X
940 if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) &&
941 C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
942 return CastInst::Create(Instruction::SExt, X, Ty);
943
944 if (match(Op0, m_Xor(m_Value(X), m_APInt(C2)))) {
945 // (X ^ signmask) + C --> (X + (signmask ^ C))
946 if (C2->isSignMask())
947 return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C2 ^ *C));
948
949 // If X has no high-bits set above an xor mask:
950 // add (xor X, LowMaskC), C --> sub (LowMaskC + C), X
951 if (C2->isMask()) {
952 KnownBits LHSKnown = computeKnownBits(X, &Add);
953 if ((*C2 | LHSKnown.Zero).isAllOnes())
954 return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C2 + *C), X);
955 }
956
957 // Look for a math+logic pattern that corresponds to sext-in-register of a
958 // value with cleared high bits. Convert that into a pair of shifts:
959 // add (xor X, 0x80), 0xF..F80 --> (X << ShAmtC) >>s ShAmtC
960 // add (xor X, 0xF..F80), 0x80 --> (X << ShAmtC) >>s ShAmtC
961 if (Op0->hasOneUse() && *C2 == -(*C)) {
962 unsigned BitWidth = Ty->getScalarSizeInBits();
963 unsigned ShAmt = 0;
964 if (C->isPowerOf2())
965 ShAmt = BitWidth - C->logBase2() - 1;
966 else if (C2->isPowerOf2())
967 ShAmt = BitWidth - C2->logBase2() - 1;
968 if (ShAmt &&
969 MaskedValueIsZero(X, APInt::getHighBitsSet(BitWidth, ShAmt), &Add)) {
970 Constant *ShAmtC = ConstantInt::get(Ty, ShAmt);
971 Value *NewShl = Builder.CreateShl(X, ShAmtC, "sext");
972 return BinaryOperator::CreateAShr(NewShl, ShAmtC);
973 }
974 }
975 }
976
977 if (C->isOne() && Op0->hasOneUse()) {
978 // add (sext i1 X), 1 --> zext (not X)
979 // TODO: The smallest IR representation is (select X, 0, 1), and that would
980 // not require the one-use check. But we need to remove a transform in
981 // visitSelect and make sure that IR value tracking for select is equal or
982 // better than for these ops.
983 if (match(Op0, m_SExt(m_Value(X))) &&
984 X->getType()->getScalarSizeInBits() == 1)
985 return new ZExtInst(Builder.CreateNot(X), Ty);
986
987 // Shifts and add used to flip and mask off the low bit:
988 // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
989 const APInt *C3;
990 if (match(Op0, m_AShr(m_Shl(m_Value(X), m_APInt(C2)), m_APInt(C3))) &&
991 C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) {
992 Value *NotX = Builder.CreateNot(X);
993 return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));
994 }
995 }
996
997 // umax(X, C) + -C --> usub.sat(X, C)
998 if (match(Op0, m_OneUse(m_UMax(m_Value(X), m_SpecificInt(-*C)))))
999 return replaceInstUsesWith(
1000 Add, Builder.CreateBinaryIntrinsic(
1001 Intrinsic::usub_sat, X, ConstantInt::get(Add.getType(), -*C)));
1002
1003 // Fold (add (zext (add X, -1)), 1) -> (zext X) if X is non-zero.
1004 // TODO: There's a general form for any constant on the outer add.
1005 if (C->isOne()) {
1006 if (match(Op0, m_ZExt(m_Add(m_Value(X), m_AllOnes())))) {
1007 const SimplifyQuery Q = SQ.getWithInstruction(&Add);
1008 if (llvm::isKnownNonZero(X, Q))
1009 return new ZExtInst(X, Ty);
1010 }
1011 }
1012
1013 return nullptr;
1014 }
1015
1016 // match variations of a^2 + 2*a*b + b^2
1017 //
1018 // to reuse the code between the FP and Int versions, the instruction OpCodes
1019 // and constant types have been turned into template parameters.
1020 //
1021 // Mul2Rhs: The constant to perform the multiplicative equivalent of X*2 with;
1022 // should be `m_SpecificFP(2.0)` for FP and `m_SpecificInt(1)` for Int
1023 // (we're matching `X<<1` instead of `X*2` for Int)
1024 template <bool FP, typename Mul2Rhs>
matchesSquareSum(BinaryOperator & I,Mul2Rhs M2Rhs,Value * & A,Value * & B)1025 static bool matchesSquareSum(BinaryOperator &I, Mul2Rhs M2Rhs, Value *&A,
1026 Value *&B) {
1027 constexpr unsigned MulOp = FP ? Instruction::FMul : Instruction::Mul;
1028 constexpr unsigned AddOp = FP ? Instruction::FAdd : Instruction::Add;
1029 constexpr unsigned Mul2Op = FP ? Instruction::FMul : Instruction::Shl;
1030
1031 // (a * a) + (((a * 2) + b) * b)
1032 if (match(&I, m_c_BinOp(
1033 AddOp, m_OneUse(m_BinOp(MulOp, m_Value(A), m_Deferred(A))),
1034 m_OneUse(m_c_BinOp(
1035 MulOp,
1036 m_c_BinOp(AddOp, m_BinOp(Mul2Op, m_Deferred(A), M2Rhs),
1037 m_Value(B)),
1038 m_Deferred(B))))))
1039 return true;
1040
1041 // ((a * b) * 2) or ((a * 2) * b)
1042 // +
1043 // (a * a + b * b) or (b * b + a * a)
1044 return match(
1045 &I, m_c_BinOp(
1046 AddOp,
1047 m_CombineOr(
1048 m_OneUse(m_BinOp(
1049 Mul2Op, m_BinOp(MulOp, m_Value(A), m_Value(B)), M2Rhs)),
1050 m_OneUse(m_c_BinOp(MulOp, m_BinOp(Mul2Op, m_Value(A), M2Rhs),
1051 m_Value(B)))),
1052 m_OneUse(
1053 m_c_BinOp(AddOp, m_BinOp(MulOp, m_Deferred(A), m_Deferred(A)),
1054 m_BinOp(MulOp, m_Deferred(B), m_Deferred(B))))));
1055 }
1056
1057 // Fold integer variations of a^2 + 2*a*b + b^2 -> (a + b)^2
foldSquareSumInt(BinaryOperator & I)1058 Instruction *InstCombinerImpl::foldSquareSumInt(BinaryOperator &I) {
1059 Value *A, *B;
1060 if (matchesSquareSum</*FP*/ false>(I, m_SpecificInt(1), A, B)) {
1061 Value *AB = Builder.CreateAdd(A, B);
1062 return BinaryOperator::CreateMul(AB, AB);
1063 }
1064 return nullptr;
1065 }
1066
1067 // Fold floating point variations of a^2 + 2*a*b + b^2 -> (a + b)^2
1068 // Requires `nsz` and `reassoc`.
foldSquareSumFP(BinaryOperator & I)1069 Instruction *InstCombinerImpl::foldSquareSumFP(BinaryOperator &I) {
1070 assert(I.hasAllowReassoc() && I.hasNoSignedZeros() && "Assumption mismatch");
1071 Value *A, *B;
1072 if (matchesSquareSum</*FP*/ true>(I, m_SpecificFP(2.0), A, B)) {
1073 Value *AB = Builder.CreateFAddFMF(A, B, &I);
1074 return BinaryOperator::CreateFMulFMF(AB, AB, &I);
1075 }
1076 return nullptr;
1077 }
1078
1079 // Matches multiplication expression Op * C where C is a constant. Returns the
1080 // constant value in C and the other operand in Op. Returns true if such a
1081 // match is found.
MatchMul(Value * E,Value * & Op,APInt & C)1082 static bool MatchMul(Value *E, Value *&Op, APInt &C) {
1083 const APInt *AI;
1084 if (match(E, m_Mul(m_Value(Op), m_APInt(AI)))) {
1085 C = *AI;
1086 return true;
1087 }
1088 if (match(E, m_Shl(m_Value(Op), m_APInt(AI)))) {
1089 C = APInt(AI->getBitWidth(), 1);
1090 C <<= *AI;
1091 return true;
1092 }
1093 return false;
1094 }
1095
1096 // Matches remainder expression Op % C where C is a constant. Returns the
1097 // constant value in C and the other operand in Op. Returns the signedness of
1098 // the remainder operation in IsSigned. Returns true if such a match is
1099 // found.
MatchRem(Value * E,Value * & Op,APInt & C,bool & IsSigned)1100 static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) {
1101 const APInt *AI;
1102 IsSigned = false;
1103 if (match(E, m_SRem(m_Value(Op), m_APInt(AI)))) {
1104 IsSigned = true;
1105 C = *AI;
1106 return true;
1107 }
1108 if (match(E, m_URem(m_Value(Op), m_APInt(AI)))) {
1109 C = *AI;
1110 return true;
1111 }
1112 if (match(E, m_And(m_Value(Op), m_APInt(AI))) && (*AI + 1).isPowerOf2()) {
1113 C = *AI + 1;
1114 return true;
1115 }
1116 return false;
1117 }
1118
1119 // Matches division expression Op / C with the given signedness as indicated
1120 // by IsSigned, where C is a constant. Returns the constant value in C and the
1121 // other operand in Op. Returns true if such a match is found.
MatchDiv(Value * E,Value * & Op,APInt & C,bool IsSigned)1122 static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) {
1123 const APInt *AI;
1124 if (IsSigned && match(E, m_SDiv(m_Value(Op), m_APInt(AI)))) {
1125 C = *AI;
1126 return true;
1127 }
1128 if (!IsSigned) {
1129 if (match(E, m_UDiv(m_Value(Op), m_APInt(AI)))) {
1130 C = *AI;
1131 return true;
1132 }
1133 if (match(E, m_LShr(m_Value(Op), m_APInt(AI)))) {
1134 C = APInt(AI->getBitWidth(), 1);
1135 C <<= *AI;
1136 return true;
1137 }
1138 }
1139 return false;
1140 }
1141
1142 // Returns whether C0 * C1 with the given signedness overflows.
MulWillOverflow(APInt & C0,APInt & C1,bool IsSigned)1143 static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) {
1144 bool overflow;
1145 if (IsSigned)
1146 (void)C0.smul_ov(C1, overflow);
1147 else
1148 (void)C0.umul_ov(C1, overflow);
1149 return overflow;
1150 }
1151
1152 // Simplifies X % C0 + (( X / C0 ) % C1) * C0 to X % (C0 * C1), where (C0 * C1)
1153 // does not overflow.
1154 // Simplifies (X / C0) * C1 + (X % C0) * C2 to
1155 // (X / C0) * (C1 - C2 * C0) + X * C2
SimplifyAddWithRemainder(BinaryOperator & I)1156 Value *InstCombinerImpl::SimplifyAddWithRemainder(BinaryOperator &I) {
1157 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1158 Value *X, *MulOpV;
1159 APInt C0, MulOpC;
1160 bool IsSigned;
1161 // Match I = X % C0 + MulOpV * C0
1162 if (((MatchRem(LHS, X, C0, IsSigned) && MatchMul(RHS, MulOpV, MulOpC)) ||
1163 (MatchRem(RHS, X, C0, IsSigned) && MatchMul(LHS, MulOpV, MulOpC))) &&
1164 C0 == MulOpC) {
1165 Value *RemOpV;
1166 APInt C1;
1167 bool Rem2IsSigned;
1168 // Match MulOpC = RemOpV % C1
1169 if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
1170 IsSigned == Rem2IsSigned) {
1171 Value *DivOpV;
1172 APInt DivOpC;
1173 // Match RemOpV = X / C0
1174 if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&
1175 C0 == DivOpC && !MulWillOverflow(C0, C1, IsSigned)) {
1176 Value *NewDivisor = ConstantInt::get(X->getType(), C0 * C1);
1177 return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem")
1178 : Builder.CreateURem(X, NewDivisor, "urem");
1179 }
1180 }
1181 }
1182
1183 // Match I = (X / C0) * C1 + (X % C0) * C2
1184 Value *Div, *Rem;
1185 APInt C1, C2;
1186 if (!LHS->hasOneUse() || !MatchMul(LHS, Div, C1))
1187 Div = LHS, C1 = APInt(I.getType()->getScalarSizeInBits(), 1);
1188 if (!RHS->hasOneUse() || !MatchMul(RHS, Rem, C2))
1189 Rem = RHS, C2 = APInt(I.getType()->getScalarSizeInBits(), 1);
1190 if (match(Div, m_IRem(m_Value(), m_Value()))) {
1191 std::swap(Div, Rem);
1192 std::swap(C1, C2);
1193 }
1194 Value *DivOpV;
1195 APInt DivOpC;
1196 if (MatchRem(Rem, X, C0, IsSigned) &&
1197 MatchDiv(Div, DivOpV, DivOpC, IsSigned) && X == DivOpV && C0 == DivOpC &&
1198 // Avoid unprofitable replacement of and with mul.
1199 !(C1.isOne() && !IsSigned && DivOpC.isPowerOf2() && DivOpC != 2)) {
1200 APInt NewC = C1 - C2 * C0;
1201 if (!NewC.isZero() && !Rem->hasOneUse())
1202 return nullptr;
1203 if (!isGuaranteedNotToBeUndef(X, &AC, &I, &DT))
1204 return nullptr;
1205 Value *MulXC2 = Builder.CreateMul(X, ConstantInt::get(X->getType(), C2));
1206 if (NewC.isZero())
1207 return MulXC2;
1208 return Builder.CreateAdd(
1209 Builder.CreateMul(Div, ConstantInt::get(X->getType(), NewC)), MulXC2);
1210 }
1211
1212 return nullptr;
1213 }
1214
1215 /// Fold
1216 /// (1 << NBits) - 1
1217 /// Into:
1218 /// ~(-(1 << NBits))
1219 /// Because a 'not' is better for bit-tracking analysis and other transforms
1220 /// than an 'add'. The new shl is always nsw, and is nuw if old `and` was.
canonicalizeLowbitMask(BinaryOperator & I,InstCombiner::BuilderTy & Builder)1221 static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
1222 InstCombiner::BuilderTy &Builder) {
1223 Value *NBits;
1224 if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
1225 return nullptr;
1226
1227 Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
1228 Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
1229 // Be wary of constant folding.
1230 if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
1231 // Always NSW. But NUW propagates from `add`.
1232 BOp->setHasNoSignedWrap();
1233 BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
1234 }
1235
1236 return BinaryOperator::CreateNot(NotMask, I.getName());
1237 }
1238
foldToUnsignedSaturatedAdd(BinaryOperator & I)1239 static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
1240 assert(I.getOpcode() == Instruction::Add && "Expecting add instruction");
1241 Type *Ty = I.getType();
1242 auto getUAddSat = [&]() {
1243 return Intrinsic::getOrInsertDeclaration(I.getModule(), Intrinsic::uadd_sat,
1244 Ty);
1245 };
1246
1247 // add (umin X, ~Y), Y --> uaddsat X, Y
1248 Value *X, *Y;
1249 if (match(&I, m_c_Add(m_c_UMin(m_Value(X), m_Not(m_Value(Y))),
1250 m_Deferred(Y))))
1251 return CallInst::Create(getUAddSat(), { X, Y });
1252
1253 // add (umin X, ~C), C --> uaddsat X, C
1254 const APInt *C, *NotC;
1255 if (match(&I, m_Add(m_UMin(m_Value(X), m_APInt(NotC)), m_APInt(C))) &&
1256 *C == ~*NotC)
1257 return CallInst::Create(getUAddSat(), { X, ConstantInt::get(Ty, *C) });
1258
1259 return nullptr;
1260 }
1261
1262 // Transform:
1263 // (add A, (shl (neg B), Y))
1264 // -> (sub A, (shl B, Y))
combineAddSubWithShlAddSub(InstCombiner::BuilderTy & Builder,const BinaryOperator & I)1265 static Instruction *combineAddSubWithShlAddSub(InstCombiner::BuilderTy &Builder,
1266 const BinaryOperator &I) {
1267 Value *A, *B, *Cnt;
1268 if (match(&I,
1269 m_c_Add(m_OneUse(m_Shl(m_OneUse(m_Neg(m_Value(B))), m_Value(Cnt))),
1270 m_Value(A)))) {
1271 Value *NewShl = Builder.CreateShl(B, Cnt);
1272 return BinaryOperator::CreateSub(A, NewShl);
1273 }
1274 return nullptr;
1275 }
1276
1277 /// Try to reduce signed division by power-of-2 to an arithmetic shift right.
foldAddToAshr(BinaryOperator & Add)1278 static Instruction *foldAddToAshr(BinaryOperator &Add) {
1279 // Division must be by power-of-2, but not the minimum signed value.
1280 Value *X;
1281 const APInt *DivC;
1282 if (!match(Add.getOperand(0), m_SDiv(m_Value(X), m_Power2(DivC))) ||
1283 DivC->isNegative())
1284 return nullptr;
1285
1286 // Rounding is done by adding -1 if the dividend (X) is negative and has any
1287 // low bits set. It recognizes two canonical patterns:
1288 // 1. For an 'ugt' cmp with the signed minimum value (SMIN), the
1289 // pattern is: sext (icmp ugt (X & (DivC - 1)), SMIN).
1290 // 2. For an 'eq' cmp, the pattern's: sext (icmp eq X & (SMIN + 1), SMIN + 1).
1291 // Note that, by the time we end up here, if possible, ugt has been
1292 // canonicalized into eq.
1293 const APInt *MaskC, *MaskCCmp;
1294 CmpPredicate Pred;
1295 if (!match(Add.getOperand(1),
1296 m_SExt(m_ICmp(Pred, m_And(m_Specific(X), m_APInt(MaskC)),
1297 m_APInt(MaskCCmp)))))
1298 return nullptr;
1299
1300 if ((Pred != ICmpInst::ICMP_UGT || !MaskCCmp->isSignMask()) &&
1301 (Pred != ICmpInst::ICMP_EQ || *MaskCCmp != *MaskC))
1302 return nullptr;
1303
1304 APInt SMin = APInt::getSignedMinValue(Add.getType()->getScalarSizeInBits());
1305 bool IsMaskValid = Pred == ICmpInst::ICMP_UGT
1306 ? (*MaskC == (SMin | (*DivC - 1)))
1307 : (*DivC == 2 && *MaskC == SMin + 1);
1308 if (!IsMaskValid)
1309 return nullptr;
1310
1311 // (X / DivC) + sext ((X & (SMin | (DivC - 1)) >u SMin) --> X >>s log2(DivC)
1312 return BinaryOperator::CreateAShr(
1313 X, ConstantInt::get(Add.getType(), DivC->exactLogBase2()));
1314 }
1315
foldAddLikeCommutative(Value * LHS,Value * RHS,bool NSW,bool NUW)1316 Instruction *InstCombinerImpl::foldAddLikeCommutative(Value *LHS, Value *RHS,
1317 bool NSW, bool NUW) {
1318 Value *A, *B, *C;
1319 if (match(LHS, m_Sub(m_Value(A), m_Value(B))) &&
1320 match(RHS, m_Sub(m_Value(C), m_Specific(A)))) {
1321 Instruction *R = BinaryOperator::CreateSub(C, B);
1322 bool NSWOut = NSW && match(LHS, m_NSWSub(m_Value(), m_Value())) &&
1323 match(RHS, m_NSWSub(m_Value(), m_Value()));
1324
1325 bool NUWOut = match(LHS, m_NUWSub(m_Value(), m_Value())) &&
1326 match(RHS, m_NUWSub(m_Value(), m_Value()));
1327 R->setHasNoSignedWrap(NSWOut);
1328 R->setHasNoUnsignedWrap(NUWOut);
1329 return R;
1330 }
1331
1332 // ((X s/ C1) << C2) + X => X s% -C1 where -C1 is 1 << C2
1333 const APInt *C1, *C2;
1334 if (match(LHS, m_Shl(m_SDiv(m_Specific(RHS), m_APInt(C1)), m_APInt(C2)))) {
1335 APInt One(C2->getBitWidth(), 1);
1336 APInt MinusC1 = -(*C1);
1337 if (MinusC1 == (One << *C2)) {
1338 Constant *NewRHS = ConstantInt::get(RHS->getType(), MinusC1);
1339 return BinaryOperator::CreateSRem(RHS, NewRHS);
1340 }
1341 }
1342
1343 return nullptr;
1344 }
1345
1346 Instruction *InstCombinerImpl::
canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(BinaryOperator & I)1347 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
1348 BinaryOperator &I) {
1349 assert((I.getOpcode() == Instruction::Add ||
1350 I.getOpcode() == Instruction::Or ||
1351 I.getOpcode() == Instruction::Sub) &&
1352 "Expecting add/or/sub instruction");
1353
1354 // We have a subtraction/addition between a (potentially truncated) *logical*
1355 // right-shift of X and a "select".
1356 Value *X, *Select;
1357 Instruction *LowBitsToSkip, *Extract;
1358 if (!match(&I, m_c_BinOp(m_TruncOrSelf(m_CombineAnd(
1359 m_LShr(m_Value(X), m_Instruction(LowBitsToSkip)),
1360 m_Instruction(Extract))),
1361 m_Value(Select))))
1362 return nullptr;
1363
1364 // `add`/`or` is commutative; but for `sub`, "select" *must* be on RHS.
1365 if (I.getOpcode() == Instruction::Sub && I.getOperand(1) != Select)
1366 return nullptr;
1367
1368 Type *XTy = X->getType();
1369 bool HadTrunc = I.getType() != XTy;
1370
1371 // If there was a truncation of extracted value, then we'll need to produce
1372 // one extra instruction, so we need to ensure one instruction will go away.
1373 if (HadTrunc && !match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
1374 return nullptr;
1375
1376 // Extraction should extract high NBits bits, with shift amount calculated as:
1377 // low bits to skip = shift bitwidth - high bits to extract
1378 // The shift amount itself may be extended, and we need to look past zero-ext
1379 // when matching NBits, that will matter for matching later.
1380 Value *NBits;
1381 if (!match(LowBitsToSkip,
1382 m_ZExtOrSelf(m_Sub(m_SpecificInt(XTy->getScalarSizeInBits()),
1383 m_ZExtOrSelf(m_Value(NBits))))))
1384 return nullptr;
1385
1386 // Sign-extending value can be zero-extended if we `sub`tract it,
1387 // or sign-extended otherwise.
1388 auto SkipExtInMagic = [&I](Value *&V) {
1389 if (I.getOpcode() == Instruction::Sub)
1390 match(V, m_ZExtOrSelf(m_Value(V)));
1391 else
1392 match(V, m_SExtOrSelf(m_Value(V)));
1393 };
1394
1395 // Now, finally validate the sign-extending magic.
1396 // `select` itself may be appropriately extended, look past that.
1397 SkipExtInMagic(Select);
1398
1399 CmpPredicate Pred;
1400 const APInt *Thr;
1401 Value *SignExtendingValue, *Zero;
1402 bool ShouldSignext;
1403 // It must be a select between two values we will later establish to be a
1404 // sign-extending value and a zero constant. The condition guarding the
1405 // sign-extension must be based on a sign bit of the same X we had in `lshr`.
1406 if (!match(Select, m_Select(m_ICmp(Pred, m_Specific(X), m_APInt(Thr)),
1407 m_Value(SignExtendingValue), m_Value(Zero))) ||
1408 !isSignBitCheck(Pred, *Thr, ShouldSignext))
1409 return nullptr;
1410
1411 // icmp-select pair is commutative.
1412 if (!ShouldSignext)
1413 std::swap(SignExtendingValue, Zero);
1414
1415 // If we should not perform sign-extension then we must add/or/subtract zero.
1416 if (!match(Zero, m_Zero()))
1417 return nullptr;
1418 // Otherwise, it should be some constant, left-shifted by the same NBits we
1419 // had in `lshr`. Said left-shift can also be appropriately extended.
1420 // Again, we must look past zero-ext when looking for NBits.
1421 SkipExtInMagic(SignExtendingValue);
1422 Constant *SignExtendingValueBaseConstant;
1423 if (!match(SignExtendingValue,
1424 m_Shl(m_Constant(SignExtendingValueBaseConstant),
1425 m_ZExtOrSelf(m_Specific(NBits)))))
1426 return nullptr;
1427 // If we `sub`, then the constant should be one, else it should be all-ones.
1428 if (I.getOpcode() == Instruction::Sub
1429 ? !match(SignExtendingValueBaseConstant, m_One())
1430 : !match(SignExtendingValueBaseConstant, m_AllOnes()))
1431 return nullptr;
1432
1433 auto *NewAShr = BinaryOperator::CreateAShr(X, LowBitsToSkip,
1434 Extract->getName() + ".sext");
1435 NewAShr->copyIRFlags(Extract); // Preserve `exact`-ness.
1436 if (!HadTrunc)
1437 return NewAShr;
1438
1439 Builder.Insert(NewAShr);
1440 return TruncInst::CreateTruncOrBitCast(NewAShr, I.getType());
1441 }
1442
1443 /// This is a specialization of a more general transform from
1444 /// foldUsingDistributiveLaws. If that code can be made to work optimally
1445 /// for multi-use cases or propagating nsw/nuw, then we would not need this.
factorizeMathWithShlOps(BinaryOperator & I,InstCombiner::BuilderTy & Builder)1446 static Instruction *factorizeMathWithShlOps(BinaryOperator &I,
1447 InstCombiner::BuilderTy &Builder) {
1448 // TODO: Also handle mul by doubling the shift amount?
1449 assert((I.getOpcode() == Instruction::Add ||
1450 I.getOpcode() == Instruction::Sub) &&
1451 "Expected add/sub");
1452 auto *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
1453 auto *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
1454 if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse()))
1455 return nullptr;
1456
1457 Value *X, *Y, *ShAmt;
1458 if (!match(Op0, m_Shl(m_Value(X), m_Value(ShAmt))) ||
1459 !match(Op1, m_Shl(m_Value(Y), m_Specific(ShAmt))))
1460 return nullptr;
1461
1462 // No-wrap propagates only when all ops have no-wrap.
1463 bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() &&
1464 Op1->hasNoSignedWrap();
1465 bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() &&
1466 Op1->hasNoUnsignedWrap();
1467
1468 // add/sub (X << ShAmt), (Y << ShAmt) --> (add/sub X, Y) << ShAmt
1469 Value *NewMath = Builder.CreateBinOp(I.getOpcode(), X, Y);
1470 if (auto *NewI = dyn_cast<BinaryOperator>(NewMath)) {
1471 NewI->setHasNoSignedWrap(HasNSW);
1472 NewI->setHasNoUnsignedWrap(HasNUW);
1473 }
1474 auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt);
1475 NewShl->setHasNoSignedWrap(HasNSW);
1476 NewShl->setHasNoUnsignedWrap(HasNUW);
1477 return NewShl;
1478 }
1479
1480 /// Reduce a sequence of masked half-width multiplies to a single multiply.
1481 /// ((XLow * YHigh) + (YLow * XHigh)) << HalfBits) + (XLow * YLow) --> X * Y
foldBoxMultiply(BinaryOperator & I)1482 static Instruction *foldBoxMultiply(BinaryOperator &I) {
1483 unsigned BitWidth = I.getType()->getScalarSizeInBits();
1484 // Skip the odd bitwidth types.
1485 if ((BitWidth & 0x1))
1486 return nullptr;
1487
1488 unsigned HalfBits = BitWidth >> 1;
1489 APInt HalfMask = APInt::getMaxValue(HalfBits);
1490
1491 // ResLo = (CrossSum << HalfBits) + (YLo * XLo)
1492 Value *XLo, *YLo;
1493 Value *CrossSum;
1494 // Require one-use on the multiply to avoid increasing the number of
1495 // multiplications.
1496 if (!match(&I, m_c_Add(m_Shl(m_Value(CrossSum), m_SpecificInt(HalfBits)),
1497 m_OneUse(m_Mul(m_Value(YLo), m_Value(XLo))))))
1498 return nullptr;
1499
1500 // XLo = X & HalfMask
1501 // YLo = Y & HalfMask
1502 // TODO: Refactor with SimplifyDemandedBits or KnownBits known leading zeros
1503 // to enhance robustness
1504 Value *X, *Y;
1505 if (!match(XLo, m_And(m_Value(X), m_SpecificInt(HalfMask))) ||
1506 !match(YLo, m_And(m_Value(Y), m_SpecificInt(HalfMask))))
1507 return nullptr;
1508
1509 // CrossSum = (X' * (Y >> Halfbits)) + (Y' * (X >> HalfBits))
1510 // X' can be either X or XLo in the pattern (and the same for Y')
1511 if (match(CrossSum,
1512 m_c_Add(m_c_Mul(m_LShr(m_Specific(Y), m_SpecificInt(HalfBits)),
1513 m_CombineOr(m_Specific(X), m_Specific(XLo))),
1514 m_c_Mul(m_LShr(m_Specific(X), m_SpecificInt(HalfBits)),
1515 m_CombineOr(m_Specific(Y), m_Specific(YLo))))))
1516 return BinaryOperator::CreateMul(X, Y);
1517
1518 return nullptr;
1519 }
1520
visitAdd(BinaryOperator & I)1521 Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
1522 if (Value *V = simplifyAddInst(I.getOperand(0), I.getOperand(1),
1523 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
1524 SQ.getWithInstruction(&I)))
1525 return replaceInstUsesWith(I, V);
1526
1527 if (SimplifyAssociativeOrCommutative(I))
1528 return &I;
1529
1530 if (Instruction *X = foldVectorBinop(I))
1531 return X;
1532
1533 if (Instruction *Phi = foldBinopWithPhiOperands(I))
1534 return Phi;
1535
1536 // (A*B)+(A*C) -> A*(B+C) etc
1537 if (Value *V = foldUsingDistributiveLaws(I))
1538 return replaceInstUsesWith(I, V);
1539
1540 if (Instruction *R = foldBoxMultiply(I))
1541 return R;
1542
1543 if (Instruction *R = factorizeMathWithShlOps(I, Builder))
1544 return R;
1545
1546 if (Instruction *X = foldAddWithConstant(I))
1547 return X;
1548
1549 if (Instruction *X = foldNoWrapAdd(I, Builder))
1550 return X;
1551
1552 if (Instruction *R = foldBinOpShiftWithShift(I))
1553 return R;
1554
1555 if (Instruction *R = combineAddSubWithShlAddSub(Builder, I))
1556 return R;
1557
1558 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1559 if (Instruction *R = foldAddLikeCommutative(LHS, RHS, I.hasNoSignedWrap(),
1560 I.hasNoUnsignedWrap()))
1561 return R;
1562 if (Instruction *R = foldAddLikeCommutative(RHS, LHS, I.hasNoSignedWrap(),
1563 I.hasNoUnsignedWrap()))
1564 return R;
1565 Type *Ty = I.getType();
1566 if (Ty->isIntOrIntVectorTy(1))
1567 return BinaryOperator::CreateXor(LHS, RHS);
1568
1569 // X + X --> X << 1
1570 if (LHS == RHS) {
1571 auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));
1572 Shl->setHasNoSignedWrap(I.hasNoSignedWrap());
1573 Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
1574 return Shl;
1575 }
1576
1577 Value *A, *B;
1578 if (match(LHS, m_Neg(m_Value(A)))) {
1579 // -A + -B --> -(A + B)
1580 if (match(RHS, m_Neg(m_Value(B))))
1581 return BinaryOperator::CreateNeg(Builder.CreateAdd(A, B));
1582
1583 // -A + B --> B - A
1584 auto *Sub = BinaryOperator::CreateSub(RHS, A);
1585 auto *OB0 = cast<OverflowingBinaryOperator>(LHS);
1586 Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OB0->hasNoSignedWrap());
1587
1588 return Sub;
1589 }
1590
1591 // A + -B --> A - B
1592 if (match(RHS, m_Neg(m_Value(B)))) {
1593 auto *Sub = BinaryOperator::CreateSub(LHS, B);
1594 auto *OBO = cast<OverflowingBinaryOperator>(RHS);
1595 Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO->hasNoSignedWrap());
1596 return Sub;
1597 }
1598
1599 if (Value *V = checkForNegativeOperand(I, Builder))
1600 return replaceInstUsesWith(I, V);
1601
1602 // (A + 1) + ~B --> A - B
1603 // ~B + (A + 1) --> A - B
1604 // (~B + A) + 1 --> A - B
1605 // (A + ~B) + 1 --> A - B
1606 if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))) ||
1607 match(&I, m_BinOp(m_c_Add(m_Not(m_Value(B)), m_Value(A)), m_One())))
1608 return BinaryOperator::CreateSub(A, B);
1609
1610 // (A + RHS) + RHS --> A + (RHS << 1)
1611 if (match(LHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(RHS)))))
1612 return BinaryOperator::CreateAdd(A, Builder.CreateShl(RHS, 1, "reass.add"));
1613
1614 // LHS + (A + LHS) --> A + (LHS << 1)
1615 if (match(RHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(LHS)))))
1616 return BinaryOperator::CreateAdd(A, Builder.CreateShl(LHS, 1, "reass.add"));
1617
1618 {
1619 // (A + C1) + (C2 - B) --> (A - B) + (C1 + C2)
1620 Constant *C1, *C2;
1621 if (match(&I, m_c_Add(m_Add(m_Value(A), m_ImmConstant(C1)),
1622 m_Sub(m_ImmConstant(C2), m_Value(B)))) &&
1623 (LHS->hasOneUse() || RHS->hasOneUse())) {
1624 Value *Sub = Builder.CreateSub(A, B);
1625 return BinaryOperator::CreateAdd(Sub, ConstantExpr::getAdd(C1, C2));
1626 }
1627
1628 // Canonicalize a constant sub operand as an add operand for better folding:
1629 // (C1 - A) + B --> (B - A) + C1
1630 if (match(&I, m_c_Add(m_OneUse(m_Sub(m_ImmConstant(C1), m_Value(A))),
1631 m_Value(B)))) {
1632 Value *Sub = Builder.CreateSub(B, A, "reass.sub");
1633 return BinaryOperator::CreateAdd(Sub, C1);
1634 }
1635 }
1636
1637 // X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1)
1638 if (Value *V = SimplifyAddWithRemainder(I)) return replaceInstUsesWith(I, V);
1639
1640 const APInt *C1;
1641 // (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit
1642 if (match(&I, m_c_Add(m_And(m_Value(A), m_APInt(C1)), m_Deferred(A))) &&
1643 C1->isPowerOf2() && (ComputeNumSignBits(A) > C1->countl_zero())) {
1644 Constant *NewMask = ConstantInt::get(RHS->getType(), *C1 - 1);
1645 return BinaryOperator::CreateAnd(A, NewMask);
1646 }
1647
1648 // ZExt (B - A) + ZExt(A) --> ZExt(B)
1649 if ((match(RHS, m_ZExt(m_Value(A))) &&
1650 match(LHS, m_ZExt(m_NUWSub(m_Value(B), m_Specific(A))))) ||
1651 (match(LHS, m_ZExt(m_Value(A))) &&
1652 match(RHS, m_ZExt(m_NUWSub(m_Value(B), m_Specific(A))))))
1653 return new ZExtInst(B, LHS->getType());
1654
1655 // zext(A) + sext(A) --> 0 if A is i1
1656 if (match(&I, m_c_BinOp(m_ZExt(m_Value(A)), m_SExt(m_Deferred(A)))) &&
1657 A->getType()->isIntOrIntVectorTy(1))
1658 return replaceInstUsesWith(I, Constant::getNullValue(I.getType()));
1659
1660 // sext(A < B) + zext(A > B) => ucmp/scmp(A, B)
1661 CmpPredicate LTPred, GTPred;
1662 if (match(&I,
1663 m_c_Add(m_SExt(m_c_ICmp(LTPred, m_Value(A), m_Value(B))),
1664 m_ZExt(m_c_ICmp(GTPred, m_Deferred(A), m_Deferred(B))))) &&
1665 A->getType()->isIntOrIntVectorTy()) {
1666 if (ICmpInst::isGT(LTPred)) {
1667 std::swap(LTPred, GTPred);
1668 std::swap(A, B);
1669 }
1670
1671 if (ICmpInst::isLT(LTPred) && ICmpInst::isGT(GTPred) &&
1672 ICmpInst::isSigned(LTPred) == ICmpInst::isSigned(GTPred))
1673 return replaceInstUsesWith(
1674 I, Builder.CreateIntrinsic(
1675 Ty,
1676 ICmpInst::isSigned(LTPred) ? Intrinsic::scmp : Intrinsic::ucmp,
1677 {A, B}));
1678 }
1679
1680 // A+B --> A|B iff A and B have no bits set in common.
1681 WithCache<const Value *> LHSCache(LHS), RHSCache(RHS);
1682 if (haveNoCommonBitsSet(LHSCache, RHSCache, SQ.getWithInstruction(&I)))
1683 return BinaryOperator::CreateDisjointOr(LHS, RHS);
1684
1685 if (Instruction *Ext = narrowMathIfNoOverflow(I))
1686 return Ext;
1687
1688 // (add (xor A, B) (and A, B)) --> (or A, B)
1689 // (add (and A, B) (xor A, B)) --> (or A, B)
1690 if (match(&I, m_c_BinOp(m_Xor(m_Value(A), m_Value(B)),
1691 m_c_And(m_Deferred(A), m_Deferred(B)))))
1692 return BinaryOperator::CreateOr(A, B);
1693
1694 // (add (or A, B) (and A, B)) --> (add A, B)
1695 // (add (and A, B) (or A, B)) --> (add A, B)
1696 if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)),
1697 m_c_And(m_Deferred(A), m_Deferred(B))))) {
1698 // Replacing operands in-place to preserve nuw/nsw flags.
1699 replaceOperand(I, 0, A);
1700 replaceOperand(I, 1, B);
1701 return &I;
1702 }
1703
1704 // (add A (or A, -A)) --> (and (add A, -1) A)
1705 // (add A (or -A, A)) --> (and (add A, -1) A)
1706 // (add (or A, -A) A) --> (and (add A, -1) A)
1707 // (add (or -A, A) A) --> (and (add A, -1) A)
1708 if (match(&I, m_c_BinOp(m_Value(A), m_OneUse(m_c_Or(m_Neg(m_Deferred(A)),
1709 m_Deferred(A)))))) {
1710 Value *Add =
1711 Builder.CreateAdd(A, Constant::getAllOnesValue(A->getType()), "",
1712 I.hasNoUnsignedWrap(), I.hasNoSignedWrap());
1713 return BinaryOperator::CreateAnd(Add, A);
1714 }
1715
1716 // Canonicalize ((A & -A) - 1) --> ((A - 1) & ~A)
1717 // Forms all commutable operations, and simplifies ctpop -> cttz folds.
1718 if (match(&I,
1719 m_Add(m_OneUse(m_c_And(m_Value(A), m_OneUse(m_Neg(m_Deferred(A))))),
1720 m_AllOnes()))) {
1721 Constant *AllOnes = ConstantInt::getAllOnesValue(RHS->getType());
1722 Value *Dec = Builder.CreateAdd(A, AllOnes);
1723 Value *Not = Builder.CreateXor(A, AllOnes);
1724 return BinaryOperator::CreateAnd(Dec, Not);
1725 }
1726
1727 // Disguised reassociation/factorization:
1728 // ~(A * C1) + A
1729 // ((A * -C1) - 1) + A
1730 // ((A * -C1) + A) - 1
1731 // (A * (1 - C1)) - 1
1732 if (match(&I,
1733 m_c_Add(m_OneUse(m_Not(m_OneUse(m_Mul(m_Value(A), m_APInt(C1))))),
1734 m_Deferred(A)))) {
1735 Type *Ty = I.getType();
1736 Constant *NewMulC = ConstantInt::get(Ty, 1 - *C1);
1737 Value *NewMul = Builder.CreateMul(A, NewMulC);
1738 return BinaryOperator::CreateAdd(NewMul, ConstantInt::getAllOnesValue(Ty));
1739 }
1740
1741 // (A * -2**C) + B --> B - (A << C)
1742 const APInt *NegPow2C;
1743 if (match(&I, m_c_Add(m_OneUse(m_Mul(m_Value(A), m_NegatedPower2(NegPow2C))),
1744 m_Value(B)))) {
1745 Constant *ShiftAmtC = ConstantInt::get(Ty, NegPow2C->countr_zero());
1746 Value *Shl = Builder.CreateShl(A, ShiftAmtC);
1747 return BinaryOperator::CreateSub(B, Shl);
1748 }
1749
1750 // Canonicalize signum variant that ends in add:
1751 // (A s>> (BW - 1)) + (zext (A s> 0)) --> (A s>> (BW - 1)) | (zext (A != 0))
1752 uint64_t BitWidth = Ty->getScalarSizeInBits();
1753 if (match(LHS, m_AShr(m_Value(A), m_SpecificIntAllowPoison(BitWidth - 1))) &&
1754 match(RHS, m_OneUse(m_ZExt(m_OneUse(m_SpecificICmp(
1755 CmpInst::ICMP_SGT, m_Specific(A), m_ZeroInt())))))) {
1756 Value *NotZero = Builder.CreateIsNotNull(A, "isnotnull");
1757 Value *Zext = Builder.CreateZExt(NotZero, Ty, "isnotnull.zext");
1758 return BinaryOperator::CreateOr(LHS, Zext);
1759 }
1760
1761 {
1762 Value *Cond, *Ext;
1763 Constant *C;
1764 // (add X, (sext/zext (icmp eq X, C)))
1765 // -> (select (icmp eq X, C), (add C, (sext/zext 1)), X)
1766 auto CondMatcher = m_CombineAnd(
1767 m_Value(Cond),
1768 m_SpecificICmp(ICmpInst::ICMP_EQ, m_Deferred(A), m_ImmConstant(C)));
1769
1770 if (match(&I,
1771 m_c_Add(m_Value(A),
1772 m_CombineAnd(m_Value(Ext), m_ZExtOrSExt(CondMatcher)))) &&
1773 Ext->hasOneUse()) {
1774 Value *Add = isa<ZExtInst>(Ext) ? InstCombiner::AddOne(C)
1775 : InstCombiner::SubOne(C);
1776 return replaceInstUsesWith(I, Builder.CreateSelect(Cond, Add, A));
1777 }
1778 }
1779
1780 // (add (add A, 1), (sext (icmp ne A, 0))) => call umax(A, 1)
1781 if (match(LHS, m_Add(m_Value(A), m_One())) &&
1782 match(RHS, m_OneUse(m_SExt(m_OneUse(m_SpecificICmp(
1783 ICmpInst::ICMP_NE, m_Specific(A), m_ZeroInt())))))) {
1784 Value *OneConst = ConstantInt::get(A->getType(), 1);
1785 Value *UMax = Builder.CreateBinaryIntrinsic(Intrinsic::umax, A, OneConst);
1786 return replaceInstUsesWith(I, UMax);
1787 }
1788
1789 if (Instruction *Ashr = foldAddToAshr(I))
1790 return Ashr;
1791
1792 // Ceiling division by power-of-2:
1793 // (X >> log2(N)) + zext(X & (N-1) != 0) --> (X + (N-1)) >> log2(N)
1794 // This is valid when adding (N-1) to X doesn't overflow.
1795 {
1796 Value *X;
1797 const APInt *ShiftAmt, *Mask;
1798 CmpPredicate Pred;
1799
1800 // Match: (X >> C) + zext((X & Mask) != 0)
1801 // or: zext((X & Mask) != 0) + (X >> C)
1802 if (match(&I, m_c_Add(m_OneUse(m_LShr(m_Value(X), m_APInt(ShiftAmt))),
1803 m_ZExt(m_SpecificICmp(
1804 ICmpInst::ICMP_NE,
1805 m_And(m_Deferred(X), m_LowBitMask(Mask)),
1806 m_ZeroInt())))) &&
1807 Mask->popcount() == *ShiftAmt) {
1808
1809 // Check if X + Mask doesn't overflow
1810 Constant *MaskC = ConstantInt::get(X->getType(), *Mask);
1811 if (willNotOverflowUnsignedAdd(X, MaskC, I)) {
1812 // (X + Mask) >> ShiftAmt
1813 Value *Add = Builder.CreateNUWAdd(X, MaskC);
1814 return BinaryOperator::CreateLShr(
1815 Add, ConstantInt::get(X->getType(), *ShiftAmt));
1816 }
1817 }
1818 }
1819
1820 // (~X) + (~Y) --> -2 - (X + Y)
1821 {
1822 // To ensure we can save instructions we need to ensure that we consume both
1823 // LHS/RHS (i.e they have a `not`).
1824 bool ConsumesLHS, ConsumesRHS;
1825 if (isFreeToInvert(LHS, LHS->hasOneUse(), ConsumesLHS) && ConsumesLHS &&
1826 isFreeToInvert(RHS, RHS->hasOneUse(), ConsumesRHS) && ConsumesRHS) {
1827 Value *NotLHS = getFreelyInverted(LHS, LHS->hasOneUse(), &Builder);
1828 Value *NotRHS = getFreelyInverted(RHS, RHS->hasOneUse(), &Builder);
1829 assert(NotLHS != nullptr && NotRHS != nullptr &&
1830 "isFreeToInvert desynced with getFreelyInverted");
1831 Value *LHSPlusRHS = Builder.CreateAdd(NotLHS, NotRHS);
1832 return BinaryOperator::CreateSub(
1833 ConstantInt::getSigned(RHS->getType(), -2), LHSPlusRHS);
1834 }
1835 }
1836
1837 if (Instruction *R = tryFoldInstWithCtpopWithNot(&I))
1838 return R;
1839
1840 // TODO(jingyue): Consider willNotOverflowSignedAdd and
1841 // willNotOverflowUnsignedAdd to reduce the number of invocations of
1842 // computeKnownBits.
1843 bool Changed = false;
1844 if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHSCache, RHSCache, I)) {
1845 Changed = true;
1846 I.setHasNoSignedWrap(true);
1847 }
1848 if (!I.hasNoUnsignedWrap() &&
1849 willNotOverflowUnsignedAdd(LHSCache, RHSCache, I)) {
1850 Changed = true;
1851 I.setHasNoUnsignedWrap(true);
1852 }
1853
1854 if (Instruction *V = canonicalizeLowbitMask(I, Builder))
1855 return V;
1856
1857 if (Instruction *V =
1858 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
1859 return V;
1860
1861 if (Instruction *SatAdd = foldToUnsignedSaturatedAdd(I))
1862 return SatAdd;
1863
1864 // usub.sat(A, B) + B => umax(A, B)
1865 if (match(&I, m_c_BinOp(
1866 m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Value(A), m_Value(B))),
1867 m_Deferred(B)))) {
1868 return replaceInstUsesWith(I,
1869 Builder.CreateIntrinsic(Intrinsic::umax, {I.getType()}, {A, B}));
1870 }
1871
1872 // ctpop(A) + ctpop(B) => ctpop(A | B) if A and B have no bits set in common.
1873 if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(A)))) &&
1874 match(RHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(B)))) &&
1875 haveNoCommonBitsSet(A, B, SQ.getWithInstruction(&I)))
1876 return replaceInstUsesWith(
1877 I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
1878 {Builder.CreateOr(A, B)}));
1879
1880 // Fold the log2_ceil idiom:
1881 // zext(ctpop(A) >u/!= 1) + (ctlz(A, true) ^ (BW - 1))
1882 // -->
1883 // BW - ctlz(A - 1, false)
1884 const APInt *XorC;
1885 CmpPredicate Pred;
1886 if (match(&I,
1887 m_c_Add(
1888 m_ZExt(m_ICmp(Pred, m_Intrinsic<Intrinsic::ctpop>(m_Value(A)),
1889 m_One())),
1890 m_OneUse(m_ZExtOrSelf(m_OneUse(m_Xor(
1891 m_OneUse(m_TruncOrSelf(m_OneUse(
1892 m_Intrinsic<Intrinsic::ctlz>(m_Deferred(A), m_One())))),
1893 m_APInt(XorC))))))) &&
1894 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_NE) &&
1895 *XorC == A->getType()->getScalarSizeInBits() - 1) {
1896 Value *Sub = Builder.CreateAdd(A, Constant::getAllOnesValue(A->getType()));
1897 Value *Ctlz = Builder.CreateIntrinsic(Intrinsic::ctlz, {A->getType()},
1898 {Sub, Builder.getFalse()});
1899 Value *Ret = Builder.CreateSub(
1900 ConstantInt::get(A->getType(), A->getType()->getScalarSizeInBits()),
1901 Ctlz, "", /*HasNUW=*/true, /*HasNSW=*/true);
1902 return replaceInstUsesWith(I, Builder.CreateZExtOrTrunc(Ret, I.getType()));
1903 }
1904
1905 if (Instruction *Res = foldSquareSumInt(I))
1906 return Res;
1907
1908 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
1909 return Res;
1910
1911 if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I))
1912 return Res;
1913
1914 // Re-enqueue users of the induction variable of add recurrence if we infer
1915 // new nuw/nsw flags.
1916 if (Changed) {
1917 PHINode *PHI;
1918 Value *Start, *Step;
1919 if (matchSimpleRecurrence(&I, PHI, Start, Step))
1920 Worklist.pushUsersToWorkList(*PHI);
1921 }
1922
1923 return Changed ? &I : nullptr;
1924 }
1925
1926 /// Eliminate an op from a linear interpolation (lerp) pattern.
factorizeLerp(BinaryOperator & I,InstCombiner::BuilderTy & Builder)1927 static Instruction *factorizeLerp(BinaryOperator &I,
1928 InstCombiner::BuilderTy &Builder) {
1929 Value *X, *Y, *Z;
1930 if (!match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_Value(Y),
1931 m_OneUse(m_FSub(m_FPOne(),
1932 m_Value(Z))))),
1933 m_OneUse(m_c_FMul(m_Value(X), m_Deferred(Z))))))
1934 return nullptr;
1935
1936 // (Y * (1.0 - Z)) + (X * Z) --> Y + Z * (X - Y) [8 commuted variants]
1937 Value *XY = Builder.CreateFSubFMF(X, Y, &I);
1938 Value *MulZ = Builder.CreateFMulFMF(Z, XY, &I);
1939 return BinaryOperator::CreateFAddFMF(Y, MulZ, &I);
1940 }
1941
1942 /// Factor a common operand out of fadd/fsub of fmul/fdiv.
factorizeFAddFSub(BinaryOperator & I,InstCombiner::BuilderTy & Builder)1943 static Instruction *factorizeFAddFSub(BinaryOperator &I,
1944 InstCombiner::BuilderTy &Builder) {
1945 assert((I.getOpcode() == Instruction::FAdd ||
1946 I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");
1947 assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&
1948 "FP factorization requires FMF");
1949
1950 if (Instruction *Lerp = factorizeLerp(I, Builder))
1951 return Lerp;
1952
1953 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1954 if (!Op0->hasOneUse() || !Op1->hasOneUse())
1955 return nullptr;
1956
1957 Value *X, *Y, *Z;
1958 bool IsFMul;
1959 if ((match(Op0, m_FMul(m_Value(X), m_Value(Z))) &&
1960 match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))) ||
1961 (match(Op0, m_FMul(m_Value(Z), m_Value(X))) &&
1962 match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))))
1963 IsFMul = true;
1964 else if (match(Op0, m_FDiv(m_Value(X), m_Value(Z))) &&
1965 match(Op1, m_FDiv(m_Value(Y), m_Specific(Z))))
1966 IsFMul = false;
1967 else
1968 return nullptr;
1969
1970 // (X * Z) + (Y * Z) --> (X + Y) * Z
1971 // (X * Z) - (Y * Z) --> (X - Y) * Z
1972 // (X / Z) + (Y / Z) --> (X + Y) / Z
1973 // (X / Z) - (Y / Z) --> (X - Y) / Z
1974 bool IsFAdd = I.getOpcode() == Instruction::FAdd;
1975 Value *XY = IsFAdd ? Builder.CreateFAddFMF(X, Y, &I)
1976 : Builder.CreateFSubFMF(X, Y, &I);
1977
1978 // Bail out if we just created a denormal constant.
1979 // TODO: This is copied from a previous implementation. Is it necessary?
1980 const APFloat *C;
1981 if (match(XY, m_APFloat(C)) && !C->isNormal())
1982 return nullptr;
1983
1984 return IsFMul ? BinaryOperator::CreateFMulFMF(XY, Z, &I)
1985 : BinaryOperator::CreateFDivFMF(XY, Z, &I);
1986 }
1987
visitFAdd(BinaryOperator & I)1988 Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
1989 if (Value *V = simplifyFAddInst(I.getOperand(0), I.getOperand(1),
1990 I.getFastMathFlags(),
1991 SQ.getWithInstruction(&I)))
1992 return replaceInstUsesWith(I, V);
1993
1994 if (SimplifyAssociativeOrCommutative(I))
1995 return &I;
1996
1997 if (Instruction *X = foldVectorBinop(I))
1998 return X;
1999
2000 if (Instruction *Phi = foldBinopWithPhiOperands(I))
2001 return Phi;
2002
2003 if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I))
2004 return FoldedFAdd;
2005
2006 // (-X) + Y --> Y - X
2007 Value *X, *Y;
2008 if (match(&I, m_c_FAdd(m_FNeg(m_Value(X)), m_Value(Y))))
2009 return BinaryOperator::CreateFSubFMF(Y, X, &I);
2010
2011 // Similar to above, but look through fmul/fdiv for the negated term.
2012 // (-X * Y) + Z --> Z - (X * Y) [4 commuted variants]
2013 Value *Z;
2014 if (match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))),
2015 m_Value(Z)))) {
2016 Value *XY = Builder.CreateFMulFMF(X, Y, &I);
2017 return BinaryOperator::CreateFSubFMF(Z, XY, &I);
2018 }
2019 // (-X / Y) + Z --> Z - (X / Y) [2 commuted variants]
2020 // (X / -Y) + Z --> Z - (X / Y) [2 commuted variants]
2021 if (match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y))),
2022 m_Value(Z))) ||
2023 match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))),
2024 m_Value(Z)))) {
2025 Value *XY = Builder.CreateFDivFMF(X, Y, &I);
2026 return BinaryOperator::CreateFSubFMF(Z, XY, &I);
2027 }
2028
2029 // Check for (fadd double (sitofp x), y), see if we can merge this into an
2030 // integer add followed by a promotion.
2031 if (Instruction *R = foldFBinOpOfIntCasts(I))
2032 return R;
2033
2034 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2035 // Handle specials cases for FAdd with selects feeding the operation
2036 if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
2037 return replaceInstUsesWith(I, V);
2038
2039 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
2040 if (Instruction *F = factorizeFAddFSub(I, Builder))
2041 return F;
2042
2043 if (Instruction *F = foldSquareSumFP(I))
2044 return F;
2045
2046 // Try to fold fadd into start value of reduction intrinsic.
2047 if (match(&I, m_c_FAdd(m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
2048 m_AnyZeroFP(), m_Value(X))),
2049 m_Value(Y)))) {
2050 // fadd (rdx 0.0, X), Y --> rdx Y, X
2051 return replaceInstUsesWith(
2052 I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
2053 {X->getType()}, {Y, X}, &I));
2054 }
2055 const APFloat *StartC, *C;
2056 if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
2057 m_APFloat(StartC), m_Value(X)))) &&
2058 match(RHS, m_APFloat(C))) {
2059 // fadd (rdx StartC, X), C --> rdx (C + StartC), X
2060 Constant *NewStartC = ConstantFP::get(I.getType(), *C + *StartC);
2061 return replaceInstUsesWith(
2062 I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
2063 {X->getType()}, {NewStartC, X}, &I));
2064 }
2065
2066 // (X * MulC) + X --> X * (MulC + 1.0)
2067 Constant *MulC;
2068 if (match(&I, m_c_FAdd(m_FMul(m_Value(X), m_ImmConstant(MulC)),
2069 m_Deferred(X)))) {
2070 if (Constant *NewMulC = ConstantFoldBinaryOpOperands(
2071 Instruction::FAdd, MulC, ConstantFP::get(I.getType(), 1.0), DL))
2072 return BinaryOperator::CreateFMulFMF(X, NewMulC, &I);
2073 }
2074
2075 // (-X - Y) + (X + Z) --> Z - Y
2076 if (match(&I, m_c_FAdd(m_FSub(m_FNeg(m_Value(X)), m_Value(Y)),
2077 m_c_FAdd(m_Deferred(X), m_Value(Z)))))
2078 return BinaryOperator::CreateFSubFMF(Z, Y, &I);
2079
2080 if (Value *V = FAddCombine(Builder).simplify(&I))
2081 return replaceInstUsesWith(I, V);
2082 }
2083
2084 // minumum(X, Y) + maximum(X, Y) => X + Y.
2085 if (match(&I,
2086 m_c_FAdd(m_Intrinsic<Intrinsic::maximum>(m_Value(X), m_Value(Y)),
2087 m_c_Intrinsic<Intrinsic::minimum>(m_Deferred(X),
2088 m_Deferred(Y))))) {
2089 BinaryOperator *Result = BinaryOperator::CreateFAddFMF(X, Y, &I);
2090 // We cannot preserve ninf if nnan flag is not set.
2091 // If X is NaN and Y is Inf then in original program we had NaN + NaN,
2092 // while in optimized version NaN + Inf and this is a poison with ninf flag.
2093 if (!Result->hasNoNaNs())
2094 Result->setHasNoInfs(false);
2095 return Result;
2096 }
2097
2098 return nullptr;
2099 }
2100
compute(Value * LHS,Value * RHS)2101 CommonPointerBase CommonPointerBase::compute(Value *LHS, Value *RHS) {
2102 CommonPointerBase Base;
2103
2104 if (LHS->getType() != RHS->getType())
2105 return Base;
2106
2107 // Collect all base pointers of LHS.
2108 SmallPtrSet<Value *, 16> Ptrs;
2109 Value *Ptr = LHS;
2110 while (true) {
2111 Ptrs.insert(Ptr);
2112 if (auto *GEP = dyn_cast<GEPOperator>(Ptr))
2113 Ptr = GEP->getPointerOperand();
2114 else
2115 break;
2116 }
2117
2118 // Find common base and collect RHS GEPs.
2119 while (true) {
2120 if (Ptrs.contains(RHS)) {
2121 Base.Ptr = RHS;
2122 break;
2123 }
2124
2125 if (auto *GEP = dyn_cast<GEPOperator>(RHS)) {
2126 Base.RHSGEPs.push_back(GEP);
2127 Base.RHSNW &= GEP->getNoWrapFlags();
2128 RHS = GEP->getPointerOperand();
2129 } else {
2130 // No common base.
2131 return Base;
2132 }
2133 }
2134
2135 // Collect LHS GEPs.
2136 while (true) {
2137 if (LHS == Base.Ptr)
2138 break;
2139
2140 auto *GEP = cast<GEPOperator>(LHS);
2141 Base.LHSGEPs.push_back(GEP);
2142 Base.LHSNW &= GEP->getNoWrapFlags();
2143 LHS = GEP->getPointerOperand();
2144 }
2145
2146 return Base;
2147 }
2148
2149 /// Optimize pointer differences into the same array into a size. Consider:
2150 /// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
2151 /// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
OptimizePointerDifference(Value * LHS,Value * RHS,Type * Ty,bool IsNUW)2152 Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS,
2153 Type *Ty, bool IsNUW) {
2154 CommonPointerBase Base = CommonPointerBase::compute(LHS, RHS);
2155 if (!Base.Ptr)
2156 return nullptr;
2157
2158 // To avoid duplicating the offset arithmetic, rewrite the GEP to use the
2159 // computed offset.
2160 // TODO: We should probably do this even if there is only one GEP.
2161 bool RewriteGEPs = !Base.LHSGEPs.empty() && !Base.RHSGEPs.empty();
2162
2163 Type *IdxTy = DL.getIndexType(LHS->getType());
2164 Value *Result = EmitGEPOffsets(Base.LHSGEPs, Base.LHSNW, IdxTy, RewriteGEPs);
2165 Value *Offset2 = EmitGEPOffsets(Base.RHSGEPs, Base.RHSNW, IdxTy, RewriteGEPs);
2166
2167 // If this is a single inbounds GEP and the original sub was nuw,
2168 // then the final multiplication is also nuw.
2169 if (auto *I = dyn_cast<OverflowingBinaryOperator>(Result))
2170 if (IsNUW && match(Offset2, m_Zero()) && Base.LHSNW.isInBounds() &&
2171 (I->use_empty() || I->hasOneUse()) && I->hasNoSignedWrap() &&
2172 !I->hasNoUnsignedWrap() &&
2173 ((I->getOpcode() == Instruction::Mul &&
2174 match(I->getOperand(1), m_NonNegative())) ||
2175 I->getOpcode() == Instruction::Shl))
2176 cast<Instruction>(I)->setHasNoUnsignedWrap();
2177
2178 // If we have a 2nd GEP of the same base pointer, subtract the offsets.
2179 // If both GEPs are inbounds, then the subtract does not have signed overflow.
2180 // If both GEPs are nuw and the original sub is nuw, the new sub is also nuw.
2181 if (!match(Offset2, m_Zero())) {
2182 Result =
2183 Builder.CreateSub(Result, Offset2, "gepdiff",
2184 IsNUW && Base.LHSNW.hasNoUnsignedWrap() &&
2185 Base.RHSNW.hasNoUnsignedWrap(),
2186 Base.LHSNW.isInBounds() && Base.RHSNW.isInBounds());
2187 }
2188
2189 return Builder.CreateIntCast(Result, Ty, true);
2190 }
2191
foldSubOfMinMax(BinaryOperator & I,InstCombiner::BuilderTy & Builder)2192 static Instruction *foldSubOfMinMax(BinaryOperator &I,
2193 InstCombiner::BuilderTy &Builder) {
2194 Value *Op0 = I.getOperand(0);
2195 Value *Op1 = I.getOperand(1);
2196 Type *Ty = I.getType();
2197 auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op1);
2198 if (!MinMax)
2199 return nullptr;
2200
2201 // sub(add(X,Y), s/umin(X,Y)) --> s/umax(X,Y)
2202 // sub(add(X,Y), s/umax(X,Y)) --> s/umin(X,Y)
2203 Value *X = MinMax->getLHS();
2204 Value *Y = MinMax->getRHS();
2205 if (match(Op0, m_c_Add(m_Specific(X), m_Specific(Y))) &&
2206 (Op0->hasOneUse() || Op1->hasOneUse())) {
2207 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
2208 Function *F = Intrinsic::getOrInsertDeclaration(I.getModule(), InvID, Ty);
2209 return CallInst::Create(F, {X, Y});
2210 }
2211
2212 // sub(add(X,Y),umin(Y,Z)) --> add(X,usub.sat(Y,Z))
2213 // sub(add(X,Z),umin(Y,Z)) --> add(X,usub.sat(Z,Y))
2214 Value *Z;
2215 if (match(Op1, m_OneUse(m_UMin(m_Value(Y), m_Value(Z))))) {
2216 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Y), m_Value(X))))) {
2217 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Y, Z});
2218 return BinaryOperator::CreateAdd(X, USub);
2219 }
2220 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Z), m_Value(X))))) {
2221 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Z, Y});
2222 return BinaryOperator::CreateAdd(X, USub);
2223 }
2224 }
2225
2226 // sub Op0, smin((sub nsw Op0, Z), 0) --> smax Op0, Z
2227 // sub Op0, smax((sub nsw Op0, Z), 0) --> smin Op0, Z
2228 if (MinMax->isSigned() && match(Y, m_ZeroInt()) &&
2229 match(X, m_NSWSub(m_Specific(Op0), m_Value(Z)))) {
2230 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
2231 Function *F = Intrinsic::getOrInsertDeclaration(I.getModule(), InvID, Ty);
2232 return CallInst::Create(F, {Op0, Z});
2233 }
2234
2235 return nullptr;
2236 }
2237
visitSub(BinaryOperator & I)2238 Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
2239 if (Value *V = simplifySubInst(I.getOperand(0), I.getOperand(1),
2240 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
2241 SQ.getWithInstruction(&I)))
2242 return replaceInstUsesWith(I, V);
2243
2244 if (Instruction *X = foldVectorBinop(I))
2245 return X;
2246
2247 if (Instruction *Phi = foldBinopWithPhiOperands(I))
2248 return Phi;
2249
2250 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2251
2252 // If this is a 'B = x-(-A)', change to B = x+A.
2253 // We deal with this without involving Negator to preserve NSW flag.
2254 if (Value *V = dyn_castNegVal(Op1)) {
2255 BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);
2256
2257 if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
2258 assert(BO->getOpcode() == Instruction::Sub &&
2259 "Expected a subtraction operator!");
2260 if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
2261 Res->setHasNoSignedWrap(true);
2262 } else {
2263 if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
2264 Res->setHasNoSignedWrap(true);
2265 }
2266
2267 return Res;
2268 }
2269
2270 // Try this before Negator to preserve NSW flag.
2271 if (Instruction *R = factorizeMathWithShlOps(I, Builder))
2272 return R;
2273
2274 Constant *C;
2275 if (match(Op0, m_ImmConstant(C))) {
2276 Value *X;
2277 Constant *C2;
2278
2279 // C-(X+C2) --> (C-C2)-X
2280 if (match(Op1, m_Add(m_Value(X), m_ImmConstant(C2)))) {
2281 // C-C2 never overflow, and C-(X+C2), (X+C2) has NSW/NUW
2282 // => (C-C2)-X can have NSW/NUW
2283 bool WillNotSOV = willNotOverflowSignedSub(C, C2, I);
2284 BinaryOperator *Res =
2285 BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
2286 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2287 Res->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO1->hasNoSignedWrap() &&
2288 WillNotSOV);
2289 Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap() &&
2290 OBO1->hasNoUnsignedWrap());
2291 return Res;
2292 }
2293 }
2294
2295 auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * {
2296 if (Instruction *Ext = narrowMathIfNoOverflow(I))
2297 return Ext;
2298
2299 bool Changed = false;
2300 if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
2301 Changed = true;
2302 I.setHasNoSignedWrap(true);
2303 }
2304 if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
2305 Changed = true;
2306 I.setHasNoUnsignedWrap(true);
2307 }
2308
2309 return Changed ? &I : nullptr;
2310 };
2311
2312 // First, let's try to interpret `sub a, b` as `add a, (sub 0, b)`,
2313 // and let's try to sink `(sub 0, b)` into `b` itself. But only if this isn't
2314 // a pure negation used by a select that looks like abs/nabs.
2315 bool IsNegation = match(Op0, m_ZeroInt());
2316 if (!IsNegation || none_of(I.users(), [&I, Op1](const User *U) {
2317 const Instruction *UI = dyn_cast<Instruction>(U);
2318 if (!UI)
2319 return false;
2320 return match(UI, m_c_Select(m_Specific(Op1), m_Specific(&I)));
2321 })) {
2322 if (Value *NegOp1 = Negator::Negate(IsNegation, /* IsNSW */ IsNegation &&
2323 I.hasNoSignedWrap(),
2324 Op1, *this))
2325 return BinaryOperator::CreateAdd(NegOp1, Op0);
2326 }
2327 if (IsNegation)
2328 return TryToNarrowDeduceFlags(); // Should have been handled in Negator!
2329
2330 // (A*B)-(A*C) -> A*(B-C) etc
2331 if (Value *V = foldUsingDistributiveLaws(I))
2332 return replaceInstUsesWith(I, V);
2333
2334 if (I.getType()->isIntOrIntVectorTy(1))
2335 return BinaryOperator::CreateXor(Op0, Op1);
2336
2337 // Replace (-1 - A) with (~A).
2338 if (match(Op0, m_AllOnes()))
2339 return BinaryOperator::CreateNot(Op1);
2340
2341 // (X + -1) - Y --> ~Y + X
2342 Value *X, *Y;
2343 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
2344 return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);
2345
2346 // if (C1 & C2) == C2 then (X & C1) - (X & C2) -> X & (C1 ^ C2)
2347 Constant *C1, *C2;
2348 if (match(Op0, m_And(m_Value(X), m_ImmConstant(C1))) &&
2349 match(Op1, m_And(m_Specific(X), m_ImmConstant(C2)))) {
2350 Value *AndC = ConstantFoldBinaryInstruction(Instruction::And, C1, C2);
2351 if (C2->isElementWiseEqual(AndC))
2352 return BinaryOperator::CreateAnd(
2353 X, ConstantFoldBinaryInstruction(Instruction::Xor, C1, C2));
2354 }
2355
2356 // Reassociate sub/add sequences to create more add instructions and
2357 // reduce dependency chains:
2358 // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
2359 Value *Z;
2360 if (match(Op0, m_OneUse(m_c_Add(m_OneUse(m_Sub(m_Value(X), m_Value(Y))),
2361 m_Value(Z))))) {
2362 Value *XZ = Builder.CreateAdd(X, Z);
2363 Value *YW = Builder.CreateAdd(Y, Op1);
2364 return BinaryOperator::CreateSub(XZ, YW);
2365 }
2366
2367 // ((X - Y) - Op1) --> X - (Y + Op1)
2368 if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y))))) {
2369 OverflowingBinaryOperator *LHSSub = cast<OverflowingBinaryOperator>(Op0);
2370 bool HasNUW = I.hasNoUnsignedWrap() && LHSSub->hasNoUnsignedWrap();
2371 bool HasNSW = HasNUW && I.hasNoSignedWrap() && LHSSub->hasNoSignedWrap();
2372 Value *Add = Builder.CreateAdd(Y, Op1, "", /*HasNUW=*/HasNUW,
2373 /*HasNSW=*/HasNSW);
2374 BinaryOperator *Sub = BinaryOperator::CreateSub(X, Add);
2375 Sub->setHasNoUnsignedWrap(HasNUW);
2376 Sub->setHasNoSignedWrap(HasNSW);
2377 return Sub;
2378 }
2379
2380 {
2381 // (X + Z) - (Y + Z) --> (X - Y)
2382 // This is done in other passes, but we want to be able to consume this
2383 // pattern in InstCombine so we can generate it without creating infinite
2384 // loops.
2385 if (match(Op0, m_Add(m_Value(X), m_Value(Z))) &&
2386 match(Op1, m_c_Add(m_Value(Y), m_Specific(Z))))
2387 return BinaryOperator::CreateSub(X, Y);
2388
2389 // (X + C0) - (Y + C1) --> (X - Y) + (C0 - C1)
2390 Constant *CX, *CY;
2391 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_ImmConstant(CX)))) &&
2392 match(Op1, m_OneUse(m_Add(m_Value(Y), m_ImmConstant(CY))))) {
2393 Value *OpsSub = Builder.CreateSub(X, Y);
2394 Constant *ConstsSub = ConstantExpr::getSub(CX, CY);
2395 return BinaryOperator::CreateAdd(OpsSub, ConstsSub);
2396 }
2397 }
2398
2399 {
2400 Value *W, *Z;
2401 if (match(Op0, m_AddLike(m_Value(W), m_Value(X))) &&
2402 match(Op1, m_AddLike(m_Value(Y), m_Value(Z)))) {
2403 Instruction *R = nullptr;
2404 if (W == Y)
2405 R = BinaryOperator::CreateSub(X, Z);
2406 else if (W == Z)
2407 R = BinaryOperator::CreateSub(X, Y);
2408 else if (X == Y)
2409 R = BinaryOperator::CreateSub(W, Z);
2410 else if (X == Z)
2411 R = BinaryOperator::CreateSub(W, Y);
2412 if (R) {
2413 bool NSW = I.hasNoSignedWrap() &&
2414 match(Op0, m_NSWAddLike(m_Value(), m_Value())) &&
2415 match(Op1, m_NSWAddLike(m_Value(), m_Value()));
2416
2417 bool NUW = I.hasNoUnsignedWrap() &&
2418 match(Op1, m_NUWAddLike(m_Value(), m_Value()));
2419 R->setHasNoSignedWrap(NSW);
2420 R->setHasNoUnsignedWrap(NUW);
2421 return R;
2422 }
2423 }
2424 }
2425
2426 // (~X) - (~Y) --> Y - X
2427 {
2428 // Need to ensure we can consume at least one of the `not` instructions,
2429 // otherwise this can inf loop.
2430 bool ConsumesOp0, ConsumesOp1;
2431 if (isFreeToInvert(Op0, Op0->hasOneUse(), ConsumesOp0) &&
2432 isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) &&
2433 (ConsumesOp0 || ConsumesOp1)) {
2434 Value *NotOp0 = getFreelyInverted(Op0, Op0->hasOneUse(), &Builder);
2435 Value *NotOp1 = getFreelyInverted(Op1, Op1->hasOneUse(), &Builder);
2436 assert(NotOp0 != nullptr && NotOp1 != nullptr &&
2437 "isFreeToInvert desynced with getFreelyInverted");
2438 return BinaryOperator::CreateSub(NotOp1, NotOp0);
2439 }
2440 }
2441
2442 auto m_AddRdx = [](Value *&Vec) {
2443 return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_add>(m_Value(Vec)));
2444 };
2445 Value *V0, *V1;
2446 if (match(Op0, m_AddRdx(V0)) && match(Op1, m_AddRdx(V1)) &&
2447 V0->getType() == V1->getType()) {
2448 // Difference of sums is sum of differences:
2449 // add_rdx(V0) - add_rdx(V1) --> add_rdx(V0 - V1)
2450 Value *Sub = Builder.CreateSub(V0, V1);
2451 Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_add,
2452 {Sub->getType()}, {Sub});
2453 return replaceInstUsesWith(I, Rdx);
2454 }
2455
2456 if (Constant *C = dyn_cast<Constant>(Op0)) {
2457 Value *X;
2458 if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
2459 // C - (zext bool) --> bool ? C - 1 : C
2460 return SelectInst::Create(X, InstCombiner::SubOne(C), C);
2461 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
2462 // C - (sext bool) --> bool ? C + 1 : C
2463 return SelectInst::Create(X, InstCombiner::AddOne(C), C);
2464
2465 // C - ~X == X + (1+C)
2466 if (match(Op1, m_Not(m_Value(X))))
2467 return BinaryOperator::CreateAdd(X, InstCombiner::AddOne(C));
2468
2469 // Try to fold constant sub into select arguments.
2470 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2471 if (Instruction *R = FoldOpIntoSelect(I, SI))
2472 return R;
2473
2474 // Try to fold constant sub into PHI values.
2475 if (PHINode *PN = dyn_cast<PHINode>(Op1))
2476 if (Instruction *R = foldOpIntoPhi(I, PN))
2477 return R;
2478
2479 Constant *C2;
2480
2481 // C-(C2-X) --> X+(C-C2)
2482 if (match(Op1, m_Sub(m_ImmConstant(C2), m_Value(X))))
2483 return BinaryOperator::CreateAdd(X, ConstantExpr::getSub(C, C2));
2484 }
2485
2486 const APInt *Op0C;
2487 if (match(Op0, m_APInt(Op0C))) {
2488 if (Op0C->isMask()) {
2489 // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
2490 // zero. We don't use information from dominating conditions so this
2491 // transform is easier to reverse if necessary.
2492 KnownBits RHSKnown = llvm::computeKnownBits(
2493 Op1, SQ.getWithInstruction(&I).getWithoutDomCondCache());
2494 if ((*Op0C | RHSKnown.Zero).isAllOnes())
2495 return BinaryOperator::CreateXor(Op1, Op0);
2496 }
2497
2498 // C - ((C3 -nuw X) & C2) --> (C - (C2 & C3)) + (X & C2) when:
2499 // (C3 - ((C2 & C3) - 1)) is pow2
2500 // ((C2 + C3) & ((C2 & C3) - 1)) == ((C2 & C3) - 1)
2501 // C2 is negative pow2 || sub nuw
2502 const APInt *C2, *C3;
2503 BinaryOperator *InnerSub;
2504 if (match(Op1, m_OneUse(m_And(m_BinOp(InnerSub), m_APInt(C2)))) &&
2505 match(InnerSub, m_Sub(m_APInt(C3), m_Value(X))) &&
2506 (InnerSub->hasNoUnsignedWrap() || C2->isNegatedPowerOf2())) {
2507 APInt C2AndC3 = *C2 & *C3;
2508 APInt C2AndC3Minus1 = C2AndC3 - 1;
2509 APInt C2AddC3 = *C2 + *C3;
2510 if ((*C3 - C2AndC3Minus1).isPowerOf2() &&
2511 C2AndC3Minus1.isSubsetOf(C2AddC3)) {
2512 Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(), *C2));
2513 return BinaryOperator::CreateAdd(
2514 And, ConstantInt::get(I.getType(), *Op0C - C2AndC3));
2515 }
2516 }
2517 }
2518
2519 {
2520 Value *Y;
2521 // X-(X+Y) == -Y X-(Y+X) == -Y
2522 if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
2523 return BinaryOperator::CreateNeg(Y);
2524
2525 // (X-Y)-X == -Y
2526 if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
2527 return BinaryOperator::CreateNeg(Y);
2528 }
2529
2530 // (sub (or A, B) (and A, B)) --> (xor A, B)
2531 {
2532 Value *A, *B;
2533 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
2534 match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
2535 return BinaryOperator::CreateXor(A, B);
2536 }
2537
2538 // (sub (add A, B) (or A, B)) --> (and A, B)
2539 {
2540 Value *A, *B;
2541 if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
2542 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
2543 return BinaryOperator::CreateAnd(A, B);
2544 }
2545
2546 // (sub (add A, B) (and A, B)) --> (or A, B)
2547 {
2548 Value *A, *B;
2549 if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
2550 match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
2551 return BinaryOperator::CreateOr(A, B);
2552 }
2553
2554 // (sub (and A, B) (or A, B)) --> neg (xor A, B)
2555 {
2556 Value *A, *B;
2557 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2558 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
2559 (Op0->hasOneUse() || Op1->hasOneUse()))
2560 return BinaryOperator::CreateNeg(Builder.CreateXor(A, B));
2561 }
2562
2563 // (sub (or A, B), (xor A, B)) --> (and A, B)
2564 {
2565 Value *A, *B;
2566 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
2567 match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
2568 return BinaryOperator::CreateAnd(A, B);
2569 }
2570
2571 // (sub (xor A, B) (or A, B)) --> neg (and A, B)
2572 {
2573 Value *A, *B;
2574 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2575 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
2576 (Op0->hasOneUse() || Op1->hasOneUse()))
2577 return BinaryOperator::CreateNeg(Builder.CreateAnd(A, B));
2578 }
2579
2580 {
2581 Value *Y;
2582 // ((X | Y) - X) --> (~X & Y)
2583 if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
2584 return BinaryOperator::CreateAnd(
2585 Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
2586 }
2587
2588 {
2589 // (sub (and Op1, (neg X)), Op1) --> neg (and Op1, (add X, -1))
2590 Value *X;
2591 if (match(Op0, m_OneUse(m_c_And(m_Specific(Op1),
2592 m_OneUse(m_Neg(m_Value(X))))))) {
2593 return BinaryOperator::CreateNeg(Builder.CreateAnd(
2594 Op1, Builder.CreateAdd(X, Constant::getAllOnesValue(I.getType()))));
2595 }
2596 }
2597
2598 {
2599 // (sub (and Op1, C), Op1) --> neg (and Op1, ~C)
2600 Constant *C;
2601 if (match(Op0, m_OneUse(m_And(m_Specific(Op1), m_Constant(C))))) {
2602 return BinaryOperator::CreateNeg(
2603 Builder.CreateAnd(Op1, Builder.CreateNot(C)));
2604 }
2605 }
2606
2607 {
2608 // (sub (xor X, (sext C)), (sext C)) => (select C, (neg X), X)
2609 // (sub (sext C), (xor X, (sext C))) => (select C, X, (neg X))
2610 Value *C, *X;
2611 auto m_SubXorCmp = [&C, &X](Value *LHS, Value *RHS) {
2612 return match(LHS, m_OneUse(m_c_Xor(m_Value(X), m_Specific(RHS)))) &&
2613 match(RHS, m_SExt(m_Value(C))) &&
2614 (C->getType()->getScalarSizeInBits() == 1);
2615 };
2616 if (m_SubXorCmp(Op0, Op1))
2617 return SelectInst::Create(C, Builder.CreateNeg(X), X);
2618 if (m_SubXorCmp(Op1, Op0))
2619 return SelectInst::Create(C, X, Builder.CreateNeg(X));
2620 }
2621
2622 if (Instruction *R = tryFoldInstWithCtpopWithNot(&I))
2623 return R;
2624
2625 if (Instruction *R = foldSubOfMinMax(I, Builder))
2626 return R;
2627
2628 {
2629 // If we have a subtraction between some value and a select between
2630 // said value and something else, sink subtraction into select hands, i.e.:
2631 // sub (select %Cond, %TrueVal, %FalseVal), %Op1
2632 // ->
2633 // select %Cond, (sub %TrueVal, %Op1), (sub %FalseVal, %Op1)
2634 // or
2635 // sub %Op0, (select %Cond, %TrueVal, %FalseVal)
2636 // ->
2637 // select %Cond, (sub %Op0, %TrueVal), (sub %Op0, %FalseVal)
2638 // This will result in select between new subtraction and 0.
2639 auto SinkSubIntoSelect =
2640 [Ty = I.getType()](Value *Select, Value *OtherHandOfSub,
2641 auto SubBuilder) -> Instruction * {
2642 Value *Cond, *TrueVal, *FalseVal;
2643 if (!match(Select, m_OneUse(m_Select(m_Value(Cond), m_Value(TrueVal),
2644 m_Value(FalseVal)))))
2645 return nullptr;
2646 if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal)
2647 return nullptr;
2648 // While it is really tempting to just create two subtractions and let
2649 // InstCombine fold one of those to 0, it isn't possible to do so
2650 // because of worklist visitation order. So ugly it is.
2651 bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal;
2652 Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal);
2653 Constant *Zero = Constant::getNullValue(Ty);
2654 SelectInst *NewSel =
2655 SelectInst::Create(Cond, OtherHandOfSubIsTrueVal ? Zero : NewSub,
2656 OtherHandOfSubIsTrueVal ? NewSub : Zero);
2657 // Preserve prof metadata if any.
2658 NewSel->copyMetadata(cast<Instruction>(*Select));
2659 return NewSel;
2660 };
2661 if (Instruction *NewSel = SinkSubIntoSelect(
2662 /*Select=*/Op0, /*OtherHandOfSub=*/Op1,
2663 [Builder = &Builder, Op1](Value *OtherHandOfSelect) {
2664 return Builder->CreateSub(OtherHandOfSelect,
2665 /*OtherHandOfSub=*/Op1);
2666 }))
2667 return NewSel;
2668 if (Instruction *NewSel = SinkSubIntoSelect(
2669 /*Select=*/Op1, /*OtherHandOfSub=*/Op0,
2670 [Builder = &Builder, Op0](Value *OtherHandOfSelect) {
2671 return Builder->CreateSub(/*OtherHandOfSub=*/Op0,
2672 OtherHandOfSelect);
2673 }))
2674 return NewSel;
2675 }
2676
2677 // (X - (X & Y)) --> (X & ~Y)
2678 if (match(Op1, m_c_And(m_Specific(Op0), m_Value(Y))) &&
2679 (Op1->hasOneUse() || isa<Constant>(Y)))
2680 return BinaryOperator::CreateAnd(
2681 Op0, Builder.CreateNot(Y, Y->getName() + ".not"));
2682
2683 // ~X - Min/Max(~X, Y) -> ~Min/Max(X, ~Y) - X
2684 // ~X - Min/Max(Y, ~X) -> ~Min/Max(X, ~Y) - X
2685 // Min/Max(~X, Y) - ~X -> X - ~Min/Max(X, ~Y)
2686 // Min/Max(Y, ~X) - ~X -> X - ~Min/Max(X, ~Y)
2687 // As long as Y is freely invertible, this will be neutral or a win.
2688 // Note: We don't generate the inverse max/min, just create the 'not' of
2689 // it and let other folds do the rest.
2690 if (match(Op0, m_Not(m_Value(X))) &&
2691 match(Op1, m_c_MaxOrMin(m_Specific(Op0), m_Value(Y))) &&
2692 !Op0->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
2693 Value *Not = Builder.CreateNot(Op1);
2694 return BinaryOperator::CreateSub(Not, X);
2695 }
2696 if (match(Op1, m_Not(m_Value(X))) &&
2697 match(Op0, m_c_MaxOrMin(m_Specific(Op1), m_Value(Y))) &&
2698 !Op1->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
2699 Value *Not = Builder.CreateNot(Op0);
2700 return BinaryOperator::CreateSub(X, Not);
2701 }
2702
2703 // Optimize pointer differences into the same array into a size. Consider:
2704 // &A[10] - &A[0]: we should compile this to "10".
2705 Value *LHSOp, *RHSOp;
2706 if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
2707 match(Op1, m_PtrToInt(m_Value(RHSOp))))
2708 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
2709 I.hasNoUnsignedWrap()))
2710 return replaceInstUsesWith(I, Res);
2711
2712 // trunc(p)-trunc(q) -> trunc(p-q)
2713 if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
2714 match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
2715 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
2716 /* IsNUW */ false))
2717 return replaceInstUsesWith(I, Res);
2718
2719 if (match(Op0, m_ZExt(m_PtrToIntSameSize(DL, m_Value(LHSOp)))) &&
2720 match(Op1, m_ZExtOrSelf(m_PtrToInt(m_Value(RHSOp))))) {
2721 if (auto *GEP = dyn_cast<GEPOperator>(LHSOp)) {
2722 if (GEP->getPointerOperand() == RHSOp) {
2723 if (GEP->hasNoUnsignedWrap() || GEP->hasNoUnsignedSignedWrap()) {
2724 Value *Offset = EmitGEPOffset(GEP);
2725 Value *Res = GEP->hasNoUnsignedWrap()
2726 ? Builder.CreateZExt(
2727 Offset, I.getType(), "",
2728 /*IsNonNeg=*/GEP->hasNoUnsignedSignedWrap())
2729 : Builder.CreateSExt(Offset, I.getType());
2730 return replaceInstUsesWith(I, Res);
2731 }
2732 }
2733 }
2734 }
2735
2736 // Canonicalize a shifty way to code absolute value to the common pattern.
2737 // There are 2 potential commuted variants.
2738 // We're relying on the fact that we only do this transform when the shift has
2739 // exactly 2 uses and the xor has exactly 1 use (otherwise, we might increase
2740 // instructions).
2741 Value *A;
2742 const APInt *ShAmt;
2743 Type *Ty = I.getType();
2744 unsigned BitWidth = Ty->getScalarSizeInBits();
2745 if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
2746 Op1->hasNUses(2) && *ShAmt == BitWidth - 1 &&
2747 match(Op0, m_OneUse(m_c_Xor(m_Specific(A), m_Specific(Op1))))) {
2748 // B = ashr i32 A, 31 ; smear the sign bit
2749 // sub (xor A, B), B ; flip bits if negative and subtract -1 (add 1)
2750 // --> (A < 0) ? -A : A
2751 Value *IsNeg = Builder.CreateIsNeg(A);
2752 // Copy the nsw flags from the sub to the negate.
2753 Value *NegA = I.hasNoUnsignedWrap()
2754 ? Constant::getNullValue(A->getType())
2755 : Builder.CreateNeg(A, "", I.hasNoSignedWrap());
2756 return SelectInst::Create(IsNeg, NegA, A);
2757 }
2758
2759 // If we are subtracting a low-bit masked subset of some value from an add
2760 // of that same value with no low bits changed, that is clearing some low bits
2761 // of the sum:
2762 // sub (X + AddC), (X & AndC) --> and (X + AddC), ~AndC
2763 const APInt *AddC, *AndC;
2764 if (match(Op0, m_Add(m_Value(X), m_APInt(AddC))) &&
2765 match(Op1, m_And(m_Specific(X), m_APInt(AndC)))) {
2766 unsigned Cttz = AddC->countr_zero();
2767 APInt HighMask(APInt::getHighBitsSet(BitWidth, BitWidth - Cttz));
2768 if ((HighMask & *AndC).isZero())
2769 return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC)));
2770 }
2771
2772 if (Instruction *V =
2773 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
2774 return V;
2775
2776 // X - usub.sat(X, Y) => umin(X, Y)
2777 if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Specific(Op0),
2778 m_Value(Y)))))
2779 return replaceInstUsesWith(
2780 I, Builder.CreateIntrinsic(Intrinsic::umin, {I.getType()}, {Op0, Y}));
2781
2782 // umax(X, Op1) - Op1 --> usub.sat(X, Op1)
2783 // TODO: The one-use restriction is not strictly necessary, but it may
2784 // require improving other pattern matching and/or codegen.
2785 if (match(Op0, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op1)))))
2786 return replaceInstUsesWith(
2787 I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op1}));
2788
2789 // Op0 - umin(X, Op0) --> usub.sat(Op0, X)
2790 if (match(Op1, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op0)))))
2791 return replaceInstUsesWith(
2792 I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op0, X}));
2793
2794 // Op0 - umax(X, Op0) --> 0 - usub.sat(X, Op0)
2795 if (match(Op1, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op0))))) {
2796 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op0});
2797 return BinaryOperator::CreateNeg(USub);
2798 }
2799
2800 // umin(X, Op1) - Op1 --> 0 - usub.sat(Op1, X)
2801 if (match(Op0, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op1))))) {
2802 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op1, X});
2803 return BinaryOperator::CreateNeg(USub);
2804 }
2805
2806 // C - ctpop(X) => ctpop(~X) if C is bitwidth
2807 if (match(Op0, m_SpecificInt(BitWidth)) &&
2808 match(Op1, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(X)))))
2809 return replaceInstUsesWith(
2810 I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
2811 {Builder.CreateNot(X)}));
2812
2813 // Reduce multiplies for difference-of-squares by factoring:
2814 // (X * X) - (Y * Y) --> (X + Y) * (X - Y)
2815 if (match(Op0, m_OneUse(m_Mul(m_Value(X), m_Deferred(X)))) &&
2816 match(Op1, m_OneUse(m_Mul(m_Value(Y), m_Deferred(Y))))) {
2817 auto *OBO0 = cast<OverflowingBinaryOperator>(Op0);
2818 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2819 bool PropagateNSW = I.hasNoSignedWrap() && OBO0->hasNoSignedWrap() &&
2820 OBO1->hasNoSignedWrap() && BitWidth > 2;
2821 bool PropagateNUW = I.hasNoUnsignedWrap() && OBO0->hasNoUnsignedWrap() &&
2822 OBO1->hasNoUnsignedWrap() && BitWidth > 1;
2823 Value *Add = Builder.CreateAdd(X, Y, "add", PropagateNUW, PropagateNSW);
2824 Value *Sub = Builder.CreateSub(X, Y, "sub", PropagateNUW, PropagateNSW);
2825 Value *Mul = Builder.CreateMul(Add, Sub, "", PropagateNUW, PropagateNSW);
2826 return replaceInstUsesWith(I, Mul);
2827 }
2828
2829 // max(X,Y) nsw/nuw - min(X,Y) --> abs(X nsw - Y)
2830 if (match(Op0, m_OneUse(m_c_SMax(m_Value(X), m_Value(Y)))) &&
2831 match(Op1, m_OneUse(m_c_SMin(m_Specific(X), m_Specific(Y))))) {
2832 if (I.hasNoUnsignedWrap() || I.hasNoSignedWrap()) {
2833 Value *Sub =
2834 Builder.CreateSub(X, Y, "sub", /*HasNUW=*/false, /*HasNSW=*/true);
2835 Value *Call =
2836 Builder.CreateBinaryIntrinsic(Intrinsic::abs, Sub, Builder.getTrue());
2837 return replaceInstUsesWith(I, Call);
2838 }
2839 }
2840
2841 if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I))
2842 return Res;
2843
2844 // (sub (sext (add nsw (X, Y)), sext (X))) --> (sext (Y))
2845 if (match(Op1, m_SExtLike(m_Value(X))) &&
2846 match(Op0, m_SExtLike(m_c_NSWAdd(m_Specific(X), m_Value(Y))))) {
2847 Value *SExtY = Builder.CreateSExt(Y, I.getType());
2848 return replaceInstUsesWith(I, SExtY);
2849 }
2850
2851 // (sub[ nsw] (sext (add nsw (X, Y)), sext (add nsw (X, Z)))) -->
2852 // --> (sub[ nsw] (sext (Y), sext (Z)))
2853 {
2854 Value *Z, *Add0, *Add1;
2855 if (match(Op0, m_SExtLike(m_Value(Add0))) &&
2856 match(Op1, m_SExtLike(m_Value(Add1))) &&
2857 ((match(Add0, m_NSWAdd(m_Value(X), m_Value(Y))) &&
2858 match(Add1, m_c_NSWAdd(m_Specific(X), m_Value(Z)))) ||
2859 (match(Add0, m_NSWAdd(m_Value(Y), m_Value(X))) &&
2860 match(Add1, m_c_NSWAdd(m_Specific(X), m_Value(Z)))))) {
2861 unsigned NumOfNewInstrs = 0;
2862 // Non-constant Y, Z require new SExt.
2863 NumOfNewInstrs += !isa<Constant>(Y) ? 1 : 0;
2864 NumOfNewInstrs += !isa<Constant>(Z) ? 1 : 0;
2865 // Check if we can trade some of the old instructions for the new ones.
2866 unsigned NumOfDeadInstrs = 0;
2867 if (Op0->hasOneUse()) {
2868 // If Op0 (sext) has multiple uses, then we keep it
2869 // and the add that it uses, otherwise, we can remove
2870 // the sext and probably the add (depending on the number of its uses).
2871 ++NumOfDeadInstrs;
2872 NumOfDeadInstrs += Add0->hasOneUse() ? 1 : 0;
2873 }
2874 if (Op1->hasOneUse()) {
2875 ++NumOfDeadInstrs;
2876 NumOfDeadInstrs += Add1->hasOneUse() ? 1 : 0;
2877 }
2878 if (NumOfDeadInstrs >= NumOfNewInstrs) {
2879 Value *SExtY = Builder.CreateSExt(Y, I.getType());
2880 Value *SExtZ = Builder.CreateSExt(Z, I.getType());
2881 Value *Sub = Builder.CreateSub(SExtY, SExtZ, "",
2882 /*HasNUW=*/false,
2883 /*HasNSW=*/I.hasNoSignedWrap());
2884 return replaceInstUsesWith(I, Sub);
2885 }
2886 }
2887 }
2888
2889 return TryToNarrowDeduceFlags();
2890 }
2891
2892 /// This eliminates floating-point negation in either 'fneg(X)' or
2893 /// 'fsub(-0.0, X)' form by combining into a constant operand.
foldFNegIntoConstant(Instruction & I,const DataLayout & DL)2894 static Instruction *foldFNegIntoConstant(Instruction &I, const DataLayout &DL) {
2895 // This is limited with one-use because fneg is assumed better for
2896 // reassociation and cheaper in codegen than fmul/fdiv.
2897 // TODO: Should the m_OneUse restriction be removed?
2898 Instruction *FNegOp;
2899 if (!match(&I, m_FNeg(m_OneUse(m_Instruction(FNegOp)))))
2900 return nullptr;
2901
2902 Value *X;
2903 Constant *C;
2904
2905 // Fold negation into constant operand.
2906 // -(X * C) --> X * (-C)
2907 if (match(FNegOp, m_FMul(m_Value(X), m_Constant(C))))
2908 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) {
2909 FastMathFlags FNegF = I.getFastMathFlags();
2910 FastMathFlags OpF = FNegOp->getFastMathFlags();
2911 FastMathFlags FMF = FastMathFlags::unionValue(FNegF, OpF) |
2912 FastMathFlags::intersectRewrite(FNegF, OpF);
2913 FMF.setNoInfs(FNegF.noInfs() && OpF.noInfs());
2914 return BinaryOperator::CreateFMulFMF(X, NegC, FMF);
2915 }
2916 // -(X / C) --> X / (-C)
2917 if (match(FNegOp, m_FDiv(m_Value(X), m_Constant(C))))
2918 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
2919 return BinaryOperator::CreateFDivFMF(X, NegC, &I);
2920 // -(C / X) --> (-C) / X
2921 if (match(FNegOp, m_FDiv(m_Constant(C), m_Value(X))))
2922 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) {
2923 Instruction *FDiv = BinaryOperator::CreateFDivFMF(NegC, X, &I);
2924
2925 // Intersect 'nsz' and 'ninf' because those special value exceptions may
2926 // not apply to the fdiv. Everything else propagates from the fneg.
2927 // TODO: We could propagate nsz/ninf from fdiv alone?
2928 FastMathFlags FMF = I.getFastMathFlags();
2929 FastMathFlags OpFMF = FNegOp->getFastMathFlags();
2930 FDiv->setHasNoSignedZeros(FMF.noSignedZeros() && OpFMF.noSignedZeros());
2931 FDiv->setHasNoInfs(FMF.noInfs() && OpFMF.noInfs());
2932 return FDiv;
2933 }
2934 // With NSZ [ counter-example with -0.0: -(-0.0 + 0.0) != 0.0 + -0.0 ]:
2935 // -(X + C) --> -X + -C --> -C - X
2936 if (I.hasNoSignedZeros() && match(FNegOp, m_FAdd(m_Value(X), m_Constant(C))))
2937 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
2938 return BinaryOperator::CreateFSubFMF(NegC, X, &I);
2939
2940 return nullptr;
2941 }
2942
hoistFNegAboveFMulFDiv(Value * FNegOp,Instruction & FMFSource)2943 Instruction *InstCombinerImpl::hoistFNegAboveFMulFDiv(Value *FNegOp,
2944 Instruction &FMFSource) {
2945 Value *X, *Y;
2946 if (match(FNegOp, m_FMul(m_Value(X), m_Value(Y)))) {
2947 // Push into RHS which is more likely to simplify (const or another fneg).
2948 // FIXME: It would be better to invert the transform.
2949 return cast<Instruction>(Builder.CreateFMulFMF(
2950 X, Builder.CreateFNegFMF(Y, &FMFSource), &FMFSource));
2951 }
2952
2953 if (match(FNegOp, m_FDiv(m_Value(X), m_Value(Y)))) {
2954 return cast<Instruction>(Builder.CreateFDivFMF(
2955 Builder.CreateFNegFMF(X, &FMFSource), Y, &FMFSource));
2956 }
2957
2958 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(FNegOp)) {
2959 // Make sure to preserve flags and metadata on the call.
2960 if (II->getIntrinsicID() == Intrinsic::ldexp) {
2961 FastMathFlags FMF = FMFSource.getFastMathFlags() | II->getFastMathFlags();
2962 CallInst *New =
2963 Builder.CreateCall(II->getCalledFunction(),
2964 {Builder.CreateFNegFMF(II->getArgOperand(0), FMF),
2965 II->getArgOperand(1)});
2966 New->setFastMathFlags(FMF);
2967 New->copyMetadata(*II);
2968 return New;
2969 }
2970 }
2971
2972 return nullptr;
2973 }
2974
visitFNeg(UnaryOperator & I)2975 Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) {
2976 Value *Op = I.getOperand(0);
2977
2978 if (Value *V = simplifyFNegInst(Op, I.getFastMathFlags(),
2979 getSimplifyQuery().getWithInstruction(&I)))
2980 return replaceInstUsesWith(I, V);
2981
2982 if (Instruction *X = foldFNegIntoConstant(I, DL))
2983 return X;
2984
2985 Value *X, *Y;
2986
2987 // If we can ignore the sign of zeros: -(X - Y) --> (Y - X)
2988 if (I.hasNoSignedZeros() &&
2989 match(Op, m_OneUse(m_FSub(m_Value(X), m_Value(Y)))))
2990 return BinaryOperator::CreateFSubFMF(Y, X, &I);
2991
2992 Value *OneUse;
2993 if (!match(Op, m_OneUse(m_Value(OneUse))))
2994 return nullptr;
2995
2996 if (Instruction *R = hoistFNegAboveFMulFDiv(OneUse, I))
2997 return replaceInstUsesWith(I, R);
2998
2999 // Try to eliminate fneg if at least 1 arm of the select is negated.
3000 Value *Cond;
3001 if (match(OneUse, m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))) {
3002 // Unlike most transforms, this one is not safe to propagate nsz unless
3003 // it is present on the original select. We union the flags from the select
3004 // and fneg and then remove nsz if needed.
3005 auto propagateSelectFMF = [&](SelectInst *S, bool CommonOperand) {
3006 S->copyFastMathFlags(&I);
3007 if (auto *OldSel = dyn_cast<SelectInst>(Op)) {
3008 FastMathFlags FMF = I.getFastMathFlags() | OldSel->getFastMathFlags();
3009 S->setFastMathFlags(FMF);
3010 if (!OldSel->hasNoSignedZeros() && !CommonOperand &&
3011 !isGuaranteedNotToBeUndefOrPoison(OldSel->getCondition()))
3012 S->setHasNoSignedZeros(false);
3013 }
3014 };
3015 // -(Cond ? -P : Y) --> Cond ? P : -Y
3016 Value *P;
3017 if (match(X, m_FNeg(m_Value(P)))) {
3018 Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
3019 SelectInst *NewSel = SelectInst::Create(Cond, P, NegY);
3020 propagateSelectFMF(NewSel, P == Y);
3021 return NewSel;
3022 }
3023 // -(Cond ? X : -P) --> Cond ? -X : P
3024 if (match(Y, m_FNeg(m_Value(P)))) {
3025 Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
3026 SelectInst *NewSel = SelectInst::Create(Cond, NegX, P);
3027 propagateSelectFMF(NewSel, P == X);
3028 return NewSel;
3029 }
3030
3031 // -(Cond ? X : C) --> Cond ? -X : -C
3032 // -(Cond ? C : Y) --> Cond ? -C : -Y
3033 if (match(X, m_ImmConstant()) || match(Y, m_ImmConstant())) {
3034 Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
3035 Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
3036 SelectInst *NewSel = SelectInst::Create(Cond, NegX, NegY);
3037 propagateSelectFMF(NewSel, /*CommonOperand=*/true);
3038 return NewSel;
3039 }
3040 }
3041
3042 // fneg (copysign x, y) -> copysign x, (fneg y)
3043 if (match(OneUse, m_CopySign(m_Value(X), m_Value(Y)))) {
3044 // The source copysign has an additional value input, so we can't propagate
3045 // flags the copysign doesn't also have.
3046 FastMathFlags FMF = I.getFastMathFlags();
3047 FMF &= cast<FPMathOperator>(OneUse)->getFastMathFlags();
3048 Value *NegY = Builder.CreateFNegFMF(Y, FMF);
3049 Value *NewCopySign = Builder.CreateCopySign(X, NegY, FMF);
3050 return replaceInstUsesWith(I, NewCopySign);
3051 }
3052
3053 // fneg (shuffle x, Mask) --> shuffle (fneg x), Mask
3054 ArrayRef<int> Mask;
3055 if (match(OneUse, m_Shuffle(m_Value(X), m_Poison(), m_Mask(Mask))))
3056 return new ShuffleVectorInst(Builder.CreateFNegFMF(X, &I), Mask);
3057
3058 // fneg (reverse x) --> reverse (fneg x)
3059 if (match(OneUse, m_VecReverse(m_Value(X)))) {
3060 Value *Reverse = Builder.CreateVectorReverse(Builder.CreateFNegFMF(X, &I));
3061 return replaceInstUsesWith(I, Reverse);
3062 }
3063
3064 return nullptr;
3065 }
3066
visitFSub(BinaryOperator & I)3067 Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
3068 if (Value *V = simplifyFSubInst(I.getOperand(0), I.getOperand(1),
3069 I.getFastMathFlags(),
3070 getSimplifyQuery().getWithInstruction(&I)))
3071 return replaceInstUsesWith(I, V);
3072
3073 if (Instruction *X = foldVectorBinop(I))
3074 return X;
3075
3076 if (Instruction *Phi = foldBinopWithPhiOperands(I))
3077 return Phi;
3078
3079 // Subtraction from -0.0 is the canonical form of fneg.
3080 // fsub -0.0, X ==> fneg X
3081 // fsub nsz 0.0, X ==> fneg nsz X
3082 //
3083 // FIXME This matcher does not respect FTZ or DAZ yet:
3084 // fsub -0.0, Denorm ==> +-0
3085 // fneg Denorm ==> -Denorm
3086 Value *Op;
3087 if (match(&I, m_FNeg(m_Value(Op))))
3088 return UnaryOperator::CreateFNegFMF(Op, &I);
3089
3090 if (Instruction *X = foldFNegIntoConstant(I, DL))
3091 return X;
3092
3093 if (Instruction *R = foldFBinOpOfIntCasts(I))
3094 return R;
3095
3096 Value *X, *Y;
3097 Constant *C;
3098
3099 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3100 // If Op0 is not -0.0 or we can ignore -0.0: Z - (X - Y) --> Z + (Y - X)
3101 // Canonicalize to fadd to make analysis easier.
3102 // This can also help codegen because fadd is commutative.
3103 // Note that if this fsub was really an fneg, the fadd with -0.0 will get
3104 // killed later. We still limit that particular transform with 'hasOneUse'
3105 // because an fneg is assumed better/cheaper than a generic fsub.
3106 if (I.hasNoSignedZeros() ||
3107 cannotBeNegativeZero(Op0, getSimplifyQuery().getWithInstruction(&I))) {
3108 if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
3109 Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
3110 return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
3111 }
3112 }
3113
3114 // (-X) - Op1 --> -(X + Op1)
3115 if (I.hasNoSignedZeros() && !isa<ConstantExpr>(Op0) &&
3116 match(Op0, m_OneUse(m_FNeg(m_Value(X))))) {
3117 Value *FAdd = Builder.CreateFAddFMF(X, Op1, &I);
3118 return UnaryOperator::CreateFNegFMF(FAdd, &I);
3119 }
3120
3121 if (isa<Constant>(Op0))
3122 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
3123 if (Instruction *NV = FoldOpIntoSelect(I, SI))
3124 return NV;
3125
3126 // X - C --> X + (-C)
3127 // But don't transform constant expressions because there's an inverse fold
3128 // for X + (-Y) --> X - Y.
3129 if (match(Op1, m_ImmConstant(C)))
3130 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
3131 return BinaryOperator::CreateFAddFMF(Op0, NegC, &I);
3132
3133 // X - (-Y) --> X + Y
3134 if (match(Op1, m_FNeg(m_Value(Y))))
3135 return BinaryOperator::CreateFAddFMF(Op0, Y, &I);
3136
3137 // Similar to above, but look through a cast of the negated value:
3138 // X - (fptrunc(-Y)) --> X + fptrunc(Y)
3139 Type *Ty = I.getType();
3140 if (match(Op1, m_OneUse(m_FPTrunc(m_FNeg(m_Value(Y))))))
3141 return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPTrunc(Y, Ty), &I);
3142
3143 // X - (fpext(-Y)) --> X + fpext(Y)
3144 if (match(Op1, m_OneUse(m_FPExt(m_FNeg(m_Value(Y))))))
3145 return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPExt(Y, Ty), &I);
3146
3147 // Similar to above, but look through fmul/fdiv of the negated value:
3148 // Op0 - (-X * Y) --> Op0 + (X * Y)
3149 // Op0 - (Y * -X) --> Op0 + (X * Y)
3150 if (match(Op1, m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))))) {
3151 Value *FMul = Builder.CreateFMulFMF(X, Y, &I);
3152 return BinaryOperator::CreateFAddFMF(Op0, FMul, &I);
3153 }
3154 // Op0 - (-X / Y) --> Op0 + (X / Y)
3155 // Op0 - (X / -Y) --> Op0 + (X / Y)
3156 if (match(Op1, m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y)))) ||
3157 match(Op1, m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))))) {
3158 Value *FDiv = Builder.CreateFDivFMF(X, Y, &I);
3159 return BinaryOperator::CreateFAddFMF(Op0, FDiv, &I);
3160 }
3161
3162 // Handle special cases for FSub with selects feeding the operation
3163 if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
3164 return replaceInstUsesWith(I, V);
3165
3166 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
3167 // (Y - X) - Y --> -X
3168 if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X))))
3169 return UnaryOperator::CreateFNegFMF(X, &I);
3170
3171 // Y - (X + Y) --> -X
3172 // Y - (Y + X) --> -X
3173 if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X))))
3174 return UnaryOperator::CreateFNegFMF(X, &I);
3175
3176 // (X * C) - X --> X * (C - 1.0)
3177 if (match(Op0, m_FMul(m_Specific(Op1), m_Constant(C)))) {
3178 if (Constant *CSubOne = ConstantFoldBinaryOpOperands(
3179 Instruction::FSub, C, ConstantFP::get(Ty, 1.0), DL))
3180 return BinaryOperator::CreateFMulFMF(Op1, CSubOne, &I);
3181 }
3182 // X - (X * C) --> X * (1.0 - C)
3183 if (match(Op1, m_FMul(m_Specific(Op0), m_Constant(C)))) {
3184 if (Constant *OneSubC = ConstantFoldBinaryOpOperands(
3185 Instruction::FSub, ConstantFP::get(Ty, 1.0), C, DL))
3186 return BinaryOperator::CreateFMulFMF(Op0, OneSubC, &I);
3187 }
3188
3189 // Reassociate fsub/fadd sequences to create more fadd instructions and
3190 // reduce dependency chains:
3191 // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
3192 Value *Z;
3193 if (match(Op0, m_OneUse(m_c_FAdd(m_OneUse(m_FSub(m_Value(X), m_Value(Y))),
3194 m_Value(Z))))) {
3195 Value *XZ = Builder.CreateFAddFMF(X, Z, &I);
3196 Value *YW = Builder.CreateFAddFMF(Y, Op1, &I);
3197 return BinaryOperator::CreateFSubFMF(XZ, YW, &I);
3198 }
3199
3200 auto m_FaddRdx = [](Value *&Sum, Value *&Vec) {
3201 return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(m_Value(Sum),
3202 m_Value(Vec)));
3203 };
3204 Value *A0, *A1, *V0, *V1;
3205 if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) &&
3206 V0->getType() == V1->getType()) {
3207 // Difference of sums is sum of differences:
3208 // add_rdx(A0, V0) - add_rdx(A1, V1) --> add_rdx(A0, V0 - V1) - A1
3209 Value *Sub = Builder.CreateFSubFMF(V0, V1, &I);
3210 Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
3211 {Sub->getType()}, {A0, Sub}, &I);
3212 return BinaryOperator::CreateFSubFMF(Rdx, A1, &I);
3213 }
3214
3215 if (Instruction *F = factorizeFAddFSub(I, Builder))
3216 return F;
3217
3218 // TODO: This performs reassociative folds for FP ops. Some fraction of the
3219 // functionality has been subsumed by simple pattern matching here and in
3220 // InstSimplify. We should let a dedicated reassociation pass handle more
3221 // complex pattern matching and remove this from InstCombine.
3222 if (Value *V = FAddCombine(Builder).simplify(&I))
3223 return replaceInstUsesWith(I, V);
3224
3225 // (X - Y) - Op1 --> X - (Y + Op1)
3226 if (match(Op0, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
3227 Value *FAdd = Builder.CreateFAddFMF(Y, Op1, &I);
3228 return BinaryOperator::CreateFSubFMF(X, FAdd, &I);
3229 }
3230 }
3231
3232 return nullptr;
3233 }
3234