//===- AggressiveInstCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the aggressive expression pattern combiner classes.
// Currently, it handles expression patterns for:
//  * Truncate instruction
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "AggressiveInstCombineInternal.h"
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/AggressiveInstCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "aggressive-instcombine"

STATISTIC(NumAnyOrAllBitsSet, "Number of any/all-bits-set patterns folded");
STATISTIC(NumGuardedRotates,
          "Number of guarded rotates transformed into funnel shifts");
STATISTIC(NumGuardedFunnelShifts,
          "Number of guarded funnel shifts transformed into funnel shifts");
STATISTIC(NumPopCountRecognized, "Number of popcount idioms recognized");

namespace {
/// Contains expression pattern combiner logic.
/// This class provides the logic to identify expression patterns and combine
/// them. It differs from the InstCombiner class in that each pattern combiner
/// runs only once, as opposed to InstCombine's multiple iterations, which
/// allows a pattern combiner to have higher complexity than the O(1) required
/// by the instruction combiner.
class AggressiveInstCombinerLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AggressiveInstCombinerLegacyPass() : FunctionPass(ID) {
    initializeAggressiveInstCombinerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Run all expression pattern optimizations on the given function \p F.
  ///
  /// \param F function to optimize.
  /// \returns true if the IR is changed.
  bool runOnFunction(Function &F) override;
};
} // namespace

/// Match a pattern for a bitwise funnel/rotate operation that partially guards
/// against undefined behavior by branching around the funnel-shift/rotation
/// when the shift amount is 0.
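///
/// For reference, this kind of guarded rotate typically originates from C
/// source like the following, once the ternary is lowered to a branch and a
/// phi (a hypothetical, illustrative example; a guarded funnel shift has the
/// same shape with two different shifted values):
///
///   unsigned rotl32(unsigned X, unsigned ShAmt) {
///     return ShAmt == 0 ? X : (X << ShAmt) | (X >> (32 - ShAmt));
///   }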
static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
  if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
    return false;

  // As with the one-use checks below, this is not strictly necessary, but we
  // are being cautious to avoid potential perf regressions on targets that
  // do not actually have a funnel/rotate instruction (where the funnel shift
  // would be expanded back into math/shift/logic ops).
  if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
    return false;

  // Match V to funnel shift left/right and capture the source operands and
  // shift amount.
  auto matchFunnelShift = [](Value *V, Value *&ShVal0, Value *&ShVal1,
                             Value *&ShAmt) {
    Value *SubAmt;
    unsigned Width = V->getType()->getScalarSizeInBits();

    // fshl(ShVal0, ShVal1, ShAmt)
    //  == (ShVal0 << ShAmt) | (ShVal1 >> (Width - ShAmt))
    if (match(V, m_OneUse(m_c_Or(
                     m_Shl(m_Value(ShVal0), m_Value(ShAmt)),
                     m_LShr(m_Value(ShVal1),
                            m_Sub(m_SpecificInt(Width), m_Value(SubAmt))))))) {
      if (ShAmt == SubAmt) // TODO: Use m_Specific
        return Intrinsic::fshl;
    }

    // fshr(ShVal0, ShVal1, ShAmt)
    //  == (ShVal0 >> ShAmt) | (ShVal1 << (Width - ShAmt))
    if (match(V,
              m_OneUse(m_c_Or(m_Shl(m_Value(ShVal0), m_Sub(m_SpecificInt(Width),
                                                           m_Value(SubAmt))),
                              m_LShr(m_Value(ShVal1), m_Value(ShAmt)))))) {
      if (ShAmt == SubAmt) // TODO: Use m_Specific
        return Intrinsic::fshr;
    }

    return Intrinsic::not_intrinsic;
  };

  // One phi operand must be a funnel/rotate operation, and the other phi
  // operand must be the source value of that funnel/rotate operation:
  // phi [ rotate(RotSrc, ShAmt), FunnelBB ], [ RotSrc, GuardBB ]
  // phi [ fshl(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal0, GuardBB ]
  // phi [ fshr(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal1, GuardBB ]
  PHINode &Phi = cast<PHINode>(I);
  unsigned FunnelOp = 0, GuardOp = 1;
  Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
  Value *ShVal0, *ShVal1, *ShAmt;
  Intrinsic::ID IID = matchFunnelShift(P0, ShVal0, ShVal1, ShAmt);
  if (IID == Intrinsic::not_intrinsic ||
      (IID == Intrinsic::fshl && ShVal0 != P1) ||
      (IID == Intrinsic::fshr && ShVal1 != P1)) {
    IID = matchFunnelShift(P1, ShVal0, ShVal1, ShAmt);
    if (IID == Intrinsic::not_intrinsic ||
        (IID == Intrinsic::fshl && ShVal0 != P0) ||
        (IID == Intrinsic::fshr && ShVal1 != P0))
      return false;
    assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
           "Pattern must match funnel shift left or right");
    std::swap(FunnelOp, GuardOp);
  }

  // The incoming block with our source operand must be the "guard" block.
  // That must contain a cmp+branch to avoid the funnel/rotate when the shift
  // amount is equal to 0. The other incoming block is the block with the
  // funnel/rotate.
  BasicBlock *GuardBB = Phi.getIncomingBlock(GuardOp);
  BasicBlock *FunnelBB = Phi.getIncomingBlock(FunnelOp);
  Instruction *TermI = GuardBB->getTerminator();

  // Ensure that the shift values dominate each block.
  if (!DT.dominates(ShVal0, TermI) || !DT.dominates(ShVal1, TermI))
    return false;

  ICmpInst::Predicate Pred;
  BasicBlock *PhiBB = Phi.getParent();
  if (!match(TermI, m_Br(m_ICmp(Pred, m_Specific(ShAmt), m_ZeroInt()),
                         m_SpecificBB(PhiBB), m_SpecificBB(FunnelBB))))
    return false;

  if (Pred != CmpInst::ICMP_EQ)
    return false;

  IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());

  if (ShVal0 == ShVal1)
    ++NumGuardedRotates;
  else
    ++NumGuardedFunnelShifts;

  // If this is not a rotate then the phi was blocking poison from the
  // 'shift-by-zero' incoming value, but a funnel shift won't - so freeze it.
  bool IsFshl = IID == Intrinsic::fshl;
  if (ShVal0 != ShVal1) {
    if (IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal1))
      ShVal1 = Builder.CreateFreeze(ShVal1);
    else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal0))
      ShVal0 = Builder.CreateFreeze(ShVal0);
  }

  // We matched a variation of this IR pattern:
  // GuardBB:
  //   %cmp = icmp eq i32 %ShAmt, 0
  //   br i1 %cmp, label %PhiBB, label %FunnelBB
  // FunnelBB:
  //   %sub = sub i32 32, %ShAmt
  //   %shr = lshr i32 %ShVal1, %sub
  //   %shl = shl i32 %ShVal0, %ShAmt
  //   %fsh = or i32 %shr, %shl
  //   br label %PhiBB
  // PhiBB:
  //   %cond = phi i32 [ %fsh, %FunnelBB ], [ %ShVal0, %GuardBB ]
  // -->
  // llvm.fshl.i32(i32 %ShVal0, i32 %ShVal1, i32 %ShAmt)
  Function *F = Intrinsic::getDeclaration(Phi.getModule(), IID, Phi.getType());
  Phi.replaceAllUsesWith(Builder.CreateCall(F, {ShVal0, ShVal1, ShAmt}));
  return true;
}

/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
/// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
/// of 'and' ops, then we also need to capture the fact that we saw an
/// "and X, 1", so that's an extra return value for that case.
struct MaskOps {
  Value *Root;
  APInt Mask;
  bool MatchAndChain;
  bool FoundAnd1;

  MaskOps(unsigned BitWidth, bool MatchAnds)
      : Root(nullptr), Mask(APInt::getZero(BitWidth)), MatchAndChain(MatchAnds),
        FoundAnd1(false) {}
};
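
// For illustration: a match over an i32 'and' chain would start from
// MaskOps MOps(/*BitWidth=*/32, /*MatchAnds=*/true) - an empty mask with no
// root value captured yet; matchAndOrChain() below fills in Root and Mask.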

/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
/// chain of 'and' or 'or' instructions looking for shift ops of a common source
/// value. Examples:
///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
/// returns { X, 0x129 }
///   and (and (X >> 1), 1), (X >> 4)
/// returns { X, 0x12 }
static bool matchAndOrChain(Value *V, MaskOps &MOps) {
  Value *Op0, *Op1;
  if (MOps.MatchAndChain) {
    // Recurse through a chain of 'and' operands. This requires an extra check
    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
    // in the chain to know that all of the high bits are cleared.
    if (match(V, m_And(m_Value(Op0), m_One()))) {
      MOps.FoundAnd1 = true;
      return matchAndOrChain(Op0, MOps);
    }
    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  } else {
    // Recurse through a chain of 'or' operands.
    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  }

  // We need a shift-right or a bare value representing a compare of bit 0 of
  // the original source operand.
  Value *Candidate;
  const APInt *BitIndex = nullptr;
  if (!match(V, m_LShr(m_Value(Candidate), m_APInt(BitIndex))))
    Candidate = V;

  // Initialize result source operand.
  if (!MOps.Root)
    MOps.Root = Candidate;

  // If the shift constant is out-of-range, this code hasn't been simplified;
  // bail out.
  if (BitIndex && BitIndex->uge(MOps.Mask.getBitWidth()))
    return false;

  // Fill in the mask bit derived from the shift constant.
  MOps.Mask.setBit(BitIndex ? BitIndex->getZExtValue() : 0);
  return MOps.Root == Candidate;
}

/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
/// These will include a chain of 'or' or 'and'-shifted bits from a
/// common source value:
/// and (or  (lshr X, C), ...), 1 --> (X & CMask) != 0
/// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
/// that differ only with a final 'not' of the result. We expect that final
/// 'not' to be folded with the compare that we create here (invert predicate).
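///
/// A concrete example (illustrative only; the mask value follows from the
/// shift amounts): the "any-bits-set" chain ((X >> 3) | (X >> 5)) & 1 collects
/// bits 3 and 5, so it becomes zext((X & 0x28) != 0).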
static bool foldAnyOrAllBitsSet(Instruction &I) {
  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
  // "and X, 1" instruction must be the final op in the sequence.
  bool MatchAllBitsSet;
  if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value())))
    MatchAllBitsSet = true;
  else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
    MatchAllBitsSet = false;
  else
    return false;

  MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
  if (MatchAllBitsSet) {
    if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
      return false;
  } else {
    if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
      return false;
  }

  // The pattern was found. Create a masked compare that replaces all of the
  // shift and logic ops.
  IRBuilder<> Builder(&I);
  Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
  Value *And = Builder.CreateAnd(MOps.Root, Mask);
  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
                               : Builder.CreateIsNotNull(And);
  Value *Zext = Builder.CreateZExt(Cmp, I.getType());
  I.replaceAllUsesWith(Zext);
  ++NumAnyOrAllBitsSet;
  return true;
}

// Try to recognize the function below as a popcount intrinsic.
// This is the "best" algorithm from
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
// Also used in TargetLowering::expandCTPOP().
//
// int popcount(unsigned int i) {
//   i = i - ((i >> 1) & 0x55555555);
//   i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
//   i = ((i + (i >> 4)) & 0x0F0F0F0F);
//   return (i * 0x01010101) >> 24;
// }
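//
// A worked example of the arithmetic (illustrative only), for i = 0xF0F0F0F0
// (16 set bits):
//   after statement 1: 0xA0A0A0A0  (per-byte 2-bit field sums 2,2,0,0)
//   after statement 2: 0x40404040  (per-byte 4-bit field sums 4,0)
//   after statement 3: 0x04040404  (per-byte sums: 4 each)
//   statement 4: 0x04040404 * 0x01010101 = 0x100C0804 (mod 2^32); the multiply
//   accumulates all byte sums into the top byte, so >> 24 yields 0x10 = 16.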
static bool tryToRecognizePopCount(Instruction &I) {
  if (I.getOpcode() != Instruction::LShr)
    return false;

  Type *Ty = I.getType();
  if (!Ty->isIntOrIntVectorTy())
    return false;

  unsigned Len = Ty->getScalarSizeInBits();
  // FIXME: fix Len == 8 and other irregular type lengths.
  if (!(Len <= 128 && Len > 8 && Len % 8 == 0))
    return false;

  APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
  APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));
  APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F));
  APInt Mask01 = APInt::getSplat(Len, APInt(8, 0x01));
  APInt MaskShift = APInt(Len, Len - 8);

  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *MulOp0;
  // Matching "(i * 0x01010101...) >> 24".
  if ((match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01)))) &&
       match(Op1, m_SpecificInt(MaskShift))) {
    Value *ShiftOp0;
    // Matching "((i + (i >> 4)) & 0x0F0F0F0F...)".
    if (match(MulOp0, m_And(m_c_Add(m_LShr(m_Value(ShiftOp0), m_SpecificInt(4)),
                                    m_Deferred(ShiftOp0)),
                            m_SpecificInt(Mask0F)))) {
      Value *AndOp0;
      // Matching "(i & 0x33333333...) + ((i >> 2) & 0x33333333...)".
      if (match(ShiftOp0,
                m_c_Add(m_And(m_Value(AndOp0), m_SpecificInt(Mask33)),
                        m_And(m_LShr(m_Deferred(AndOp0), m_SpecificInt(2)),
                              m_SpecificInt(Mask33))))) {
        Value *Root, *SubOp1;
        // Matching "i - ((i >> 1) & 0x55555555...)".
        if (match(AndOp0, m_Sub(m_Value(Root), m_Value(SubOp1))) &&
            match(SubOp1, m_And(m_LShr(m_Specific(Root), m_SpecificInt(1)),
                                m_SpecificInt(Mask55)))) {
          LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
          IRBuilder<> Builder(&I);
          Function *Func = Intrinsic::getDeclaration(
              I.getModule(), Intrinsic::ctpop, I.getType());
          I.replaceAllUsesWith(Builder.CreateCall(Func, {Root}));
          ++NumPopCountRecognized;
          return true;
        }
      }
    }
  }

  return false;
}

/// This is the entry point for folds that could be implemented in regular
/// InstCombine, but they are separated because they are not expected to
/// occur frequently and/or have more than a constant-length pattern match.
static bool foldUnusualPatterns(Function &F, DominatorTree &DT) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Do not delete instructions here; that would invalidate the iterator.
    // Walk the block backwards for efficiency. We're matching a chain of
    // use->defs, so we're more likely to succeed by starting from the bottom.
    // Also, we want to avoid matching partial patterns.
    // TODO: It would be more efficient if we removed dead instructions
    // iteratively in this loop rather than waiting until the end.
    for (Instruction &I : llvm::reverse(BB)) {
      MadeChange |= foldAnyOrAllBitsSet(I);
      MadeChange |= foldGuardedFunnelShift(I, DT);
      MadeChange |= tryToRecognizePopCount(I);
    }
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
static bool runImpl(Function &F, AssumptionCache &AC, TargetLibraryInfo &TLI,
                    DominatorTree &DT) {
  bool MadeChange = false;
  const DataLayout &DL = F.getParent()->getDataLayout();
  TruncInstCombine TIC(AC, TLI, DL, DT);
  MadeChange |= TIC.run(F);
  MadeChange |= foldUnusualPatterns(F, DT);
  return MadeChange;
}

void AggressiveInstCombinerLegacyPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
}

bool AggressiveInstCombinerLegacyPass::runOnFunction(Function &F) {
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  return runImpl(F, AC, TLI, DT);
}

PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, AC, TLI, DT)) {
    // No changes, all analyses are preserved.
    return PreservedAnalyses::all();
  }
  // Mark all the analyses that instcombine updates as preserved.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

char AggressiveInstCombinerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(AggressiveInstCombinerLegacyPass,
                      "aggressive-instcombine",
                      "Combine pattern based expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AggressiveInstCombinerLegacyPass, "aggressive-instcombine",
                    "Combine pattern based expressions", false, false)

// Initialization Routines
void llvm::initializeAggressiveInstCombine(PassRegistry &Registry) {
  initializeAggressiveInstCombinerLegacyPassPass(Registry);
}

void LLVMInitializeAggressiveInstCombiner(LLVMPassRegistryRef R) {
  initializeAggressiveInstCombinerLegacyPassPass(*unwrap(R));
}

FunctionPass *llvm::createAggressiveInstCombinerPass() {
  return new AggressiveInstCombinerLegacyPass();
}

void LLVMAddAggressiveInstCombinerPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAggressiveInstCombinerPass());
}