//===- TruncInstCombine.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// TruncInstCombine - looks for expression dags post-dominated by TruncInst
// and, for each eligible dag, creates a reduced bit-width expression, replaces
// the old expression with the new one, and removes the old expression.
// An eligible expression dag is one that:
//   1. Contains only supported instructions.
//   2. Has only supported leaves: ZExtInst, SExtInst, TruncInst and Constant
//      values.
//   3. Can be evaluated into a type with a reduced, legal bit-width.
//   4. Contains no instructions with users outside the dag.
//      The only exception is for {ZExt, SExt}Inst with operand type equal to
//      the new reduced type evaluated in (3).
//
// The motivation for this optimization is that evaluating an expression using
// a smaller bit-width is preferable, especially for vectorization, where we
// can fit more values in one vectorized instruction. In addition, this
// optimization may decrease the number of cast instructions, but will not
// increase it.
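//
// As an illustration only (hand-written IR, not taken from the test suite;
// it assumes a target where i16 is a legal integer type), a dag such as:
//
//   %za  = zext i16 %a to i32
//   %zb  = zext i16 %b to i32
//   %add = add i32 %za, %zb
//   %res = trunc i32 %add to i16
//
// can be evaluated directly in i16; the casts disappear and users of %res
// are rewritten to use the reduced add:
//
//   %add = add i16 %a, %b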
//
//===----------------------------------------------------------------------===//

#include "AggressiveInstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

#define DEBUG_TYPE "aggressive-instcombine"

STATISTIC(
    NumDAGsReduced,
    "Number of truncations eliminated by reducing bit width of expression DAG");
STATISTIC(NumInstrsReduced,
          "Number of instructions whose bit width was reduced");

/// Given an instruction and a container, fill the container with the relevant
/// operands of that instruction, with respect to the Trunc expression dag
/// optimization.
static void getRelevantOperands(Instruction *I, SmallVectorImpl<Value *> &Ops) {
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // These CastInsts are considered leaves of the evaluated expression, thus,
    // their operands are not relevant.
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    Ops.push_back(I->getOperand(0));
    Ops.push_back(I->getOperand(1));
    break;
  case Instruction::Select:
    Ops.push_back(I->getOperand(1));
    Ops.push_back(I->getOperand(2));
    break;
  default:
    llvm_unreachable("Unreachable!");
  }
}

bool TruncInstCombine::buildTruncExpressionDag() {
  SmallVector<Value *, 8> Worklist;
  SmallVector<Instruction *, 8> Stack;
  // Clear old expression dag.
  InstInfoMap.clear();

  Worklist.push_back(CurrentTruncInst->getOperand(0));

  while (!Worklist.empty()) {
    Value *Curr = Worklist.back();

    if (isa<Constant>(Curr)) {
      Worklist.pop_back();
      continue;
    }

    auto *I = dyn_cast<Instruction>(Curr);
    if (!I)
      return false;

    if (!Stack.empty() && Stack.back() == I) {
      // We have already handled all of the instruction's operands, so we can
      // remove it from both the Worklist and the Stack, and add it to the
      // instruction info map.
      Worklist.pop_back();
      Stack.pop_back();
      // Insert I to the Info map.
      InstInfoMap.insert(std::make_pair(I, Info()));
      continue;
    }

    if (InstInfoMap.count(I)) {
      Worklist.pop_back();
      continue;
    }

    // Add the instruction to the stack before starting to handle its operands.
    Stack.push_back(I);

    unsigned Opc = I->getOpcode();
    switch (Opc) {
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // trunc(trunc(x)) -> trunc(x)
      // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
      // trunc(ext(x)) -> trunc(x) if the source type is larger than the new
      // dest
      break;
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Select: {
      SmallVector<Value *, 2> Operands;
      getRelevantOperands(I, Operands);
      for (Value *Operand : Operands)
        Worklist.push_back(Operand);
      break;
    }
    default:
      // TODO: Can handle more cases here:
      // 1. shufflevector, extractelement, insertelement
      // 2. udiv, urem
      // 3. shl, lshr, ashr
      // 4. phi node (and loop handling)
      // ...
      return false;
    }
  }
  return true;
}

unsigned TruncInstCombine::getMinBitWidth() {
  SmallVector<Value *, 8> Worklist;
  SmallVector<Instruction *, 8> Stack;

  Value *Src = CurrentTruncInst->getOperand(0);
  Type *DstTy = CurrentTruncInst->getType();
  unsigned TruncBitWidth = DstTy->getScalarSizeInBits();
  unsigned OrigBitWidth =
      CurrentTruncInst->getOperand(0)->getType()->getScalarSizeInBits();

  if (isa<Constant>(Src))
    return TruncBitWidth;

  Worklist.push_back(Src);
  InstInfoMap[cast<Instruction>(Src)].ValidBitWidth = TruncBitWidth;

  while (!Worklist.empty()) {
    Value *Curr = Worklist.back();

    if (isa<Constant>(Curr)) {
      Worklist.pop_back();
      continue;
    }

    // Otherwise, it must be an instruction.
    auto *I = cast<Instruction>(Curr);

    auto &Info = InstInfoMap[I];

    SmallVector<Value *, 2> Operands;
    getRelevantOperands(I, Operands);

    if (!Stack.empty() && Stack.back() == I) {
      // We have already handled all of the instruction's operands, so we can
      // remove it from both the Worklist and the Stack, and update MinBitWidth.
      Worklist.pop_back();
      Stack.pop_back();
      for (auto *Operand : Operands)
        if (auto *IOp = dyn_cast<Instruction>(Operand))
          Info.MinBitWidth =
              std::max(Info.MinBitWidth, InstInfoMap[IOp].MinBitWidth);
      continue;
    }

    // Add the instruction to the stack before starting to handle its operands.
    Stack.push_back(I);
    unsigned ValidBitWidth = Info.ValidBitWidth;

    // Update the minimum bit-width before handling its operands. This is
    // required when the instruction is part of a loop.
    Info.MinBitWidth = std::max(Info.MinBitWidth, Info.ValidBitWidth);

    for (auto *Operand : Operands)
      if (auto *IOp = dyn_cast<Instruction>(Operand)) {
        // If we already calculated the minimum bit-width for this valid
        // bit-width, or for a smaller valid bit-width, then just keep the
        // answer we already calculated.
        unsigned IOpBitwidth = InstInfoMap.lookup(IOp).ValidBitWidth;
        if (IOpBitwidth >= ValidBitWidth)
          continue;
        InstInfoMap[IOp].ValidBitWidth = ValidBitWidth;
        Worklist.push_back(IOp);
      }
  }
  unsigned MinBitWidth = InstInfoMap.lookup(cast<Instruction>(Src)).MinBitWidth;
  assert(MinBitWidth >= TruncBitWidth);

  if (MinBitWidth > TruncBitWidth) {
    // In this case reducing an expression with a vector type might generate a
    // new vector type, which is not preferable as it might result in
    // generating sub-optimal code.
    if (DstTy->isVectorTy())
      return OrigBitWidth;
    // Use the smallest integer type in the range [MinBitWidth, OrigBitWidth).
    Type *Ty = DL.getSmallestLegalIntType(DstTy->getContext(), MinBitWidth);
    // Update the minimum bit-width with the bit-width of the new destination
    // type if we succeeded in finding one; otherwise, use the original
    // bit-width.
    MinBitWidth = Ty ? Ty->getScalarSizeInBits() : OrigBitWidth;
  } else { // MinBitWidth == TruncBitWidth
    // In this case the expression can be evaluated with the trunc instruction
    // destination type, and the trunc instruction can be omitted. However, we
    // should not perform the evaluation if the original type is a legal scalar
    // type and the target type is illegal.
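    // For example (illustrative only), with a DataLayout declaring native
    // integer widths "n32:64", i32 is legal while i16 is not, so a scalar
    // trunc from i32 to i16 is left untouched rather than evaluated in i16.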
    bool FromLegal = MinBitWidth == 1 || DL.isLegalInteger(OrigBitWidth);
    bool ToLegal = MinBitWidth == 1 || DL.isLegalInteger(MinBitWidth);
    if (!DstTy->isVectorTy() && FromLegal && !ToLegal)
      return OrigBitWidth;
  }
  return MinBitWidth;
}

Type *TruncInstCombine::getBestTruncatedType() {
  if (!buildTruncExpressionDag())
    return nullptr;

  // We don't want to duplicate instructions, which isn't profitable. Thus, we
  // can't shrink something that has multiple users, unless all users are
  // post-dominated by the trunc instruction, i.e., were visited during the
  // expression evaluation.
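  // For example (illustrative only), a leaf like "%s = sext i16 %x to i32"
  // that also has a user outside the dag does not block the reduction: if the
  // dag is evaluated in i16, the reduced dag uses %x directly, and %s is kept
  // only for its outside users.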
  unsigned DesiredBitWidth = 0;
  for (auto Itr : InstInfoMap) {
    Instruction *I = Itr.first;
    if (I->hasOneUse())
      continue;
    bool IsExtInst = (isa<ZExtInst>(I) || isa<SExtInst>(I));
    for (auto *U : I->users())
      if (auto *UI = dyn_cast<Instruction>(U))
        if (UI != CurrentTruncInst && !InstInfoMap.count(UI)) {
          if (!IsExtInst)
            return nullptr;
          // If this is an extension from the dest type, we can eliminate it,
          // even if it has multiple users. Thus, update the DesiredBitWidth
          // and validate that all extension instructions agree on the same
          // DesiredBitWidth.
          unsigned ExtInstBitWidth =
              I->getOperand(0)->getType()->getScalarSizeInBits();
          if (DesiredBitWidth && DesiredBitWidth != ExtInstBitWidth)
            return nullptr;
          DesiredBitWidth = ExtInstBitWidth;
        }
  }

  unsigned OrigBitWidth =
      CurrentTruncInst->getOperand(0)->getType()->getScalarSizeInBits();

  // Calculate the minimum bit-width allowed for shrinking the currently
  // visited truncate's operand.
  unsigned MinBitWidth = getMinBitWidth();

  // Check that we can shrink to a smaller bit-width than the original one and
  // that it matches the DesiredBitWidth, if such a width exists.
  if (MinBitWidth >= OrigBitWidth ||
      (DesiredBitWidth && DesiredBitWidth != MinBitWidth))
    return nullptr;

  return IntegerType::get(CurrentTruncInst->getContext(), MinBitWidth);
}

/// Given a reduced scalar type \p Ty and a value \p V, return a reduced type
/// for \p V, according to its type: if it is a vector type, return the vector
/// version of \p Ty, otherwise return \p Ty.
static Type *getReducedType(Value *V, Type *Ty) {
  assert(Ty && !Ty->isVectorTy() && "Expect Scalar Type");
  if (auto *VTy = dyn_cast<VectorType>(V->getType())) {
    // FIXME: should this handle scalable vectors?
    return FixedVectorType::get(Ty, VTy->getNumElements());
  }
  return Ty;
}

Value *TruncInstCombine::getReducedOperand(Value *V, Type *SclTy) {
  Type *Ty = getReducedType(V, SclTy);
  if (auto *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, false);
    // If we got a constantexpr back, try to simplify it with DL info.
    return ConstantFoldConstant(C, DL, &TLI);
  }

  auto *I = cast<Instruction>(V);
  Info Entry = InstInfoMap.lookup(I);
  assert(Entry.NewValue);
  return Entry.NewValue;
}

void TruncInstCombine::ReduceExpressionDag(Type *SclTy) {
  NumInstrsReduced += InstInfoMap.size();
  for (auto &Itr : InstInfoMap) { // Forward
    Instruction *I = Itr.first;
    TruncInstCombine::Info &NodeInfo = Itr.second;

    assert(!NodeInfo.NewValue && "Instruction has been evaluated");

    IRBuilder<> Builder(I);
    Value *Res = nullptr;
    unsigned Opc = I->getOpcode();
    switch (Opc) {
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt: {
      Type *Ty = getReducedType(I, SclTy);
      // If the source type of the cast is the type we're trying for then we can
      // just return the source.  There's no need to insert it because it is not
      // new.
      if (I->getOperand(0)->getType() == Ty) {
        assert(!isa<TruncInst>(I) && "Cannot reach here with TruncInst");
        NodeInfo.NewValue = I->getOperand(0);
        continue;
      }
      // Otherwise, must be the same type of cast, so just reinsert a new one.
      // This also handles the case of zext(trunc(x)) -> zext(x).
      Res = Builder.CreateIntCast(I->getOperand(0), Ty,
                                  Opc == Instruction::SExt);

      // Update Worklist entries with new value if needed.
      // There are three possible changes to the Worklist:
      // 1. Update Old-TruncInst -> New-TruncInst.
      // 2. Remove Old-TruncInst (if New node is not TruncInst).
      // 3. Add New-TruncInst (if Old node was not TruncInst).
      auto Entry = find(Worklist, I);
      if (Entry != Worklist.end()) {
        if (auto *NewCI = dyn_cast<TruncInst>(Res))
          *Entry = NewCI;
        else
          Worklist.erase(Entry);
      } else if (auto *NewCI = dyn_cast<TruncInst>(Res))
        Worklist.push_back(NewCI);
      break;
    }
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      Value *LHS = getReducedOperand(I->getOperand(0), SclTy);
      Value *RHS = getReducedOperand(I->getOperand(1), SclTy);
      Res = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
      break;
    }
    case Instruction::Select: {
      Value *Op0 = I->getOperand(0);
      Value *LHS = getReducedOperand(I->getOperand(1), SclTy);
      Value *RHS = getReducedOperand(I->getOperand(2), SclTy);
      Res = Builder.CreateSelect(Op0, LHS, RHS);
      break;
    }
    default:
      llvm_unreachable("Unhandled instruction");
    }

    NodeInfo.NewValue = Res;
    if (auto *ResI = dyn_cast<Instruction>(Res))
      ResI->takeName(I);
  }

  Value *Res = getReducedOperand(CurrentTruncInst->getOperand(0), SclTy);
  Type *DstTy = CurrentTruncInst->getType();
  if (Res->getType() != DstTy) {
    IRBuilder<> Builder(CurrentTruncInst);
    Res = Builder.CreateIntCast(Res, DstTy, false);
    if (auto *ResI = dyn_cast<Instruction>(Res))
      ResI->takeName(CurrentTruncInst);
  }
  CurrentTruncInst->replaceAllUsesWith(Res);

  // Erase the old expression dag, which was replaced by the reduced expression
  // dag. We iterate backward, which means we visit an instruction before we
  // visit any of its operands; this way, when we get to an operand, we have
  // already removed the instructions (from the expression dag) that use it.
  CurrentTruncInst->eraseFromParent();
  for (auto I = InstInfoMap.rbegin(), E = InstInfoMap.rend(); I != E; ++I) {
    // We still need to check that the instruction has no users before we erase
    // it, because a {SExt, ZExt}Inst instruction might have other users that
    // were not reduced; in such a case, we need to keep that instruction.
    if (I->first->use_empty())
      I->first->eraseFromParent();
  }
}

bool TruncInstCombine::run(Function &F) {
  bool MadeIRChange = false;

  // Collect all TruncInsts in the function into the Worklist for evaluation.
  for (auto &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    for (auto &I : BB)
      if (auto *CI = dyn_cast<TruncInst>(&I))
        Worklist.push_back(CI);
  }

  // Process all TruncInsts in the Worklist; for each instruction:
  //   1. Check if it dominates an eligible expression dag to be reduced.
  //   2. Create a reduced expression dag and replace the old one with it.
  while (!Worklist.empty()) {
    CurrentTruncInst = Worklist.pop_back_val();

    if (Type *NewDstSclTy = getBestTruncatedType()) {
      LLVM_DEBUG(
          dbgs() << "ICE: TruncInstCombine reducing type of expression dag "
                    "dominated by: "
                 << CurrentTruncInst << '\n');
      ReduceExpressionDag(NewDstSclTy);
      ++NumDAGsReduced;
      MadeIRChange = true;
    }
  }

  return MadeIRChange;
}