//===- IntervalPartition.cpp - CFG Partitioning into Intervals --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines functionality for partitioning a CFG into intervals.
//
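//  An interval is a maximal single-entry subgraph: every member other than
//  the header has all of its predecessors inside the interval. Collapsing
//  each interval to one node yields a derived graph; iterating this
//  construction reaches a single-node graph exactly when the CFG is
//  reducible.
//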
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/IntervalPartition.h"
#include "clang/Analysis/CFG.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include <optional>
#include <queue>
#include <vector>

namespace clang {

// Intermediate data used in constructing a CFGIntervalNode.
template <typename Node> struct BuildResult {
  // Use a vector to maintain the insertion order. Given the expected small
  // number of nodes, vector should be sufficiently efficient. Elements must not
  // be null.
  std::vector<const Node *> Nodes;
  // Elements must not be null.
  llvm::SmallDenseSet<const Node *> Successors;
};

namespace internal {
static unsigned getID(const CFGBlock &B) { return B.getBlockID(); }
static unsigned getID(const CFGIntervalNode &I) { return I.ID; }

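// Builds the interval headed at `Header`: starting from the header's
// successors, a node joins the interval once all of its predecessors are
// already in the interval. `Partitioned` marks nodes claimed by previous
// intervals (such nodes are skipped) and is updated with the new interval's
// members. The result also records the interval's successor nodes.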
// `Node` must be one of `CFGBlock` or `CFGIntervalNode`.
template <typename Node>
BuildResult<Node> buildInterval(llvm::BitVector &Partitioned,
                                const Node *Header) {
  assert(Header != nullptr);
  BuildResult<Node> Interval;
  Interval.Nodes.push_back(Header);
  Partitioned.set(getID(*Header));

  // FIXME: Compare performance against using RPO to consider nodes, rather than
  // following successors.
  //
  // Elements must not be null. Duplicates are prevented using `Workset`, below.
  std::queue<const Node *> Worklist;
  llvm::BitVector Workset(Partitioned.size(), false);
  for (const Node *S : Header->succs())
    if (S != nullptr)
      if (auto SID = getID(*S); !Partitioned.test(SID)) {
        // Successors are unique, so we don't test against `Workset` before
        // adding to `Worklist`.
        Worklist.push(S);
        Workset.set(SID);
      }

  // Contains successors of blocks in the interval that couldn't be added to the
  // interval on their first encounter. This occurs when they have a predecessor
  // that is either definitively outside the interval or hasn't been considered
  // yet. In the latter case, we'll revisit the block through some other path
  // from the interval. At the end of processing the worklist, we filter out any
  // that ended up in the interval to produce the output set of interval
  // successors. Elements are never null.
  std::vector<const Node *> MaybeSuccessors;

  while (!Worklist.empty()) {
    const auto *B = Worklist.front();
    auto ID = getID(*B);
    Worklist.pop();
    Workset.reset(ID);

    // Check whether all predecessors are in the interval, in which case `B`
    // is included as well.
    bool AllInInterval = llvm::all_of(B->preds(), [&](const Node *P) {
      return llvm::is_contained(Interval.Nodes, P);
    });
    if (AllInInterval) {
      Interval.Nodes.push_back(B);
      Partitioned.set(ID);
      for (const Node *S : B->succs())
        if (S != nullptr)
          if (auto SID = getID(*S);
              !Partitioned.test(SID) && !Workset.test(SID)) {
            Worklist.push(S);
            Workset.set(SID);
          }
    } else {
      MaybeSuccessors.push_back(B);
    }
  }

  // Any block successors not in the current interval are interval successors.
  for (const Node *B : MaybeSuccessors)
    if (!llvm::is_contained(Interval.Nodes, B))
      Interval.Successors.insert(B);

  return Interval;
}

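// Builds the interval headed at `Header` and appends it to `Graph` as a new
// `CFGIntervalNode`, queuing the interval's successors on `Successors` so the
// caller can build further intervals from them. `Index` is updated to map
// each input-graph node in the interval to the new output-graph node.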
template <typename Node>
void fillIntervalNode(CFGIntervalGraph &Graph,
                      std::vector<CFGIntervalNode *> &Index,
                      std::queue<const Node *> &Successors,
                      llvm::BitVector &Partitioned, const Node *Header) {
  BuildResult<Node> Result = buildInterval(Partitioned, Header);
  for (const auto *S : Result.Successors)
    Successors.push(S);

  CFGIntervalNode &Interval = Graph.emplace_back(Graph.size());

  // Index the nodes of the new interval. `Index` maps each node of the input
  // graph (specifically, the elements of `Result.Nodes`) to the node of the
  // output graph that contains it, so all of `Result.Nodes` map to the newly
  // created interval.
  for (const auto *N : Result.Nodes) {
    assert(N != nullptr);
    assert(getID(*N) < Index.size());
    Index[getID(*N)] = &Interval;
  }

  if constexpr (std::is_same_v<std::decay_t<Node>, CFGBlock>)
    Interval.Nodes = std::move(Result.Nodes);
  else {
    std::vector<const CFGBlock *> Nodes;
    // Flatten the sub-vectors into a single list, counting first so we can
    // reserve the exact capacity.
    size_t Count = 0;
    for (auto &N : Result.Nodes)
      Count += N->Nodes.size();
    Nodes.reserve(Count);
    for (auto &N : Result.Nodes)
      Nodes.insert(Nodes.end(), N->Nodes.begin(), N->Nodes.end());
    Interval.Nodes = std::move(Nodes);
  }
}

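// Partitions the graph reachable from `EntryBlock` into intervals and returns
// the derived graph: one node per interval, with an edge from interval A to
// interval B whenever A contains a predecessor of B's header (backedges into
// a header from its own interval are dropped).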
template <typename Node>
CFGIntervalGraph partitionIntoIntervalsImpl(unsigned NumBlockIDs,
                                            const Node *EntryBlock) {
  assert(EntryBlock != nullptr);
  CFGIntervalGraph Graph;
  // `Index` maps all of the nodes of the input graph to the interval to which
  // they are assigned in the output graph. The values (interval pointers) are
  // never null.
  std::vector<CFGIntervalNode *> Index(NumBlockIDs, nullptr);

  // Lists header nodes (from the input graph) and their associated
  // interval. Since header nodes can vary in type and are only needed within
  // this function, we record them separately from `CFGIntervalNode`. This
  // choice lets us express `CFGIntervalNode` without using a variant.
  std::vector<std::pair<const Node *, CFGIntervalNode *>> Intervals;
  llvm::BitVector Partitioned(NumBlockIDs, false);
  std::queue<const Node *> Successors;

  fillIntervalNode(Graph, Index, Successors, Partitioned, EntryBlock);
  Intervals.emplace_back(EntryBlock, &Graph.back());

  while (!Successors.empty()) {
    const auto *B = Successors.front();
    Successors.pop();
    assert(B != nullptr);
    if (Partitioned.test(getID(*B)))
      continue;

    // `B` has not been partitioned, but it has a predecessor that has. Create
    // a new interval from `B`.
    fillIntervalNode(Graph, Index, Successors, Partitioned, B);
    Intervals.emplace_back(B, &Graph.back());
  }

  // Go back and patch up all the Intervals -- the successors and predecessors.
  for (auto [H, N] : Intervals) {
    // Map input-graph predecessors to output-graph nodes and mark those as
    // predecessors of `N`. Then, mark `N` as a successor of said predecessor.
    for (const Node *P : H->preds()) {
      if (P == nullptr)
        continue;

      assert(getID(*P) < NumBlockIDs);
      CFGIntervalNode *Pred = Index[getID(*P)];
      if (Pred == nullptr)
        // Unreachable node.
        continue;
      if (Pred != N // Not a backedge.
          && N->Predecessors.insert(Pred).second)
        // Note: given the guard above, which guarantees we only ever insert
        // unique elements, we could use a simple list (like `vector`) for
        // `Successors`, rather than a set.
        Pred->Successors.insert(N);
    }
  }

  return Graph;
}

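// Public entry point: builds the interval headed at `Header` over the full
// CFG, with no blocks pre-partitioned, and returns only its member blocks.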
std::vector<const CFGBlock *> buildInterval(const CFGBlock *Header) {
  llvm::BitVector Partitioned(Header->getParent()->getNumBlockIDs(), false);
  return buildInterval(Partitioned, Header).Nodes;
}

CFGIntervalGraph partitionIntoIntervals(const CFG &Cfg) {
  return partitionIntoIntervalsImpl(Cfg.getNumBlockIDs(), &Cfg.getEntry());
}

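// Overload for derived graphs: partitions an interval graph produced by an
// earlier round into the next graph in the derived sequence. Interval IDs are
// dense indices, so `Graph.size()` bounds them, and `Graph[0]` is the entry
// node since it was built first, from the original entry block.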
CFGIntervalGraph partitionIntoIntervals(const CFGIntervalGraph &Graph) {
  return partitionIntoIntervalsImpl(Graph.size(), &Graph[0]);
}
} // namespace internal

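// Computes a weak topological ordering of `Cfg`'s blocks by iterated interval
// partitioning: each round collapses the intervals of the previous graph into
// single nodes, and for a reducible CFG the sequence converges to a single
// interval whose flattened block list gives the ordering. Returns
// `std::nullopt` if the CFG is irreducible.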
std::optional<std::vector<const CFGBlock *>> getIntervalWTO(const CFG &Cfg) {
  // Track the previous graph's size so we can detect when partitioning stops
  // shrinking the graph.
  unsigned PrevSize = Cfg.size();
  if (PrevSize == 0)
    return {};
  internal::CFGIntervalGraph Graph = internal::partitionIntoIntervals(Cfg);
  unsigned Size = Graph.size();
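  // Keep partitioning the derived graph until it collapses to a single node
  // (the CFG is reducible) or stops shrinking (it is irreducible).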
  while (Size > 1 && Size < PrevSize) {
    PrevSize = Graph.size();
    Graph = internal::partitionIntoIntervals(Graph);
    Size = Graph.size();
  }
  if (Size > 1)
    // Not reducible.
    return std::nullopt;

  assert(Size != 0);
  return std::move(Graph[0].Nodes);
}

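// Records each block's 1-based position in the given weak topological
// ordering, indexed by block ID. IDs absent from the WTO keep the sentinel
// value 0.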
WTOCompare::WTOCompare(const WeakTopologicalOrdering &WTO) {
  if (WTO.empty())
    return;
  auto N = WTO[0]->getParent()->getNumBlockIDs();
  BlockOrder.resize(N, 0);
  for (unsigned I = 0, S = WTO.size(); I < S; ++I)
    BlockOrder[WTO[I]->getBlockID()] = I + 1;
}
} // namespace clang