//===- CodeGenCommonISel.h - Common code between ISels ---------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares common utilities that are shared between SelectionDAG and
// GlobalISel frameworks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_CODEGENCOMMONISEL_H
#define LLVM_CODEGEN_CODEGENCOMMONISEL_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include <cassert>
namespace llvm {

class BasicBlock;
enum FPClassTest : unsigned;

/// Encapsulates all of the information needed to generate a stack protector
/// check, and signals to isel when initialized that one needs to be generated.
///
/// *NOTE* The following is a high level documentation of SelectionDAG Stack
/// Protector Generation. It has now also been ported to be shared with
/// GlobalISel, but without any significant changes.
///
/// High Level Overview of ISel Stack Protector Generation:
///
/// Previously, the "stack protector" IR pass handled stack protector
/// generation. This necessitated splitting basic blocks at the IR level to
/// create the success/failure basic blocks in the tail of the basic block in
/// question. As a result of this, calls that would have qualified for the
/// sibling call optimization were no longer eligible for optimization since
/// said calls were no longer right in the "tail position" (i.e. the immediate
/// predecessor of a ReturnInst instruction).
///
/// Since the sibling call optimization causes the callee to reuse the caller's
/// stack, if we delay the generation of the stack protector check until later
/// in CodeGen, after the sibling call decision has been made, we get both the
/// tail call optimization and the stack protector check!
///
/// A few goals in solving this problem were:
///
///   1. Preserve the architecture independence of stack protector generation.
///
///   2. Preserve the normal IR level stack protector check for platforms like
///      OpenBSD for which we support platform-specific stack protector
///      generation.
///
/// The main problem that guided the present solution is that one can not
/// solve this problem in an architecture independent manner at the IR level
/// only. This is because:
///
///   1. The decision on whether or not to perform a sibling call on certain
///      platforms (for instance i386) requires lower level information
///      related to available registers that can not be known at the IR level.
///
///   2. Even if the previous point were not true, the decision on whether to
///      perform a tail call is done in LowerCallTo in SelectionDAG (or
///      CallLowering in GlobalISel) which occurs after the Stack Protector
///      Pass. As a result, one would need to put the relevant callinst into the
///      stack protector check success basic block (where the return inst is
///      placed) and then move it back later at ISel/MI time before the
///      stack protector check if the tail call optimization failed. The MI
///      level option was nixed immediately since it would require
///      platform-specific pattern matching. The ISel level option was
///      nixed because SelectionDAG only processes one IR level basic block at a
///      time implying one could not create a DAG Combine to move the callinst.
///
/// To get around this problem:
///
///   1. While SelectionDAG can only process one IR level basic block at a
///      time, we can generate multiple machine basic blocks for one IR level
///      basic block. This is how we handle bit tests and switches.
///
///   2. At the MI level, tail calls are represented via a special return
///      MIInst called "tcreturn". Thus if we know the basic block in which we
///      wish to insert the stack protector check, we get the correct behavior
///      by always inserting the stack protector check right before the return
///      statement. This is a "magical transformation" since no matter where
///      the stack protector check intrinsic is, we always insert the stack
///      protector check code at the end of the BB.
///
/// Given the aforementioned constraints, the following solution was devised:
///
///   1. On platforms that do not support ISel stack protector check
///      generation, allow for the normal IR level stack protector check
///      generation to continue.
///
///   2. On platforms that do support ISel stack protector check
///      generation:
///
///     a. Use the IR level stack protector pass to decide whether a stack
///        protector is required and which BB to insert the stack protector
///        check in, reusing the logic already therein.
///
///     b. After we finish selecting the basic block, we produce the validation
///        code with one of these techniques:
///          1) with a call to a guard check function
///          2) with inlined instrumentation
///
///        1) We insert a call to the check function before the terminator.
///
///        2) We first find a splice point in the parent basic block
///        before the terminator and then splice the terminator of said basic
///        block into the success basic block. Then we code-gen a new tail for
///        the parent basic block consisting of the two loads, the comparison,
///        and finally two branches to the success/failure basic blocks. We
///        conclude by code-gening the failure basic block if we have not
///        code-gened it already (all stack protector checks we generate in
///        the same function use the same failure basic block).
class StackProtectorDescriptor {
public:
  StackProtectorDescriptor() = default;

  /// Returns true if all fields of the stack protector descriptor are
  /// initialized implying that we should/are ready to emit a stack protector.
  bool shouldEmitStackProtector() const {
    return ParentMBB && SuccessMBB && FailureMBB;
  }

  bool shouldEmitFunctionBasedCheckStackProtector() const {
    return ParentMBB && !SuccessMBB && !FailureMBB;
  }

  /// Initialize the stack protector descriptor structure for a new basic
  /// block.
  void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
                  bool FunctionBasedInstrumentation) {
    // Make sure we are not initialized yet.
    assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
                                          "already initialized!");
    ParentMBB = MBB;
    if (!FunctionBasedInstrumentation) {
      SuccessMBB = addSuccessorMBB(BB, MBB, /* IsLikely */ true);
      FailureMBB = addSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
    }
  }

  /// Reset state that changes when we handle different basic blocks.
  ///
  /// This currently includes:
  ///
  /// 1. The specific basic block we are generating a
  /// stack protector for (ParentMBB).
  ///
  /// 2. The successor machine basic block that will contain the tail of
  /// parent mbb after we create the stack protector check (SuccessMBB). This
  /// BB is visited only on stack protector check success.
  void resetPerBBState() {
    ParentMBB = nullptr;
    SuccessMBB = nullptr;
  }

  /// Reset state that only changes when we switch functions.
  ///
  /// This currently includes:
  ///
  /// 1. FailureMBB since we reuse the failure code path for all stack
  /// protector checks created in an individual function.
  ///
  /// 2. The guard variable since the guard variable we are checking against
  /// is always the same.
  void resetPerFunctionState() { FailureMBB = nullptr; }

  MachineBasicBlock *getParentMBB() { return ParentMBB; }
  MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
  MachineBasicBlock *getFailureMBB() { return FailureMBB; }

private:
  /// The basic block for which we are generating the stack protector.
  ///
  /// As a result of stack protector generation, we will splice the
  /// terminators of this basic block into the successor mbb SuccessMBB and
  /// replace them with a compare/branch to the successor mbbs
  /// SuccessMBB/FailureMBB depending on whether or not the stack protector
  /// was violated.
  MachineBasicBlock *ParentMBB = nullptr;

  /// A basic block visited on stack protector check success that contains the
  /// terminators of ParentMBB.
  MachineBasicBlock *SuccessMBB = nullptr;

  /// A basic block visited on stack protector check failure that will
  /// contain a call to __stack_chk_fail().
  MachineBasicBlock *FailureMBB = nullptr;

  /// Add a successor machine basic block to ParentMBB. If the successor mbb
  /// has not been created yet (i.e. if SuccMBB == nullptr), then the machine
  /// basic block will be created. Assign a large weight if IsLikely is true.
  MachineBasicBlock *addSuccessorMBB(const BasicBlock *BB,
                                     MachineBasicBlock *ParentMBB,
                                     bool IsLikely,
                                     MachineBasicBlock *SuccMBB = nullptr);
};
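
// A minimal sketch of the intended lifecycle (illustrative only; `BB`, `MBB`
// and `emitSPCheck` are hypothetical names for an ISel driver, not part of
// this header):
//
//   StackProtectorDescriptor SPD;
//   // Per IR basic block, once the stack protector pass has requested a check:
//   SPD.initialize(&BB, MBB, /*FunctionBasedInstrumentation=*/false);
//   if (SPD.shouldEmitStackProtector())
//     emitSPCheck(SPD.getParentMBB(), SPD.getSuccessMBB(), SPD.getFailureMBB());
//   SPD.resetPerBBState();
//   // Once per function, after all blocks have been selected:
//   SPD.resetPerFunctionState();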

/// Find the split point at which to splice the end of BB into its success stack
/// protector check machine basic block.
///
/// On many platforms, due to ABI constraints, terminators, even before register
/// allocation, use physical registers. This creates an issue for us since
/// physical registers at this point can not travel across basic
/// blocks. Luckily, SelectionDAG always moves physical registers into vregs
/// when they enter functions and moves them through a sequence of copies back
/// into the physical registers right before the terminator, creating a
/// ``Terminator Sequence''. This function searches for the beginning of the
/// terminator sequence so that we can ensure that we splice off not just the
/// terminator, but additionally the copies that move the vregs into the
/// physical registers.
MachineBasicBlock::iterator
findSplitPointForStackProtector(MachineBasicBlock *BB,
                                const TargetInstrInfo &TII);
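
// A minimal usage sketch (assumptions: ParentMBB/SuccessMBB come from a
// StackProtectorDescriptor and TII is the target's TargetInstrInfo; the
// splice shown here is illustrative, not a prescribed sequence):
//
//   MachineBasicBlock::iterator SplitPoint =
//       findSplitPointForStackProtector(ParentMBB, TII);
//   // Move the terminator sequence (the vreg-to-physreg copies plus the
//   // terminator itself) into the success block; the guard compare-and-branch
//   // can then be emitted at the new end of ParentMBB.
//   SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
//                      ParentMBB->end());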

/// Evaluates if the specified FP class test is better performed as the inverse
/// (i.e. fewer instructions should be required to lower it).  An example is the
/// test "inf|normal|subnormal|zero", which is an inversion of "nan".
/// \param Test The test as specified in the 'is_fpclass' intrinsic invocation.
/// \returns The inverted test, or fcNone, if inversion does not produce a
/// simpler test.
FPClassTest invertFPClassTestIfSimpler(FPClassTest Test);
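
// Illustrative sketch (Test is assumed to come from an is_fpclass operand; the
// lowering itself is left abstract):
//
//   FPClassTest InvertedTest = invertFPClassTestIfSimpler(Test);
//   if (InvertedTest != fcNone) {
//     // E.g. Test == (fcInf | fcNormal | fcSubnormal | fcZero) inverts to the
//     // single-class test fcNan: lower the inverted test and negate the
//     // result.
//   }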

/// Assuming the instruction \p MI is going to be deleted, attempt to salvage
/// debug users of \p MI by writing the effect of \p MI in a DIExpression.
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI,
                                 MachineInstr &MI,
                                 ArrayRef<MachineOperand *> DbgUsers);
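
// A rough sketch of the expected calling pattern (assumes MI defines a single
// virtual register and is about to be erased; the collection loop is only one
// possible way to gather the debug uses):
//
//   SmallVector<MachineOperand *, 2> DbgUsers;
//   for (MachineOperand &UseOp : MRI.use_operands(MI.getOperand(0).getReg()))
//     if (UseOp.getParent()->isDebugValue())
//       DbgUsers.push_back(&UseOp);
//   salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
//   MI.eraseFromParent();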

} // namespace llvm

#endif // LLVM_CODEGEN_CODEGENCOMMONISEL_H