//===-- CodeGenCommonISel.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines common utilities that are shared between SelectionDAG and
// GlobalISel frameworks.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugInfoMetadata.h"

#define DEBUG_TYPE "codegen-common"

using namespace llvm;

/// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
/// if SuccMBB is null.
MachineBasicBlock *
StackProtectorDescriptor::addSuccessorMBB(
    const BasicBlock *BB, MachineBasicBlock *ParentMBB, bool IsLikely,
    MachineBasicBlock *SuccMBB) {
  // If SuccMBB has not been created yet, create it.
  if (!SuccMBB) {
    MachineFunction *MF = ParentMBB->getParent();
    MachineFunction::iterator BBI(ParentMBB);
    SuccMBB = MF->CreateMachineBasicBlock(BB);
    MF->insert(++BBI, SuccMBB);
  }
  // Add it as a successor of ParentMBB.
  ParentMBB->addSuccessor(
      SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
  return SuccMBB;
}

/// Given that the input MI is before a partial terminator sequence TSeq,
/// return true if MI + TSeq is also a partial terminator sequence.
///
/// A terminator sequence is a sequence of MachineInstrs which at this point in
/// lowering copy vregs into physical registers, which are then passed into
/// terminator instructions so we can satisfy ABI constraints. A partial
/// terminator sequence is an improper subset of a terminator sequence (i.e. it
/// may be the whole terminator sequence).
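///
/// For example (an illustrative, AArch64-flavoured sketch; nothing here
/// depends on these particular opcodes), a terminator sequence might be:
///   %0:gpr64 = ...
///   $x0 = COPY %0              ; move the vreg into the ABI return register
///   RET_ReallyLR implicit $x0  ; terminator consuming the physical register
/// The COPY (plus any interleaved DBG_VALUEs) belongs to the terminator
/// sequence of the RET.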
static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
  // If we do not have a copy or an implicit def, we return true if and only if
  // MI is a debug value.
  if (!MI.isCopy() && !MI.isImplicitDef()) {
    // Sometimes DBG_VALUE MIs sneak in between the copies from the vregs to
    // the physical registers if there is debug info associated with the
    // terminator of our MBB. We want to include said debug info in our
    // terminator sequence, so we return true in that case.
    if (MI.isDebugInstr())
      return true;

    // For GlobalISel, we may have extension instructions for arguments within
    // copy sequences. Allow these.
    switch (MI.getOpcode()) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_MERGE_VALUES:
    case TargetOpcode::G_UNMERGE_VALUES:
    case TargetOpcode::G_CONCAT_VECTORS:
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_EXTRACT:
      return true;
    default:
      return false;
    }
  }

  // We have left the terminator sequence if we are not doing one of the
  // following:
  //
  // 1. Copying a vreg into a physical register.
  // 2. Copying a vreg into a vreg.
  // 3. Defining a register via an implicit def.

  // OPI should always be a register definition...
  MachineInstr::const_mop_iterator OPI = MI.operands_begin();
  if (!OPI->isReg() || !OPI->isDef())
    return false;

  // Defining any register via an implicit def is always ok.
  if (MI.isImplicitDef())
    return true;

  // Grab the copy source...
  MachineInstr::const_mop_iterator OPI2 = OPI;
  ++OPI2;
  assert(OPI2 != MI.operands_end() &&
         "Should have a copy implying we should have 2 arguments.");

  // Make sure that the copy dest is not a vreg when the copy source is a
  // physical register.
  if (!OPI2->isReg() ||
      (!OPI->getReg().isPhysical() && OPI2->getReg().isPhysical()))
    return false;

  return true;
}

/// Find the split point at which to splice the end of BB into its success
/// stack protector check machine basic block.
///
/// On many platforms, due to ABI constraints, terminators, even before
/// register allocation, use physical registers. This creates an issue for us
/// since physical registers at this point cannot travel across basic blocks.
/// Luckily, SelectionDAG always moves physical registers into vregs when they
/// enter functions and moves them through a sequence of copies back into the
/// physical registers right before the terminator, creating a ``Terminator
/// Sequence''. This function searches for the beginning of the terminator
/// sequence so that we can ensure that we splice off not just the terminator,
/// but additionally the copies that move the vregs into the physical
/// registers.
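///
/// For instance (illustrative only), with a block ending in
///   $x0 = COPY %retval
///   RET_ReallyLR implicit $x0
/// the returned iterator points at the COPY, so the copy is spliced into the
/// stack protector success block together with the return itself.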
MachineBasicBlock::iterator
llvm::findSplitPointForStackProtector(MachineBasicBlock *BB,
                                      const TargetInstrInfo &TII) {
  MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
  if (SplitPoint == BB->begin())
    return SplitPoint;

  MachineBasicBlock::iterator Start = BB->begin();
  MachineBasicBlock::iterator Previous = SplitPoint;
  do {
    --Previous;
  } while (Previous != Start && Previous->isDebugInstr());

  if (TII.isTailCall(*SplitPoint) &&
      Previous->getOpcode() == TII.getCallFrameDestroyOpcode()) {
    // Call frames cannot be nested, so if this frame is describing the tail
    // call itself, then we must insert before the sequence even starts. For
    // example:
    //     <split point>
    //     ADJCALLSTACKDOWN ...
    //     <Moves>
    //     ADJCALLSTACKUP ...
    //     TAILJMP somewhere
    // On the other hand, it could be an unrelated call in which case this tail
    // call has no register moves of its own and should be the split point. For
    // example:
    //     ADJCALLSTACKDOWN
    //     CALL something_else
    //     ADJCALLSTACKUP
    //     <split point>
    //     TAILJMP somewhere
    do {
      --Previous;
      if (Previous->isCall())
        return SplitPoint;
    } while (Previous->getOpcode() != TII.getCallFrameSetupOpcode());

    return Previous;
  }

  while (MIIsInTerminatorSequence(*Previous)) {
    SplitPoint = Previous;
    if (Previous == Start)
      break;
    --Previous;
  }

  return SplitPoint;
}

FPClassTest llvm::invertFPClassTestIfSimpler(FPClassTest Test) {
  FPClassTest InvertedTest = ~Test;
  // Pick the direction with fewer tests.
  // TODO: Handle more combinations of cases that can be handled together.
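  // For example, Test == ~fcNan (i.e. "is not NaN") inverts to fcNan, a
  // single class check; the caller can then test for fcNan and negate the
  // result instead of checking every non-NaN class individually.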
  switch (static_cast<unsigned>(InvertedTest)) {
  case fcNan:
  case fcSNan:
  case fcQNan:
  case fcInf:
  case fcPosInf:
  case fcNegInf:
  case fcNormal:
  case fcPosNormal:
  case fcNegNormal:
  case fcSubnormal:
  case fcPosSubnormal:
  case fcNegSubnormal:
  case fcZero:
  case fcPosZero:
  case fcNegZero:
  case fcFinite:
  case fcPosFinite:
  case fcNegFinite:
  case fcZero | fcNan:
  case fcSubnormal | fcZero:
  case fcSubnormal | fcZero | fcNan:
    return InvertedTest;
  default:
    return fcNone;
  }

  llvm_unreachable("covered FPClassTest");
}

static MachineOperand *getSalvageOpsForCopy(const MachineRegisterInfo &MRI,
                                            MachineInstr &Copy) {
  assert(Copy.getOpcode() == TargetOpcode::COPY && "Must be a COPY");

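  // A COPY forwards its source value unchanged, so the debug user can simply
  // refer to the copy's source operand; no expression ops are needed.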
  return &Copy.getOperand(1);
}

static MachineOperand *getSalvageOpsForTrunc(const MachineRegisterInfo &MRI,
                                             MachineInstr &Trunc,
                                             SmallVectorImpl<uint64_t> &Ops) {
  assert(Trunc.getOpcode() == TargetOpcode::G_TRUNC && "Must be a G_TRUNC");

  const auto FromLLT = MRI.getType(Trunc.getOperand(1).getReg());
  const auto ToLLT = MRI.getType(Trunc.defs().begin()->getReg());

  // TODO: Support non-scalar types.
  if (!FromLLT.isScalar()) {
    return nullptr;
  }

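  // The truncated value can be described in terms of the wider source: append
  // expression ops converting from the source width to the destination width
  // and point the debug user at the source operand.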
  auto ExtOps = DIExpression::getExtOps(FromLLT.getSizeInBits(),
                                        ToLLT.getSizeInBits(), false);
  Ops.append(ExtOps.begin(), ExtOps.end());
  return &Trunc.getOperand(1);
}

static MachineOperand *salvageDebugInfoImpl(const MachineRegisterInfo &MRI,
                                            MachineInstr &MI,
                                            SmallVectorImpl<uint64_t> &Ops) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_TRUNC:
    return getSalvageOpsForTrunc(MRI, MI, Ops);
  case TargetOpcode::COPY:
    return getSalvageOpsForCopy(MRI, MI);
  default:
    return nullptr;
  }
}

void llvm::salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI,
                                       MachineInstr &MI,
                                       ArrayRef<MachineOperand *> DbgUsers) {
  // This is an arbitrarily chosen limit on the maximum size of a debug
  // expression we can salvage up to, used for performance reasons.
  const unsigned MaxExpressionSize = 128;

  for (auto *DefMO : DbgUsers) {
    MachineInstr *DbgMI = DefMO->getParent();
    if (DbgMI->isIndirectDebugValue()) {
      continue;
    }

    int UseMOIdx =
        DbgMI->findRegisterUseOperandIdx(DefMO->getReg(), /*TRI=*/nullptr);
    assert(UseMOIdx != -1 && DbgMI->hasDebugOperandForReg(DefMO->getReg()) &&
           "Must use salvaged instruction as its location");

    // TODO: Support DBG_VALUE_LIST.
    if (DbgMI->getOpcode() != TargetOpcode::DBG_VALUE) {
      assert(DbgMI->getOpcode() == TargetOpcode::DBG_VALUE_LIST &&
             "Must be either DBG_VALUE or DBG_VALUE_LIST");
      continue;
    }

    const DIExpression *SalvagedExpr = DbgMI->getDebugExpression();

    SmallVector<uint64_t, 16> Ops;
    auto Op0 = salvageDebugInfoImpl(MRI, MI, Ops);
    if (!Op0)
      continue;
    SalvagedExpr = DIExpression::appendOpsToArg(SalvagedExpr, Ops, 0, true);

    bool IsValidSalvageExpr =
        SalvagedExpr->getNumElements() <= MaxExpressionSize;
    if (IsValidSalvageExpr) {
      auto &UseMO = DbgMI->getOperand(UseMOIdx);
      UseMO.setReg(Op0->getReg());
      UseMO.setSubReg(Op0->getSubReg());
      DbgMI->getDebugExpressionOp().setMetadata(SalvagedExpr);

      LLVM_DEBUG(dbgs() << "SALVAGE: " << *DbgMI << '\n');
    }
  }
}