//===----- BPFMISimplifyPatchable.cpp - MI Simplify Patchable Insts -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass targets a subset of instructions like the following:
//    ld_imm64 r1, @global
//    ldd r2, r1, 0
//    add r3, struct_base_reg, r2
//
// Here @global should represent an AMA (abstract member access).
// Such an access is subject to BPF load-time patching. After this pass, the
// code becomes
//    ld_imm64 r1, @global
//    add r3, struct_base_reg, r1
//
// Eventually, at the BTF output stage, a relocation record will be generated
// for the ld_imm64, which the BPF loader will later patch to:
//    r1 = <calculated field_info>
//    add r3, struct_base_reg, r1
//
// This pass also removes the intermediate load generated by the IR pass for
// the __builtin_btf_type_id() intrinsic.
//
//===----------------------------------------------------------------------===//
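// The @global values recognized here are typically produced by earlier BPF IR
// passes (e.g. BPFAbstractMemberAccess for CO-RE field accesses); this pass
// only checks the BPFCoreSharedInfo attributes attached to those globals.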

#include "BPF.h"
#include "BPFCORE.h"
#include "BPFInstrInfo.h"
#include "BPFTargetMachine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include <set>

using namespace llvm;

#define DEBUG_TYPE "bpf-mi-simplify-patchable"

namespace {

struct BPFMISimplifyPatchable : public MachineFunctionPass {

  static char ID;
  const BPFInstrInfo *TII;
  MachineFunction *MF;

  BPFMISimplifyPatchable() : MachineFunctionPass(ID) {
    initializeBPFMISimplifyPatchablePass(*PassRegistry::getPassRegistry());
  }

private:
  std::set<MachineInstr *> SkipInsts;

  // Initialize class variables.
  void initialize(MachineFunction &MFParm);

  bool isLoadInst(unsigned Opcode);
  bool removeLD();
  void processCandidate(MachineRegisterInfo *MRI, MachineBasicBlock &MBB,
                        MachineInstr &MI, Register &SrcReg, Register &DstReg,
                        const GlobalValue *GVal, bool IsAma);
  void processDstReg(MachineRegisterInfo *MRI, Register &DstReg,
                     Register &SrcReg, const GlobalValue *GVal,
                     bool doSrcRegProp, bool IsAma);
  void processInst(MachineRegisterInfo *MRI, MachineInstr *Inst,
                   MachineOperand *RelocOp, const GlobalValue *GVal);
  void checkADDrr(MachineRegisterInfo *MRI, MachineOperand *RelocOp,
                  const GlobalValue *GVal);
  void checkShift(MachineRegisterInfo *MRI, MachineBasicBlock &MBB,
                  MachineOperand *RelocOp, const GlobalValue *GVal,
                  unsigned Opcode);

public:
  // Main entry point for this pass.
  bool runOnMachineFunction(MachineFunction &MF) override {
    if (skipFunction(MF.getFunction()))
      return false;

    initialize(MF);
    return removeLD();
  }
};

// Initialize class variables.
void BPFMISimplifyPatchable::initialize(MachineFunction &MFParm) {
  MF = &MFParm;
  TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
  LLVM_DEBUG(dbgs() << "*** BPF simplify patchable insts pass ***\n\n");
}

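// Opcode classifiers for the BPF memory accesses this pass may rewrite:
// immediate stores (ST*_imm), register stores (STX, 32- and 64-bit), and
// zero- or sign-extending loads (LDX32/LDX64/LDSX).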
static bool isST(unsigned Opcode) {
  return Opcode == BPF::STB_imm || Opcode == BPF::STH_imm ||
         Opcode == BPF::STW_imm || Opcode == BPF::STD_imm;
}

static bool isSTX32(unsigned Opcode) {
  return Opcode == BPF::STB32 || Opcode == BPF::STH32 || Opcode == BPF::STW32;
}

static bool isSTX64(unsigned Opcode) {
  return Opcode == BPF::STB || Opcode == BPF::STH || Opcode == BPF::STW ||
         Opcode == BPF::STD;
}

static bool isLDX32(unsigned Opcode) {
  return Opcode == BPF::LDB32 || Opcode == BPF::LDH32 || Opcode == BPF::LDW32;
}

static bool isLDX64(unsigned Opcode) {
  return Opcode == BPF::LDB || Opcode == BPF::LDH || Opcode == BPF::LDW ||
         Opcode == BPF::LDD;
}

static bool isLDSX(unsigned Opcode) {
  return Opcode == BPF::LDBSX || Opcode == BPF::LDHSX || Opcode == BPF::LDWSX;
}

bool BPFMISimplifyPatchable::isLoadInst(unsigned Opcode) {
  return isLDX32(Opcode) || isLDX64(Opcode) || isLDSX(Opcode);
}

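// For "%1 = ADD_rr %2, %3" where RelocOp carries the relocation global,
// rewrite every load/store that uses %1 with a zero offset into a
// CORE_LD64/CORE_LD32/CORE_ST pseudo that keeps the original opcode, the
// base register and the global, so BTF emission can attach a relocation.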
void BPFMISimplifyPatchable::checkADDrr(MachineRegisterInfo *MRI,
    MachineOperand *RelocOp, const GlobalValue *GVal) {
  const MachineInstr *Inst = RelocOp->getParent();
  const MachineOperand *Op1 = &Inst->getOperand(1);
  const MachineOperand *Op2 = &Inst->getOperand(2);
  const MachineOperand *BaseOp = (RelocOp == Op1) ? Op2 : Op1;

  // Go through all uses of %1 as in %1 = ADD_rr %2, %3
  const MachineOperand Op0 = Inst->getOperand(0);
  for (MachineOperand &MO :
       llvm::make_early_inc_range(MRI->use_operands(Op0.getReg()))) {
    // The candidate needs to have a unique definition.
    if (!MRI->getUniqueVRegDef(MO.getReg()))
      continue;

    MachineInstr *DefInst = MO.getParent();
    unsigned Opcode = DefInst->getOpcode();
    unsigned COREOp;
    if (isLDX64(Opcode) || isLDSX(Opcode))
      COREOp = BPF::CORE_LD64;
    else if (isLDX32(Opcode))
      COREOp = BPF::CORE_LD32;
    else if (isSTX64(Opcode) || isSTX32(Opcode) || isST(Opcode))
      COREOp = BPF::CORE_ST;
    else
      continue;

    // It must be a form of %2 = *(type *)(%1 + 0) or *(type *)(%1 + 0) = %2.
    const MachineOperand &ImmOp = DefInst->getOperand(2);
    if (!ImmOp.isImm() || ImmOp.getImm() != 0)
      continue;

    // Reject the form:
    //   %1 = ADD_rr %2, %3
    //   *(type *)(%2 + 0) = %1
    if (isSTX64(Opcode) || isSTX32(Opcode)) {
      const MachineOperand &Opnd = DefInst->getOperand(0);
      if (Opnd.isReg() && Opnd.getReg() == MO.getReg())
        continue;
    }

    BuildMI(*DefInst->getParent(), *DefInst, DefInst->getDebugLoc(),
            TII->get(COREOp))
        .add(DefInst->getOperand(0)).addImm(Opcode).add(*BaseOp)
        .addGlobalAddress(GVal);
    DefInst->eraseFromParent();
  }
}

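// Rewrite "%dst = S{LL,RA,RL}_rr %src, %amount" (with the relocation global
// feeding the shift amount) into a CORE_SHIFT pseudo recording the matching
// register-immediate shift opcode; the immediate is resolved at BTF emission.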
void BPFMISimplifyPatchable::checkShift(MachineRegisterInfo *MRI,
    MachineBasicBlock &MBB, MachineOperand *RelocOp, const GlobalValue *GVal,
    unsigned Opcode) {
  // Relocation operand should be the operand #2.
  MachineInstr *Inst = RelocOp->getParent();
  if (RelocOp != &Inst->getOperand(2))
    return;

  BuildMI(MBB, *Inst, Inst->getDebugLoc(), TII->get(BPF::CORE_SHIFT))
      .add(Inst->getOperand(0)).addImm(Opcode)
      .add(Inst->getOperand(1)).addGlobalAddress(GVal);
  Inst->eraseFromParent();
}

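// For the load "DstReg = LDx SrcReg, 0" being removed, redirect DstReg's uses
// to SrcReg (via a sub_32 COPY when DstReg is 32-bit) and, for AMA globals,
// try to fold the relocation into the using instructions.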
void BPFMISimplifyPatchable::processCandidate(MachineRegisterInfo *MRI,
    MachineBasicBlock &MBB, MachineInstr &MI, Register &SrcReg,
    Register &DstReg, const GlobalValue *GVal, bool IsAma) {
  if (MRI->getRegClass(DstReg) == &BPF::GPR32RegClass) {
    if (IsAma) {
      // We can optimize such a pattern:
      //  %1:gpr = LD_imm64 @"llvm.s:0:4$0:2"
      //  %2:gpr32 = LDW32 %1:gpr, 0
      //  %3:gpr = SUBREG_TO_REG 0, %2:gpr32, %subreg.sub_32
      //  %4:gpr = ADD_rr %0:gpr, %3:gpr
      //  or similar patterns below for non-alu32 case.
      auto Begin = MRI->use_begin(DstReg), End = MRI->use_end();
      decltype(End) NextI;
      for (auto I = Begin; I != End; I = NextI) {
        NextI = std::next(I);
        if (!MRI->getUniqueVRegDef(I->getReg()))
          continue;

        unsigned Opcode = I->getParent()->getOpcode();
        if (Opcode == BPF::SUBREG_TO_REG) {
          Register TmpReg = I->getParent()->getOperand(0).getReg();
          processDstReg(MRI, TmpReg, DstReg, GVal, false, IsAma);
        }
      }
    }

    BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(BPF::COPY), DstReg)
        .addReg(SrcReg, 0, BPF::sub_32);
    return;
  }

  // Replace all uses of DstReg with SrcReg.
  processDstReg(MRI, DstReg, SrcReg, GVal, true, IsAma);
}

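// Walk all uses of DstReg. When doSrcRegProp is set, rewrite each use to
// SrcReg and clear any stale kill flag (see the example below); for AMA
// globals, also hand each user with a unique definition to processInst().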
void BPFMISimplifyPatchable::processDstReg(MachineRegisterInfo *MRI,
    Register &DstReg, Register &SrcReg, const GlobalValue *GVal,
    bool doSrcRegProp, bool IsAma) {
  auto Begin = MRI->use_begin(DstReg), End = MRI->use_end();
  decltype(End) NextI;
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(I);
    if (doSrcRegProp) {
      // In situations like the one below, it is not known whether a usage
      // is a kill after setReg():
      //
      // .-> %2:gpr = LD_imm64 @"llvm.t:0:0$0:0"
      // |
      // |`----------------.
      // |   %3:gpr = LDD %2:gpr, 0
      // |   %4:gpr = ADD_rr %0:gpr(tied-def 0), killed %3:gpr <--- (1)
      // |   %5:gpr = LDD killed %4:gpr, 0       ^^^^^^^^^^^^^
      // |   STD killed %5:gpr, %1:gpr, 0         this is I
      //  `----------------.
      //     %6:gpr = LDD %2:gpr, 0
      //     %7:gpr = ADD_rr %0:gpr(tied-def 0), killed %6:gpr <--- (2)
      //     %8:gpr = LDD killed %7:gpr, 0       ^^^^^^^^^^^^^
      //     STD killed %8:gpr, %1:gpr, 0         this is I
      //
      // Instructions (1) and (2) would be updated by setReg() to:
      //
      //     ADD_rr %0:gpr(tied-def 0), %2:gpr
      //
      // %2:gpr is not killed at (1), so it is necessary to remove the kill
      // flag from I.
      I->setReg(SrcReg);
      I->setIsKill(false);
    }

    // The candidate needs to have a unique definition.
    if (IsAma && MRI->getUniqueVRegDef(I->getReg()))
      processInst(MRI, I->getParent(), &*I, GVal);
  }
}

// Check whether the relocation can be pushed into downstream dependent
// instructions. Two kinds of patterns are recognized below:
// Pattern 1:
//   %1 = LD_imm64 @"llvm.b:0:4$0:1"  <== patch_imm = 4
//   %2 = LDD %1, 0  <== this insn will be removed
//   %3 = ADD_rr %0, %2
//   %4 = LDW[32] %3, 0 OR STW[32] %4, %3, 0
//   The `%4 = ...` will be transformed to
//      CORE_LD64/CORE_LD32/CORE_ST(%4, mem_opcode, %0, @"llvm.b:0:4$0:1")
//   and later on, the BTF emit phase will translate it to
//      %4 = LDW[32] %0, 4 OR STW[32] %4, %0, 4
//   and attach a relocation to it.
// Pattern 2:
//    %15 = LD_imm64 @"llvm.t:5:63$0:2" <== relocation type 5
//    %16 = LDD %15, 0   <== this insn will be removed
//    %17 = SRA_rr %14, %16
//    The `%17 = ...` will be transformed to
//       %17 = CORE_SHIFT(SRA_ri, %14, @"llvm.t:5:63$0:2")
//    and later on, the BTF emit phase will translate it to
//       %r4 = SRA_ri %r4, 63
void BPFMISimplifyPatchable::processInst(MachineRegisterInfo *MRI,
    MachineInstr *Inst, MachineOperand *RelocOp, const GlobalValue *GVal) {
  unsigned Opcode = Inst->getOpcode();
  if (isLoadInst(Opcode)) {
    SkipInsts.insert(Inst);
    return;
  }

  if (Opcode == BPF::ADD_rr)
    checkADDrr(MRI, RelocOp, GVal);
  else if (Opcode == BPF::SLL_rr)
    checkShift(MRI, *Inst->getParent(), RelocOp, GVal, BPF::SLL_ri);
  else if (Opcode == BPF::SRA_rr)
    checkShift(MRI, *Inst->getParent(), RelocOp, GVal, BPF::SRA_ri);
  else if (Opcode == BPF::SRL_rr)
    checkShift(MRI, *Inst->getParent(), RelocOp, GVal, BPF::SRL_ri);
}

/// Remove unneeded Load instructions.
bool BPFMISimplifyPatchable::removeLD() {
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineInstr *ToErase = nullptr;
  bool Changed = false;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
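      // Erase the previous candidate only after the iterator has advanced
      // past it, so the basic-block iteration stays valid.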
      if (ToErase) {
        ToErase->eraseFromParent();
        ToErase = nullptr;
      }

      // Ensure the register format is LOAD <reg>, <reg>, 0
      if (!isLoadInst(MI.getOpcode()))
        continue;

      if (SkipInsts.find(&MI) != SkipInsts.end())
        continue;

      if (!MI.getOperand(0).isReg() || !MI.getOperand(1).isReg())
        continue;

      if (!MI.getOperand(2).isImm() || MI.getOperand(2).getImm())
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      Register SrcReg = MI.getOperand(1).getReg();

      MachineInstr *DefInst = MRI->getUniqueVRegDef(SrcReg);
      if (!DefInst)
        continue;

      if (DefInst->getOpcode() != BPF::LD_imm64)
        continue;

      const MachineOperand &MO = DefInst->getOperand(1);
      if (!MO.isGlobal())
        continue;

      const GlobalValue *GVal = MO.getGlobal();
      auto *GVar = dyn_cast<GlobalVariable>(GVal);
      if (!GVar)
        continue;

      // Global variables representing structure offset or type id.
      bool IsAma = false;
      if (GVar->hasAttribute(BPFCoreSharedInfo::AmaAttr))
        IsAma = true;
      else if (!GVar->hasAttribute(BPFCoreSharedInfo::TypeIdAttr))
        continue;

      processCandidate(MRI, MBB, MI, SrcReg, DstReg, GVal, IsAma);

      ToErase = &MI;
      Changed = true;
    }
  }

  return Changed;
}

} // namespace

INITIALIZE_PASS(BPFMISimplifyPatchable, DEBUG_TYPE,
                "BPF PreEmit SimplifyPatchable", false, false)

char BPFMISimplifyPatchable::ID = 0;
FunctionPass *llvm::createBPFMISimplifyPatchablePass() {
  return new BPFMISimplifyPatchable();
}