//===------ BPFPreserveStaticOffset.cpp -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// TLDR: replaces llvm.preserve.static.offset + GEP + load / store
//           with llvm.bpf.getelementptr.and.load / store
//
// This file implements the BPFPreserveStaticOffsetPass transformation.
// This transformation addresses two BPF verifier-specific issues:
//
// (a) Access to the fields of some structural types is allowed only
//     using load and store instructions with static immediate offsets.
//
//     Examples of such types are `struct __sk_buff` and `struct
//     bpf_sock_ops`.  This is because offsets of the fields of these
//     structures do not match real offsets in the running kernel.
//     During BPF program load, LDX and STX instructions referring to
//     the fields of these types are rewritten so that offsets match
//     real offsets. For this rewrite to happen, field offsets have to
//     be encoded as immediate operands of the instructions.
//
//     See the kernel/bpf/verifier.c:convert_ctx_access function in the
//     Linux kernel source tree for details.
//
// (b) Pointers to context parameters of BPF programs must not be
//     modified before access.
//
//     During BPF program verification a tag PTR_TO_CTX is tracked for
//     register values. If a register with such a tag is modified, the
//     BPF program is not allowed to read or write memory using that
//     register. See the kernel/bpf/verifier.c:check_mem_access
//     function in the Linux kernel source tree for details.
//
// The following sequence of IR instructions:
//
//   %x = getelementptr %ptr, %constant_offset
//   %y = load %x
//
// is translated into a single machine instruction:
//
//   LDW %ptr, %constant_offset
//
// In order for cases (a) and (b) to work, the sequence %x-%y above has
// to be preserved by the IR passes.
//
// However, several optimization passes might sink the `load`
// instruction or hoist the `getelementptr` instruction so that the two
// are no longer in sequence. Examples of such passes are
// SimplifyCFGPass, InstCombinePass and GVNPass. After such a
// modification the verifier would reject the BPF program.
//
// To avoid this issue, patterns like (load/store (getelementptr ...))
// are replaced by calls to BPF-specific intrinsic functions:
// - llvm.bpf.getelementptr.and.load
// - llvm.bpf.getelementptr.and.store
//
// These calls are lowered back to (load/store (getelementptr ...))
// by the BPFCheckAndAdjustIR pass right before the translation from IR
// to machine instructions.
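//
// For illustration, a sketch of the replacement (pseudo-IR; besides the
// base pointer and GEP indices the intrinsic call also carries the
// volatile / ordering / sync-scope / alignment / inbounds operands,
// elided here as "..."):
//
//   %x = getelementptr %ptr, %constant_offset
//   %y = load %x
//
// becomes
//
//   %y = call @llvm.bpf.getelementptr.and.load(%ptr, ..., %constant_offset)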
//
// The transformation is split into the following steps:
// - When IR is generated from the AST, calls to the intrinsic function
//   llvm.preserve.static.offset are inserted.
// - BPFPreserveStaticOffsetPass is executed as early as possible
//   with AllowPartial set to true; this handles marked GEP chains
//   with constant offsets.
// - BPFPreserveStaticOffsetPass is executed at ScalarOptimizerLateEPCallback
//   with AllowPartial set to false; this handles marked GEP chains
//   with offsets that became constant after loop unrolling, e.g.
//   to handle the following code:
//
// struct context { int x[4]; } __attribute__((preserve_static_offset));
//
//   struct context *ctx = ...;
// #pragma clang loop unroll(full)
//   for (int i = 0; i < 4; ++i)
//     foo(ctx->x[i]);
//
// The early BPFPreserveStaticOffsetPass run is necessary to allow
// additional GVN / CSE opportunities after function inlining.
// The relative order of optimizations applied to a function:
// - early stage (1)
// - ...
// - function inlining (2)
// - ...
// - loop unrolling
// - ...
// - ScalarOptimizerLateEPCallback (3)
//
// When function A is inlined into function B, all optimizations for A
// are already done, while some passes remain for B. If
// BPFPreserveStaticOffsetPass runs at (3) but not at (1),
// the code after (2) would contain a mix of
// (load (gep %p)) and (get.and.load %p) usages:
// - the (load (gep %p)) would come from the calling function;
// - the (get.and.load %p) would come from the callee function;
// thus clobbering the CSE / GVN passes done after inlining.

#include "BPF.h"
#include "BPFCORE.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "bpf-preserve-static-offset"

using namespace llvm;

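// Operand layout of the generated intrinsic calls (built by
// fillCommonArgs below):
//   getelementptr.and.load : (%base, %volatile, %ordering, %syncscope,
//                             %align_shift, %inbounds, <gep indices>...)
//   getelementptr.and.store: (%value, %base, %volatile, %ordering,
//                             %syncscope, %align_shift, %inbounds,
//                             <gep indices>...)
// Hence the first GEP index is operand #6 for loads and #7 for stores.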
static const unsigned GepAndLoadFirstIdxArg = 6;
static const unsigned GepAndStoreFirstIdxArg = 7;

static bool isIntrinsicCall(Value *I, Intrinsic::ID Id) {
  if (auto *Call = dyn_cast<CallInst>(I))
    if (Function *Func = Call->getCalledFunction())
      return Func->getIntrinsicID() == Id;
  return false;
}

static bool isPreserveStaticOffsetCall(Value *I) {
  return isIntrinsicCall(I, Intrinsic::preserve_static_offset);
}

static CallInst *isGEPAndLoad(Value *I) {
  if (isIntrinsicCall(I, Intrinsic::bpf_getelementptr_and_load))
    return cast<CallInst>(I);
  return nullptr;
}

static CallInst *isGEPAndStore(Value *I) {
  if (isIntrinsicCall(I, Intrinsic::bpf_getelementptr_and_store))
    return cast<CallInst>(I);
  return nullptr;
}

template <class T = Instruction>
static DILocation *mergeDILocations(SmallVector<T *> &Insns) {
  DILocation *Merged = (*Insns.begin())->getDebugLoc();
  for (T *I : Insns)
    Merged = DILocation::getMergedLocation(Merged, I->getDebugLoc());
  return Merged;
}

static CallInst *makeIntrinsicCall(Module *M,
                                   Intrinsic::BPFIntrinsics Intrinsic,
                                   ArrayRef<Type *> Types,
                                   ArrayRef<Value *> Args) {
  Function *Fn = Intrinsic::getDeclaration(M, Intrinsic, Types);
  return CallInst::Create(Fn, Args);
}

static void setParamElementType(CallInst *Call, unsigned ArgNo, Type *Type) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ElementType, Type));
}

static void setParamReadNone(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ReadNone));
}

static void setParamReadOnly(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ReadOnly));
}

static void setParamWriteOnly(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::WriteOnly));
}

namespace {
struct GEPChainInfo {
  bool InBounds;
  Type *SourceElementType;
  SmallVector<Value *> Indices;
  SmallVector<GetElementPtrInst *> Members;

  GEPChainInfo() { reset(); }

  void reset() {
    InBounds = true;
    SourceElementType = nullptr;
    Indices.clear();
    Members.clear();
  }
};
} // Anonymous namespace

template <class T = std::disjunction<LoadInst, StoreInst>>
static void fillCommonArgs(LLVMContext &C, SmallVector<Value *> &Args,
                           GEPChainInfo &GEP, T *Insn) {
  Type *Int8Ty = Type::getInt8Ty(C);
  Type *Int1Ty = Type::getInt1Ty(C);
  // Implementation of Align guarantees that ShiftValue < 64
  unsigned AlignShiftValue = Log2_64(Insn->getAlign().value());
  Args.push_back(GEP.Members[0]->getPointerOperand());
  Args.push_back(ConstantInt::get(Int1Ty, Insn->isVolatile()));
  Args.push_back(ConstantInt::get(Int8Ty, (unsigned)Insn->getOrdering()));
  Args.push_back(ConstantInt::get(Int8Ty, (unsigned)Insn->getSyncScopeID()));
  Args.push_back(ConstantInt::get(Int8Ty, AlignShiftValue));
  Args.push_back(ConstantInt::get(Int1Ty, GEP.InBounds));
  Args.append(GEP.Indices.begin(), GEP.Indices.end());
}

static Instruction *makeGEPAndLoad(Module *M, GEPChainInfo &GEP,
                                   LoadInst *Load) {
  SmallVector<Value *> Args;
  fillCommonArgs(M->getContext(), Args, GEP, Load);
  CallInst *Call = makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_load,
                                     {Load->getType()}, Args);
  setParamElementType(Call, 0, GEP.SourceElementType);
  Call->applyMergedLocation(mergeDILocations(GEP.Members), Load->getDebugLoc());
  Call->setName((*GEP.Members.rbegin())->getName());
  if (Load->isUnordered()) {
    Call->setOnlyReadsMemory();
    Call->setOnlyAccessesArgMemory();
    setParamReadOnly(Call, 0);
  }
  for (unsigned I = GepAndLoadFirstIdxArg; I < Args.size(); ++I)
    Call->addParamAttr(I, Attribute::ImmArg);
  Call->setAAMetadata(Load->getAAMetadata());
  return Call;
}

static Instruction *makeGEPAndStore(Module *M, GEPChainInfo &GEP,
                                    StoreInst *Store) {
  SmallVector<Value *> Args;
  Args.push_back(Store->getValueOperand());
  fillCommonArgs(M->getContext(), Args, GEP, Store);
  CallInst *Call =
      makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_store,
                        {Store->getValueOperand()->getType()}, Args);
  setParamElementType(Call, 1, GEP.SourceElementType);
  if (Store->getValueOperand()->getType()->isPointerTy())
    setParamReadNone(Call, 0);
  Call->applyMergedLocation(mergeDILocations(GEP.Members),
                            Store->getDebugLoc());
  if (Store->isUnordered()) {
    Call->setOnlyWritesMemory();
    Call->setOnlyAccessesArgMemory();
    setParamWriteOnly(Call, 1);
  }
  for (unsigned I = GepAndStoreFirstIdxArg; I < Args.size(); ++I)
    Call->addParamAttr(I, Attribute::ImmArg);
  Call->setAAMetadata(Store->getAAMetadata());
  return Call;
}

static unsigned getOperandAsUnsigned(CallInst *Call, unsigned ArgNo) {
  if (auto *Int = dyn_cast<ConstantInt>(Call->getOperand(ArgNo)))
    return Int->getValue().getZExtValue();
  std::string Report;
  raw_string_ostream ReportS(Report);
  ReportS << "Expecting ConstantInt as argument #" << ArgNo << " of " << *Call
          << "\n";
  report_fatal_error(StringRef(Report));
}

static GetElementPtrInst *reconstructGEP(CallInst *Call, int Delta) {
  SmallVector<Value *> Indices;
  Indices.append(Call->data_operands_begin() + 6 + Delta,
                 Call->data_operands_end());
  Type *GEPPointeeType = Call->getParamElementType(Delta);
  auto *GEP =
      GetElementPtrInst::Create(GEPPointeeType, Call->getOperand(Delta),
                                ArrayRef<Value *>(Indices), Call->getName());
  GEP->setIsInBounds(getOperandAsUnsigned(Call, 5 + Delta));
  return GEP;
}

template <class T = std::disjunction<LoadInst, StoreInst>>
static void reconstructCommon(CallInst *Call, GetElementPtrInst *GEP, T *Insn,
                              int Delta) {
  Insn->setVolatile(getOperandAsUnsigned(Call, 1 + Delta));
  Insn->setOrdering((AtomicOrdering)getOperandAsUnsigned(Call, 2 + Delta));
  Insn->setSyncScopeID(getOperandAsUnsigned(Call, 3 + Delta));
  unsigned AlignShiftValue = getOperandAsUnsigned(Call, 4 + Delta);
  Insn->setAlignment(Align(1ULL << AlignShiftValue));
  GEP->setDebugLoc(Call->getDebugLoc());
  Insn->setDebugLoc(Call->getDebugLoc());
  Insn->setAAMetadata(Call->getAAMetadata());
}

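// Reconstruct a plain (getelementptr, load/store) pair from a
// llvm.bpf.getelementptr.and.load/store call. Used below by
// rewriteAccessChain to merge further GEPs into an existing intrinsic
// call, and by the BPFCheckAndAdjustIR pass when lowering the
// intrinsics back to regular IR.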
std::pair<GetElementPtrInst *, LoadInst *>
BPFPreserveStaticOffsetPass::reconstructLoad(CallInst *Call) {
  GetElementPtrInst *GEP = reconstructGEP(Call, 0);
  Type *ReturnType = Call->getFunctionType()->getReturnType();
  auto *Load = new LoadInst(ReturnType, GEP, "",
                            /* These are set in reconstructCommon */
                            false, Align(1));
  reconstructCommon(Call, GEP, Load, 0);
  return std::pair{GEP, Load};
}

std::pair<GetElementPtrInst *, StoreInst *>
BPFPreserveStaticOffsetPass::reconstructStore(CallInst *Call) {
  GetElementPtrInst *GEP = reconstructGEP(Call, 1);
  auto *Store = new StoreInst(Call->getOperand(0), GEP,
                              /* These are set in reconstructCommon */
                              false, Align(1));
  reconstructCommon(Call, GEP, Store, 1);
  return std::pair{GEP, Store};
}

static bool isZero(Value *V) {
  auto *CI = dyn_cast<ConstantInt>(V);
  return CI && CI->isZero();
}

// Given a chain of GEP instructions, collect the information necessary
// to merge the chain into a single GEP instruction of the form:
//   getelementptr %<type>, ptr %p, i32 0, <field_idx1>, <field_idx2>, ...
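//
// For example (illustrative IR, assuming %struct.foo is a struct whose
// second field is itself a struct %struct.anon):
//   %x = getelementptr inbounds %struct.foo, ptr %p, i32 0, i32 1
//   %y = getelementptr inbounds %struct.anon, ptr %x, i32 0, i32 0
// folds into:
//   getelementptr inbounds %struct.foo, ptr %p, i32 0, i32 1, i32 0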
static bool foldGEPChainAsStructAccess(SmallVector<GetElementPtrInst *> &GEPs,
                                       GEPChainInfo &Info) {
  if (GEPs.empty())
    return false;

  if (!all_of(GEPs, [=](GetElementPtrInst *GEP) {
        return GEP->hasAllConstantIndices();
      }))
    return false;

  GetElementPtrInst *First = GEPs[0];
  Info.InBounds = First->isInBounds();
  Info.SourceElementType = First->getSourceElementType();
  Type *ResultElementType = First->getResultElementType();
  Info.Indices.append(First->idx_begin(), First->idx_end());
  Info.Members.push_back(First);

  for (auto *Iter = GEPs.begin() + 1; Iter != GEPs.end(); ++Iter) {
    GetElementPtrInst *GEP = *Iter;
    if (!isZero(*GEP->idx_begin())) {
      Info.reset();
      return false;
    }
    if (!GEP->getSourceElementType() ||
        GEP->getSourceElementType() != ResultElementType) {
      Info.reset();
      return false;
    }
    Info.InBounds &= GEP->isInBounds();
    Info.Indices.append(GEP->idx_begin() + 1, GEP->idx_end());
    Info.Members.push_back(GEP);
    ResultElementType = GEP->getResultElementType();
  }

  return true;
}

// Given a chain of GEP instructions, collect the information necessary
// to merge the chain into a single GEP instruction of the form:
//   getelementptr i8, ptr %p, i64 %offset
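//
// For example (illustrative IR):
//   %x = getelementptr i8, ptr %p, i64 4
//   %y = getelementptr i16, ptr %x, i64 2
// folds into:
//   getelementptr i8, ptr %p, i64 8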
static bool foldGEPChainAsU8Access(SmallVector<GetElementPtrInst *> &GEPs,
                                   GEPChainInfo &Info) {
  if (GEPs.empty())
    return false;

  GetElementPtrInst *First = GEPs[0];
  const DataLayout &DL = First->getDataLayout();
  LLVMContext &C = First->getContext();
  Type *PtrTy = First->getType()->getScalarType();
  APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
  for (GetElementPtrInst *GEP : GEPs) {
    if (!GEP->accumulateConstantOffset(DL, Offset)) {
      Info.reset();
      return false;
    }
    Info.InBounds &= GEP->isInBounds();
    Info.Members.push_back(GEP);
  }
  Info.SourceElementType = Type::getInt8Ty(C);
  Info.Indices.push_back(ConstantInt::get(C, Offset));

  return true;
}

static void reportNonStaticGEPChain(Instruction *Insn) {
  auto Msg = DiagnosticInfoUnsupported(
      *Insn->getFunction(),
      Twine("Non-constant offset in access to a field of a type marked "
            "with preserve_static_offset might be rejected by BPF verifier")
          .concat(Insn->getDebugLoc()
                      ? ""
                      : " (pass -g option to get exact location)"),
      Insn->getDebugLoc(), DS_Warning);
  Insn->getContext().diagnose(Msg);
}

static bool allZeroIndices(SmallVector<GetElementPtrInst *> &GEPs) {
  return GEPs.empty() || all_of(GEPs, [=](GetElementPtrInst *GEP) {
           return GEP->hasAllZeroIndices();
         });
}

static bool tryToReplaceWithGEPBuiltin(Instruction *LoadOrStoreTemplate,
                                       SmallVector<GetElementPtrInst *> &GEPs,
                                       Instruction *InsnToReplace) {
  GEPChainInfo GEPChain;
  if (!foldGEPChainAsStructAccess(GEPs, GEPChain) &&
      !foldGEPChainAsU8Access(GEPs, GEPChain)) {
    return false;
  }
  Module *M = InsnToReplace->getModule();
  if (auto *Load = dyn_cast<LoadInst>(LoadOrStoreTemplate)) {
    Instruction *Replacement = makeGEPAndLoad(M, GEPChain, Load);
    Replacement->insertBefore(InsnToReplace);
    InsnToReplace->replaceAllUsesWith(Replacement);
  }
  if (auto *Store = dyn_cast<StoreInst>(LoadOrStoreTemplate)) {
    Instruction *Replacement = makeGEPAndStore(M, GEPChain, Store);
    Replacement->insertBefore(InsnToReplace);
  }
  return true;
}

// Check if U->getPointerOperand() == I
static bool isPointerOperand(Value *I, User *U) {
  if (auto *L = dyn_cast<LoadInst>(U))
    return L->getPointerOperand() == I;
  if (auto *S = dyn_cast<StoreInst>(U))
    return S->getPointerOperand() == I;
  if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
    return GEP->getPointerOperand() == I;
  if (auto *Call = isGEPAndLoad(U))
    return Call->getArgOperand(0) == I;
  if (auto *Call = isGEPAndStore(U))
    return Call->getArgOperand(1) == I;
  return false;
}

static bool isInlineableCall(User *U) {
  if (auto *Call = dyn_cast<CallInst>(U))
    return Call->hasFnAttr(Attribute::InlineHint);
  return false;
}

static void rewriteAccessChain(Instruction *Insn,
                               SmallVector<GetElementPtrInst *> &GEPs,
                               SmallVector<Instruction *> &Visited,
                               bool AllowPartial, bool &StillUsed);

static void rewriteUses(Instruction *Insn,
                        SmallVector<GetElementPtrInst *> &GEPs,
                        SmallVector<Instruction *> &Visited, bool AllowPartial,
                        bool &StillUsed) {
  for (User *U : Insn->users()) {
    auto *UI = dyn_cast<Instruction>(U);
    if (UI && (isPointerOperand(Insn, UI) || isPreserveStaticOffsetCall(UI) ||
               isInlineableCall(UI)))
      rewriteAccessChain(UI, GEPs, Visited, AllowPartial, StillUsed);
    else
      LLVM_DEBUG({
        llvm::dbgs() << "unsupported usage in BPFPreserveStaticOffsetPass:\n";
        llvm::dbgs() << "  Insn: " << *Insn << "\n";
        llvm::dbgs() << "  User: " << *U << "\n";
      });
  }
}

// A DFS traversal of GEP chain trees starting from Root.
//
// Recursion descends through GEP instructions and
// llvm.preserve.static.offset calls. Recursion stops at any other
// instruction. If a load or store instruction is reached, it is
// replaced by a call to the `llvm.bpf.getelementptr.and.load` or
// `llvm.bpf.getelementptr.and.store` intrinsic.
// If `llvm.bpf.getelementptr.and.load/store` is reached, the
// accumulated GEPs are merged into the intrinsic call.
// If nested calls to `llvm.preserve.static.offset` are encountered,
// these calls are marked for deletion.
//
// Parameters description:
// - Insn - current position in the tree
// - GEPs - GEP instructions for the current branch
// - Visited - a list of visited instructions in DFS order,
//   order is important for unused instruction deletion.
// - AllowPartial - when true, GEP chains that can't be folded are
//   not reported, otherwise a diagnostic message is shown for such
//   chains.
// - StillUsed - set to true if one of the GEP chains could not be
//   folded, makes sense when AllowPartial is false, means that the root
//   preserve.static.offset call is still in use and should remain
//   until the next run of this pass.
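//
// For example (illustrative IR, assuming the second field of
// %struct.foo is an i32):
//   %a = call ptr @llvm.preserve.static.offset(ptr %p)
//   %b = getelementptr inbounds %struct.foo, ptr %a, i32 0, i32 1
//   %c = load i32, ptr %b
// The traversal starts at %a, accumulates %b and, upon reaching %c,
// replaces the (%b, %c) pair with a llvm.bpf.getelementptr.and.load
// call; the marker %a is removed afterwards (see removeMarkerCall),
// leaving the call rooted at %p.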
static void rewriteAccessChain(Instruction *Insn,
                               SmallVector<GetElementPtrInst *> &GEPs,
                               SmallVector<Instruction *> &Visited,
                               bool AllowPartial, bool &StillUsed) {
  auto MarkAndTraverseUses = [&]() {
    Visited.push_back(Insn);
    rewriteUses(Insn, GEPs, Visited, AllowPartial, StillUsed);
  };
  auto TryToReplace = [&](Instruction *LoadOrStore) {
    // Do nothing for (preserve.static.offset (load/store ..)) or for
    // GEPs with zero indices. Such constructs lead to zero offset and
    // are simplified by other passes.
    if (allZeroIndices(GEPs))
      return;
    if (tryToReplaceWithGEPBuiltin(LoadOrStore, GEPs, Insn)) {
      Visited.push_back(Insn);
      return;
    }
    if (!AllowPartial)
      reportNonStaticGEPChain(Insn);
    StillUsed = true;
  };
  if (isa<LoadInst>(Insn) || isa<StoreInst>(Insn)) {
    TryToReplace(Insn);
  } else if (isGEPAndLoad(Insn)) {
    auto [GEP, Load] =
        BPFPreserveStaticOffsetPass::reconstructLoad(cast<CallInst>(Insn));
    GEPs.push_back(GEP);
    TryToReplace(Load);
    GEPs.pop_back();
    delete Load;
    delete GEP;
  } else if (isGEPAndStore(Insn)) {
    // This case can't be merged with the above because
    // `delete Load` / `delete Store` wants a concrete type;
    // the destructor of Instruction is protected.
    auto [GEP, Store] =
        BPFPreserveStaticOffsetPass::reconstructStore(cast<CallInst>(Insn));
    GEPs.push_back(GEP);
    TryToReplace(Store);
    GEPs.pop_back();
    delete Store;
    delete GEP;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(Insn)) {
    GEPs.push_back(GEP);
    MarkAndTraverseUses();
    GEPs.pop_back();
  } else if (isPreserveStaticOffsetCall(Insn)) {
    MarkAndTraverseUses();
  } else if (isInlineableCall(Insn)) {
    // Preserve the preserve.static.offset call for parameters of
    // functions that might be inlined. These are removed on a
    // second pass after inlining.
    // This might happen when a pointer to a preserve_static_offset
    // structure is passed as a parameter of a function that is
    // inlined inside a loop that is later unrolled.
    if (AllowPartial)
      StillUsed = true;
  } else {
    SmallString<128> Buf;
    raw_svector_ostream BufStream(Buf);
    BufStream << *Insn;
    report_fatal_error(
        Twine("Unexpected rewriteAccessChain Insn = ").concat(Buf));
  }
}

static void removeMarkerCall(Instruction *Marker) {
  Marker->replaceAllUsesWith(Marker->getOperand(0));
  Marker->eraseFromParent();
}

static bool rewriteAccessChain(Instruction *Marker, bool AllowPartial,
                               SmallPtrSetImpl<Instruction *> &RemovedMarkers) {
  SmallVector<GetElementPtrInst *> GEPs;
  SmallVector<Instruction *> Visited;
  bool StillUsed = false;
  rewriteUses(Marker, GEPs, Visited, AllowPartial, StillUsed);
  // Check if Visited instructions can be removed; iterate in
  // reverse to unblock instructions higher in the chain.
  for (auto V = Visited.rbegin(); V != Visited.rend(); ++V) {
    if (isPreserveStaticOffsetCall(*V)) {
      removeMarkerCall(*V);
      RemovedMarkers.insert(*V);
    } else if ((*V)->use_empty()) {
      (*V)->eraseFromParent();
    }
  }
  return StillUsed;
}

static std::vector<Instruction *>
collectPreserveStaticOffsetCalls(Function &F) {
  std::vector<Instruction *> Calls;
  for (Instruction &Insn : instructions(F))
    if (isPreserveStaticOffsetCall(&Insn))
      Calls.push_back(&Insn);
  return Calls;
}

static bool isPreserveArrayIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_array_access_index);
}

static bool isPreserveStructIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_struct_access_index);
}

static bool isPreserveUnionIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_union_access_index);
}

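// Walk every instruction reachable from a preserve.static.offset
// marker and strip the CO-RE access-index intrinsics
// (llvm.preserve.{array,struct,union}.access.index) on the way, using
// the BPFCoreSharedInfo helpers; preserve_static_offset takes
// precedence, so plain GEP chains remain for this pass to fold.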
static void removePAICalls(Instruction *Marker) {
  auto IsPointerOperand = [](Value *Op, User *U) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
      return GEP->getPointerOperand() == Op;
    if (isPreserveStaticOffsetCall(U) || isPreserveArrayIndex(U) ||
        isPreserveStructIndex(U) || isPreserveUnionIndex(U))
      return cast<CallInst>(U)->getArgOperand(0) == Op;
    return false;
  };

  SmallVector<Value *, 32> WorkList;
  WorkList.push_back(Marker);
  do {
    Value *V = WorkList.pop_back_val();
    for (User *U : V->users())
      if (IsPointerOperand(V, U))
        WorkList.push_back(U);
    auto *Call = dyn_cast<CallInst>(V);
    if (!Call)
      continue;
    if (isPreserveArrayIndex(V))
      BPFCoreSharedInfo::removeArrayAccessCall(Call);
    else if (isPreserveStructIndex(V))
      BPFCoreSharedInfo::removeStructAccessCall(Call);
    else if (isPreserveUnionIndex(V))
      BPFCoreSharedInfo::removeUnionAccessCall(Call);
  } while (!WorkList.empty());
}

// Look for sequences:
// - llvm.preserve.static.offset -> getelementptr... -> load
// - llvm.preserve.static.offset -> getelementptr... -> store
// and replace those with calls to the intrinsics:
// - llvm.bpf.getelementptr.and.load
// - llvm.bpf.getelementptr.and.store
static bool rewriteFunction(Function &F, bool AllowPartial) {
  LLVM_DEBUG(dbgs() << "********** BPFPreserveStaticOffsetPass (AllowPartial="
                    << AllowPartial << ") ************\n");

  auto MarkerCalls = collectPreserveStaticOffsetCalls(F);
  SmallPtrSet<Instruction *, 16> RemovedMarkers;

  LLVM_DEBUG(dbgs() << "There are " << MarkerCalls.size()
                    << " preserve.static.offset calls\n");

  if (MarkerCalls.empty())
    return false;

  for (auto *Call : MarkerCalls)
    removePAICalls(Call);

  for (auto *Call : MarkerCalls) {
    if (RemovedMarkers.contains(Call))
      continue;
    bool StillUsed = rewriteAccessChain(Call, AllowPartial, RemovedMarkers);
    if (!StillUsed || !AllowPartial)
      removeMarkerCall(Call);
  }

  return true;
}

PreservedAnalyses
llvm::BPFPreserveStaticOffsetPass::run(Function &F,
                                       FunctionAnalysisManager &AM) {
  return rewriteFunction(F, AllowPartial) ? PreservedAnalyses::none()
                                          : PreservedAnalyses::all();
}
682