//===------ BPFAbstractMemberAccess.cpp - Abstracting Member Accesses -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass abstracts struct/union member accesses in order to support
// compile-once run-everywhere (CO-RE). CO-RE aims to compile a program once
// so that it can run on different kernels. In particular, if a bpf program
// tries to access a particular kernel data structure member, the details of
// the intermediate member accesses are recorded so the bpf loader can make
// the necessary adjustments right before program loading.
//
// For example,
//
//   struct s {
//     int a;
//     int b;
//   };
//   struct t {
//     struct s c;
//     int d;
//   };
//   struct t e;
//
// For the member access e.c.b, the compiler will generate code
//   &e + 4
//
// The compile-once run-everywhere approach instead generates the following code
//   r = 4
//   &e + r
// The "4" in "r = 4" can be changed based on a particular kernel version.
// For example, on a particular kernel version, if struct s is changed to
//
//   struct s {
//     int new_field;
//     int a;
//     int b;
//   }
//
// By repeating the member access on the host, the bpf loader can
// adjust "r = 4" as "r = 8".
//
// This feature relies on the following three intrinsic calls:
//   addr = preserve_array_access_index(base, dimension, index)
//   addr = preserve_union_access_index(base, di_index)
//          !llvm.preserve.access.index <union_ditype>
//   addr = preserve_struct_access_index(base, gep_index, di_index)
//          !llvm.preserve.access.index <struct_ditype>
//
// Bitfield member accesses need special attention. A user cannot take the
// address of a bitfield access. To make it easy for the kernel verifier to
// optimize bitfield code, a new clang intrinsic is introduced:
//   uint32_t __builtin_preserve_field_info(member_access, info_kind)
// In IR, a chain with two (or more) intrinsic calls will be generated:
//   ...
//   addr = preserve_struct_access_index(base, 1, 1) !struct s
//   uint32_t result = bpf_preserve_field_info(addr, info_kind)
//
// Suppose the info_kind is FIELD_SIGNEDNESS. The above two IR intrinsics
// will be replaced with a relocatable insn:
//   signedness = /* signedness of member_access */
// and the signedness can be changed by the bpf loader based on the
// types on the host.
//
// A user can also test whether a field exists or not with
//   uint32_t result = bpf_preserve_field_info(member_access, FIELD_EXISTENCE)
// The field will always be available (result = 1) during the initial
// compilation, but the bpf loader can patch it with the correct value
// on the target host, where the member_access may or may not be available.
//
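// As an illustration only (not something emitted by this pass), a loader-side
// consumer could combine the FIELD_* relocations to read a bitfield roughly
// as follows:
//   v = load FIELD_BYTE_SIZE bytes at (base + FIELD_BYTE_OFFSET), widened
//       to u64
//   v = v << FIELD_LSHIFT_U64
//   v = FIELD_SIGNEDNESS ? (s64)v >> FIELD_RSHIFT_U64 : v >> FIELD_RSHIFT_U64
// where each FIELD_* value is a relocatable immediate patched by the loader
// for the target kernel.
//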
//===----------------------------------------------------------------------===//

#include "BPF.h"
#include "BPFCORE.h"
#include "BPFTargetMachine.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <stack>

#define DEBUG_TYPE "bpf-abstract-member-access"

namespace llvm {
constexpr StringRef BPFCoreSharedInfo::AmaAttr;
uint32_t BPFCoreSharedInfo::SeqNum;

Instruction *BPFCoreSharedInfo::insertPassThrough(Module *M, BasicBlock *BB,
                                                  Instruction *Input,
                                                  Instruction *Before) {
  Function *Fn = Intrinsic::getDeclaration(
      M, Intrinsic::bpf_passthrough, {Input->getType(), Input->getType()});
  Constant *SeqNumVal = ConstantInt::get(Type::getInt32Ty(BB->getContext()),
                                         BPFCoreSharedInfo::SeqNum++);

  auto *NewInst = CallInst::Create(Fn, {SeqNumVal, Input});
  BB->getInstList().insert(Before->getIterator(), NewInst);
  return NewInst;
}
} // namespace llvm
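
// Note (illustration only): insertPassThrough() creates a call roughly of the
// form
//   %res = call i64 @llvm.bpf.passthrough.i64.i64(i32 <seq_num>, i64 %input)
// with the overloaded intrinsic types taken from Input's type. The call is
// removed again at the beginning of the target IR passes; see the
// tail-merging discussion in transformGEPChain() below.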

using namespace llvm;

namespace {
class BPFAbstractMemberAccess final {
public:
  BPFAbstractMemberAccess(BPFTargetMachine *TM) : TM(TM) {}

  bool run(Function &F);

  struct CallInfo {
    uint32_t Kind;
    uint32_t AccessIndex;
    Align RecordAlignment;
    MDNode *Metadata;
    Value *Base;
  };
  typedef std::stack<std::pair<CallInst *, CallInfo>> CallInfoStack;

private:
  enum : uint32_t {
    BPFPreserveArrayAI = 1,
    BPFPreserveUnionAI = 2,
    BPFPreserveStructAI = 3,
    BPFPreserveFieldInfoAI = 4,
  };

  TargetMachine *TM;
  const DataLayout *DL = nullptr;
  Module *M = nullptr;

  static std::map<std::string, GlobalVariable *> GEPGlobals;
  // A map to link preserve_*_access_index intrinsic calls.
  std::map<CallInst *, std::pair<CallInst *, CallInfo>> AIChain;
  // A map to hold all the base preserve_*_access_index intrinsic calls.
  // The base call is not an input of any other preserve_*
  // intrinsics.
  std::map<CallInst *, CallInfo> BaseAICalls;

  bool doTransformation(Function &F);

  void traceAICall(CallInst *Call, CallInfo &ParentInfo);
  void traceBitCast(BitCastInst *BitCast, CallInst *Parent,
                    CallInfo &ParentInfo);
  void traceGEP(GetElementPtrInst *GEP, CallInst *Parent,
                CallInfo &ParentInfo);
  void collectAICallChains(Function &F);

  bool IsPreserveDIAccessIndexCall(const CallInst *Call, CallInfo &Cinfo);
  bool IsValidAIChain(const MDNode *ParentMeta, uint32_t ParentAI,
                      const MDNode *ChildMeta);
  bool removePreserveAccessIndexIntrinsic(Function &F);
  void replaceWithGEP(std::vector<CallInst *> &CallList,
                      uint32_t NumOfZerosIndex, uint32_t DIIndex);
  bool HasPreserveFieldInfoCall(CallInfoStack &CallStack);
  void GetStorageBitRange(DIDerivedType *MemberTy, Align RecordAlignment,
                          uint32_t &StartBitOffset, uint32_t &EndBitOffset);
  uint32_t GetFieldInfo(uint32_t InfoKind, DICompositeType *CTy,
                        uint32_t AccessIndex, uint32_t PatchImm,
                        Align RecordAlignment);

  Value *computeBaseAndAccessKey(CallInst *Call, CallInfo &CInfo,
                                 std::string &AccessKey, MDNode *&BaseMeta);
  MDNode *computeAccessKey(CallInst *Call, CallInfo &CInfo,
                           std::string &AccessKey, bool &IsInt32Ret);
  uint64_t getConstant(const Value *IndexValue);
  bool transformGEPChain(CallInst *Call, CallInfo &CInfo);
};

std::map<std::string, GlobalVariable *> BPFAbstractMemberAccess::GEPGlobals;

class BPFAbstractMemberAccessLegacyPass final : public FunctionPass {
  BPFTargetMachine *TM;

  bool runOnFunction(Function &F) override {
    return BPFAbstractMemberAccess(TM).run(F);
  }

public:
  static char ID;

  // Add an optional BPFTargetMachine parameter so that the BPF backend can
  // add the pass with the target machine to find out the endianness. The
  // default constructor (without parameters) is used by the pass manager for
  // management purposes.
  BPFAbstractMemberAccessLegacyPass(BPFTargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM) {}
};

} // End anonymous namespace

char BPFAbstractMemberAccessLegacyPass::ID = 0;
INITIALIZE_PASS(BPFAbstractMemberAccessLegacyPass, DEBUG_TYPE,
                "BPF Abstract Member Access", false, false)

FunctionPass *llvm::createBPFAbstractMemberAccess(BPFTargetMachine *TM) {
  return new BPFAbstractMemberAccessLegacyPass(TM);
}

bool BPFAbstractMemberAccess::run(Function &F) {
  LLVM_DEBUG(dbgs() << "********** Abstract Member Accesses **********\n");

  M = F.getParent();
  if (!M)
    return false;

  // Bail out if no debug info.
  if (M->debug_compile_units().empty())
    return false;

  DL = &M->getDataLayout();
  return doTransformation(F);
}

static bool SkipDIDerivedTag(unsigned Tag, bool skipTypedef) {
  if (Tag != dwarf::DW_TAG_typedef && Tag != dwarf::DW_TAG_const_type &&
      Tag != dwarf::DW_TAG_volatile_type &&
      Tag != dwarf::DW_TAG_restrict_type &&
      Tag != dwarf::DW_TAG_member)
    return false;
  if (Tag == dwarf::DW_TAG_typedef && !skipTypedef)
    return false;
  return true;
}

static DIType *stripQualifiers(DIType *Ty, bool skipTypedef = true) {
  while (auto *DTy = dyn_cast<DIDerivedType>(Ty)) {
    if (!SkipDIDerivedTag(DTy->getTag(), skipTypedef))
      break;
    Ty = DTy->getBaseType();
  }
  return Ty;
}

static const DIType *stripQualifiers(const DIType *Ty) {
  while (auto *DTy = dyn_cast<DIDerivedType>(Ty)) {
    if (!SkipDIDerivedTag(DTy->getTag(), true))
      break;
    Ty = DTy->getBaseType();
  }
  return Ty;
}

static uint32_t calcArraySize(const DICompositeType *CTy, uint32_t StartDim) {
  DINodeArray Elements = CTy->getElements();
  uint32_t DimSize = 1;
  for (uint32_t I = StartDim; I < Elements.size(); ++I) {
    if (auto *Element = dyn_cast_or_null<DINode>(Elements[I]))
      if (Element->getTag() == dwarf::DW_TAG_subrange_type) {
        const DISubrange *SR = cast<DISubrange>(Element);
        auto *CI = SR->getCount().dyn_cast<ConstantInt *>();
        DimSize *= CI->getSExtValue();
      }
  }

  return DimSize;
}
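
// For example (values for illustration only), with the two-dimensional array
//   int a[10][20];
// the DW_TAG_array_type node has two DW_TAG_subrange_type elements with
// counts 10 and 20, so calcArraySize(CTy, 0) returns 200 and
// calcArraySize(CTy, 1) returns 20 (the element count of one outer row).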

static Type *getBaseElementType(const CallInst *Call) {
  // Element type is stored in an elementtype() attribute on the first param.
  return Call->getAttributes().getParamElementType(0);
}

/// Check whether a call is a preserve_*_access_index intrinsic call or not.
bool BPFAbstractMemberAccess::IsPreserveDIAccessIndexCall(const CallInst *Call,
                                                          CallInfo &CInfo) {
  if (!Call)
    return false;

  const auto *GV = dyn_cast<GlobalValue>(Call->getCalledOperand());
  if (!GV)
    return false;
  if (GV->getName().startswith("llvm.preserve.array.access.index")) {
    CInfo.Kind = BPFPreserveArrayAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error(
          "Missing metadata for llvm.preserve.array.access.index intrinsic");
    CInfo.AccessIndex = getConstant(Call->getArgOperand(2));
    CInfo.Base = Call->getArgOperand(0);
    CInfo.RecordAlignment = DL->getABITypeAlign(getBaseElementType(Call));
    return true;
  }
  if (GV->getName().startswith("llvm.preserve.union.access.index")) {
    CInfo.Kind = BPFPreserveUnionAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error(
          "Missing metadata for llvm.preserve.union.access.index intrinsic");
    CInfo.AccessIndex = getConstant(Call->getArgOperand(1));
    CInfo.Base = Call->getArgOperand(0);
    CInfo.RecordAlignment =
        DL->getABITypeAlign(CInfo.Base->getType()->getPointerElementType());
    return true;
  }
  if (GV->getName().startswith("llvm.preserve.struct.access.index")) {
    CInfo.Kind = BPFPreserveStructAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error(
          "Missing metadata for llvm.preserve.struct.access.index intrinsic");
    CInfo.AccessIndex = getConstant(Call->getArgOperand(2));
    CInfo.Base = Call->getArgOperand(0);
    CInfo.RecordAlignment = DL->getABITypeAlign(getBaseElementType(Call));
    return true;
  }
  if (GV->getName().startswith("llvm.bpf.preserve.field.info")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = nullptr;
    // Check validity of info_kind as clang did not check this.
    uint64_t InfoKind = getConstant(Call->getArgOperand(1));
    if (InfoKind >= BPFCoreSharedInfo::MAX_FIELD_RELOC_KIND)
      report_fatal_error(
          "Incorrect info_kind for llvm.bpf.preserve.field.info intrinsic");
    CInfo.AccessIndex = InfoKind;
    return true;
  }
  if (GV->getName().startswith("llvm.bpf.preserve.type.info")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error(
          "Missing metadata for llvm.preserve.type.info intrinsic");
    uint64_t Flag = getConstant(Call->getArgOperand(1));
    if (Flag >= BPFCoreSharedInfo::MAX_PRESERVE_TYPE_INFO_FLAG)
      report_fatal_error(
          "Incorrect flag for llvm.bpf.preserve.type.info intrinsic");
    if (Flag == BPFCoreSharedInfo::PRESERVE_TYPE_INFO_EXISTENCE)
      CInfo.AccessIndex = BPFCoreSharedInfo::TYPE_EXISTENCE;
    else
      CInfo.AccessIndex = BPFCoreSharedInfo::TYPE_SIZE;
    return true;
  }
  if (GV->getName().startswith("llvm.bpf.preserve.enum.value")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error(
          "Missing metadata for llvm.preserve.enum.value intrinsic");
    uint64_t Flag = getConstant(Call->getArgOperand(2));
    if (Flag >= BPFCoreSharedInfo::MAX_PRESERVE_ENUM_VALUE_FLAG)
      report_fatal_error(
          "Incorrect flag for llvm.bpf.preserve.enum.value intrinsic");
    if (Flag == BPFCoreSharedInfo::PRESERVE_ENUM_VALUE_EXISTENCE)
      CInfo.AccessIndex = BPFCoreSharedInfo::ENUM_VALUE_EXISTENCE;
    else
      CInfo.AccessIndex = BPFCoreSharedInfo::ENUM_VALUE;
    return true;
  }

  return false;
}

void BPFAbstractMemberAccess::replaceWithGEP(std::vector<CallInst *> &CallList,
                                             uint32_t DimensionIndex,
                                             uint32_t GEPIndex) {
  for (auto Call : CallList) {
    uint32_t Dimension = 1;
    if (DimensionIndex > 0)
      Dimension = getConstant(Call->getArgOperand(DimensionIndex));

    Constant *Zero =
        ConstantInt::get(Type::getInt32Ty(Call->getParent()->getContext()), 0);
    SmallVector<Value *, 4> IdxList;
    for (unsigned I = 0; I < Dimension; ++I)
      IdxList.push_back(Zero);
    IdxList.push_back(Call->getArgOperand(GEPIndex));

    auto *GEP = GetElementPtrInst::CreateInBounds(
        getBaseElementType(Call), Call->getArgOperand(0), IdxList, "", Call);
    Call->replaceAllUsesWith(GEP);
    Call->eraseFromParent();
  }
}

bool BPFAbstractMemberAccess::removePreserveAccessIndexIntrinsic(Function &F) {
  std::vector<CallInst *> PreserveArrayIndexCalls;
  std::vector<CallInst *> PreserveUnionIndexCalls;
  std::vector<CallInst *> PreserveStructIndexCalls;
  bool Found = false;

  for (auto &BB : F)
    for (auto &I : BB) {
      auto *Call = dyn_cast<CallInst>(&I);
      CallInfo CInfo;
      if (!IsPreserveDIAccessIndexCall(Call, CInfo))
        continue;

      Found = true;
      if (CInfo.Kind == BPFPreserveArrayAI)
        PreserveArrayIndexCalls.push_back(Call);
      else if (CInfo.Kind == BPFPreserveUnionAI)
        PreserveUnionIndexCalls.push_back(Call);
      else
        PreserveStructIndexCalls.push_back(Call);
    }

  // Do the following transformation:
  //   . addr = preserve_array_access_index(base, dimension, index)
  //     is transformed to
  //       addr = GEP(base, dimension's zeros, index)
  //   . addr = preserve_union_access_index(base, di_index)
  //     is transformed to
  //       addr = base, i.e., all usages of "addr" are replaced by "base".
  //   . addr = preserve_struct_access_index(base, gep_index, di_index)
  //     is transformed to
  //       addr = GEP(base, 0, gep_index)
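  // For instance (illustrative, not exact IR syntax), a leftover struct
  // access such as
  //   %addr = call @llvm.preserve.struct.access.index(%struct.t* %base,
  //                                                   i32 1, i32 1)
  // is rewritten into the plain GEP
  //   %addr = getelementptr inbounds %struct.t, %struct.t* %base, i32 0, i32 1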
  replaceWithGEP(PreserveArrayIndexCalls, 1, 2);
  replaceWithGEP(PreserveStructIndexCalls, 0, 1);
  for (auto Call : PreserveUnionIndexCalls) {
    Call->replaceAllUsesWith(Call->getArgOperand(0));
    Call->eraseFromParent();
  }

  return Found;
}

/// Check whether the access index chain is valid. We check here because
/// there may be type casts between two access indexes. We want to ensure
/// the memory access is still valid.
bool BPFAbstractMemberAccess::IsValidAIChain(const MDNode *ParentType,
                                             uint32_t ParentAI,
                                             const MDNode *ChildType) {
  if (!ChildType)
    return true; // preserve_field_info, no type comparison needed.

  const DIType *PType = stripQualifiers(cast<DIType>(ParentType));
  const DIType *CType = stripQualifiers(cast<DIType>(ChildType));

  // Child is a derived/pointer type, which is due to type casting.
  // Pointer type cannot be in the middle of chain.
  if (isa<DIDerivedType>(CType))
    return false;

  // Parent is a pointer type.
  if (const auto *PtrTy = dyn_cast<DIDerivedType>(PType)) {
    if (PtrTy->getTag() != dwarf::DW_TAG_pointer_type)
      return false;
    return stripQualifiers(PtrTy->getBaseType()) == CType;
  }

  // Otherwise, struct/union/array types
  const auto *PTy = dyn_cast<DICompositeType>(PType);
  const auto *CTy = dyn_cast<DICompositeType>(CType);
  assert(PTy && CTy && "ParentType or ChildType is null or not composite");

  uint32_t PTyTag = PTy->getTag();
  assert(PTyTag == dwarf::DW_TAG_array_type ||
         PTyTag == dwarf::DW_TAG_structure_type ||
         PTyTag == dwarf::DW_TAG_union_type);

  uint32_t CTyTag = CTy->getTag();
  assert(CTyTag == dwarf::DW_TAG_array_type ||
         CTyTag == dwarf::DW_TAG_structure_type ||
         CTyTag == dwarf::DW_TAG_union_type);

  // Multi dimensional arrays, base element should be the same
  if (PTyTag == dwarf::DW_TAG_array_type && PTyTag == CTyTag)
    return PTy->getBaseType() == CTy->getBaseType();

  DIType *Ty;
  if (PTyTag == dwarf::DW_TAG_array_type)
    Ty = PTy->getBaseType();
  else
    Ty = dyn_cast<DIType>(PTy->getElements()[ParentAI]);

  return dyn_cast<DICompositeType>(stripQualifiers(Ty)) == CTy;
}

void BPFAbstractMemberAccess::traceAICall(CallInst *Call,
                                          CallInfo &ParentInfo) {
  for (User *U : Call->users()) {
    Instruction *Inst = dyn_cast<Instruction>(U);
    if (!Inst)
      continue;

    if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
      traceBitCast(BI, Call, ParentInfo);
    } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
      CallInfo ChildInfo;

      if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
          IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
                         ChildInfo.Metadata)) {
        AIChain[CI] = std::make_pair(Call, ParentInfo);
        traceAICall(CI, ChildInfo);
      } else {
        BaseAICalls[Call] = ParentInfo;
      }
    } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
      if (GI->hasAllZeroIndices())
        traceGEP(GI, Call, ParentInfo);
      else
        BaseAICalls[Call] = ParentInfo;
    } else {
      BaseAICalls[Call] = ParentInfo;
    }
  }
}

void BPFAbstractMemberAccess::traceBitCast(BitCastInst *BitCast,
                                           CallInst *Parent,
                                           CallInfo &ParentInfo) {
  for (User *U : BitCast->users()) {
    Instruction *Inst = dyn_cast<Instruction>(U);
    if (!Inst)
      continue;

    if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
      traceBitCast(BI, Parent, ParentInfo);
    } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
      CallInfo ChildInfo;
      if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
          IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
                         ChildInfo.Metadata)) {
        AIChain[CI] = std::make_pair(Parent, ParentInfo);
        traceAICall(CI, ChildInfo);
      } else {
        BaseAICalls[Parent] = ParentInfo;
      }
    } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
      if (GI->hasAllZeroIndices())
        traceGEP(GI, Parent, ParentInfo);
      else
        BaseAICalls[Parent] = ParentInfo;
    } else {
      BaseAICalls[Parent] = ParentInfo;
    }
  }
}

void BPFAbstractMemberAccess::traceGEP(GetElementPtrInst *GEP, CallInst *Parent,
                                       CallInfo &ParentInfo) {
  for (User *U : GEP->users()) {
    Instruction *Inst = dyn_cast<Instruction>(U);
    if (!Inst)
      continue;

    if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
      traceBitCast(BI, Parent, ParentInfo);
    } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
      CallInfo ChildInfo;
      if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
          IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
                         ChildInfo.Metadata)) {
        AIChain[CI] = std::make_pair(Parent, ParentInfo);
        traceAICall(CI, ChildInfo);
      } else {
        BaseAICalls[Parent] = ParentInfo;
      }
    } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
      if (GI->hasAllZeroIndices())
        traceGEP(GI, Parent, ParentInfo);
      else
        BaseAICalls[Parent] = ParentInfo;
    } else {
      BaseAICalls[Parent] = ParentInfo;
    }
  }
}

void BPFAbstractMemberAccess::collectAICallChains(Function &F) {
  AIChain.clear();
  BaseAICalls.clear();

  for (auto &BB : F)
    for (auto &I : BB) {
      CallInfo CInfo;
      auto *Call = dyn_cast<CallInst>(&I);
      if (!IsPreserveDIAccessIndexCall(Call, CInfo) ||
          AIChain.find(Call) != AIChain.end())
        continue;

      traceAICall(Call, CInfo);
    }
}

uint64_t BPFAbstractMemberAccess::getConstant(const Value *IndexValue) {
  const ConstantInt *CV = dyn_cast<ConstantInt>(IndexValue);
  assert(CV);
  return CV->getValue().getZExtValue();
}

/// Get the start and the end of storage offset for \p MemberTy.
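/// For example (illustrative numbers): with RecordAlignment = 4 (AlignBits =
/// 32), a bitfield member at OffsetInBits = 37 with SizeInBits = 3 yields
/// StartBitOffset = 32 and EndBitOffset = 64, i.e. the 32-bit storage unit
/// that fully contains the bitfield.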
void BPFAbstractMemberAccess::GetStorageBitRange(DIDerivedType *MemberTy,
                                                 Align RecordAlignment,
                                                 uint32_t &StartBitOffset,
                                                 uint32_t &EndBitOffset) {
  uint32_t MemberBitSize = MemberTy->getSizeInBits();
  uint32_t MemberBitOffset = MemberTy->getOffsetInBits();
  uint32_t AlignBits = RecordAlignment.value() * 8;
  if (RecordAlignment > 8 || MemberBitSize > AlignBits)
    report_fatal_error("Unsupported field expression for "
                       "llvm.bpf.preserve.field.info, requiring too big "
                       "alignment");

  StartBitOffset = MemberBitOffset & ~(AlignBits - 1);
  if ((StartBitOffset + AlignBits) < (MemberBitOffset + MemberBitSize))
    report_fatal_error("Unsupported field expression for "
                       "llvm.bpf.preserve.field.info, cross alignment "
                       "boundary");
  EndBitOffset = StartBitOffset + AlignBits;
}

uint32_t BPFAbstractMemberAccess::GetFieldInfo(uint32_t InfoKind,
                                               DICompositeType *CTy,
                                               uint32_t AccessIndex,
                                               uint32_t PatchImm,
                                               Align RecordAlignment) {
  if (InfoKind == BPFCoreSharedInfo::FIELD_EXISTENCE)
    return 1;

  uint32_t Tag = CTy->getTag();
  if (InfoKind == BPFCoreSharedInfo::FIELD_BYTE_OFFSET) {
    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      PatchImm += AccessIndex * calcArraySize(CTy, 1) *
                  (EltTy->getSizeInBits() >> 3);
    } else if (Tag == dwarf::DW_TAG_structure_type) {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      if (!MemberTy->isBitField()) {
        PatchImm += MemberTy->getOffsetInBits() >> 3;
      } else {
        unsigned SBitOffset, NextSBitOffset;
        GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset,
                           NextSBitOffset);
        PatchImm += SBitOffset >> 3;
      }
    }
    return PatchImm;
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_BYTE_SIZE) {
    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      return calcArraySize(CTy, 1) * (EltTy->getSizeInBits() >> 3);
    } else {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      uint32_t SizeInBits = MemberTy->getSizeInBits();
      if (!MemberTy->isBitField())
        return SizeInBits >> 3;

      unsigned SBitOffset, NextSBitOffset;
      GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset,
                         NextSBitOffset);
      SizeInBits = NextSBitOffset - SBitOffset;
      if (SizeInBits & (SizeInBits - 1))
        report_fatal_error(
            "Unsupported field expression for llvm.bpf.preserve.field.info");
      return SizeInBits >> 3;
    }
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_SIGNEDNESS) {
    const DIType *BaseTy;
    if (Tag == dwarf::DW_TAG_array_type) {
      // Signedness only checked when final array elements are accessed.
      if (CTy->getElements().size() != 1)
        report_fatal_error(
            "Invalid array expression for llvm.bpf.preserve.field.info");
      BaseTy = stripQualifiers(CTy->getBaseType());
    } else {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      BaseTy = stripQualifiers(MemberTy->getBaseType());
    }

    // Only basic types and enum types have signedness.
    const auto *BTy = dyn_cast<DIBasicType>(BaseTy);
    while (!BTy) {
      const auto *CompTy = dyn_cast<DICompositeType>(BaseTy);
      // Report an error if the field expression does not have signedness.
      if (!CompTy || CompTy->getTag() != dwarf::DW_TAG_enumeration_type)
        report_fatal_error(
            "Invalid field expression for llvm.bpf.preserve.field.info");
      BaseTy = stripQualifiers(CompTy->getBaseType());
      BTy = dyn_cast<DIBasicType>(BaseTy);
    }
    uint32_t Encoding = BTy->getEncoding();
    return (Encoding == dwarf::DW_ATE_signed ||
            Encoding == dwarf::DW_ATE_signed_char);
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_LSHIFT_U64) {
    // The value is loaded into a value with FIELD_BYTE_SIZE size,
    // and then zero or sign extended to U64.
    // FIELD_LSHIFT_U64 and FIELD_RSHIFT_U64 are operations
    // to extract the original value.
    const Triple &Triple = TM->getTargetTriple();
    DIDerivedType *MemberTy = nullptr;
    bool IsBitField = false;
    uint32_t SizeInBits;

    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      SizeInBits = calcArraySize(CTy, 1) * EltTy->getSizeInBits();
    } else {
      MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      SizeInBits = MemberTy->getSizeInBits();
      IsBitField = MemberTy->isBitField();
    }

    if (!IsBitField) {
      if (SizeInBits > 64)
        report_fatal_error(
            "too big field size for llvm.bpf.preserve.field.info");
      return 64 - SizeInBits;
    }

    unsigned SBitOffset, NextSBitOffset;
    GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset, NextSBitOffset);
    if (NextSBitOffset - SBitOffset > 64)
      report_fatal_error("too big field size for llvm.bpf.preserve.field.info");

    unsigned OffsetInBits = MemberTy->getOffsetInBits();
    if (Triple.getArch() == Triple::bpfel)
      return SBitOffset + 64 - OffsetInBits - SizeInBits;
    else
      return OffsetInBits + 64 - NextSBitOffset;
  }

  if (InfoKind == BPFCoreSharedInfo::FIELD_RSHIFT_U64) {
    DIDerivedType *MemberTy = nullptr;
    bool IsBitField = false;
    uint32_t SizeInBits;
    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      SizeInBits = calcArraySize(CTy, 1) * EltTy->getSizeInBits();
    } else {
      MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      SizeInBits = MemberTy->getSizeInBits();
      IsBitField = MemberTy->isBitField();
    }

    if (!IsBitField) {
      if (SizeInBits > 64)
        report_fatal_error(
            "too big field size for llvm.bpf.preserve.field.info");
      return 64 - SizeInBits;
    }

    unsigned SBitOffset, NextSBitOffset;
    GetStorageBitRange(MemberTy, RecordAlignment, SBitOffset, NextSBitOffset);
    if (NextSBitOffset - SBitOffset > 64)
      report_fatal_error("too big field size for llvm.bpf.preserve.field.info");

    return 64 - SizeInBits;
  }

  llvm_unreachable("Unknown llvm.bpf.preserve.field.info info kind");
}

bool BPFAbstractMemberAccess::HasPreserveFieldInfoCall(
    CallInfoStack &CallStack) {
  // This is called in error return path, no need to maintain CallStack.
  while (CallStack.size()) {
    auto StackElem = CallStack.top();
    if (StackElem.second.Kind == BPFPreserveFieldInfoAI)
      return true;
    CallStack.pop();
  }
  return false;
}

/// Compute the base of the whole preserve_* intrinsics chains, i.e., the base
/// pointer of the first preserve_*_access_index call, and construct the access
/// string, which will be the name of a global variable.
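/// For example (key taken from the comment in transformGEPChain() below), the
/// resulting global may be named
///   llvm.sk_buff:0:50$0:0:0:2:0
/// i.e. type "sk_buff", relocation kind 0 (the default InfoKind,
/// FIELD_BYTE_OFFSET), initially computed byte offset 50, and access string
/// "0:0:0:2:0".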
Value *BPFAbstractMemberAccess::computeBaseAndAccessKey(CallInst *Call,
                                                        CallInfo &CInfo,
                                                        std::string &AccessKey,
                                                        MDNode *&TypeMeta) {
  Value *Base = nullptr;
  std::string TypeName;
  CallInfoStack CallStack;

  // Put the access chain into a stack with the top as the head of the chain.
  while (Call) {
    CallStack.push(std::make_pair(Call, CInfo));
    CInfo = AIChain[Call].second;
    Call = AIChain[Call].first;
  }

  // The access offset from the base of the head of chain is also
  // calculated here as all debuginfo types are available.

  // Get the type name and calculate the first index.
  // We only want to get the type name from a typedef, structure or union.
  // If the user wants a relocation like
  //   int *p; ... __builtin_preserve_access_index(&p[4]) ...
  // or
  //   int a[10][20]; ... __builtin_preserve_access_index(&a[2][3]) ...
  // we will skip them.
  uint32_t FirstIndex = 0;
  uint32_t PatchImm = 0; // AccessOffset or the requested field info
  uint32_t InfoKind = BPFCoreSharedInfo::FIELD_BYTE_OFFSET;
  while (CallStack.size()) {
    auto StackElem = CallStack.top();
    Call = StackElem.first;
    CInfo = StackElem.second;

    if (!Base)
      Base = CInfo.Base;

    DIType *PossibleTypeDef = stripQualifiers(cast<DIType>(CInfo.Metadata),
                                              false);
    DIType *Ty = stripQualifiers(PossibleTypeDef);
    if (CInfo.Kind == BPFPreserveUnionAI ||
        CInfo.Kind == BPFPreserveStructAI) {
      // struct or union type. If the typedef is in the metadata, always
      // use the typedef.
      TypeName = std::string(PossibleTypeDef->getName());
      TypeMeta = PossibleTypeDef;
      PatchImm += FirstIndex * (Ty->getSizeInBits() >> 3);
      break;
    }

    assert(CInfo.Kind == BPFPreserveArrayAI);

    // Array entries will always be consumed to accumulate the initial index.
    CallStack.pop();

    // BPFPreserveArrayAI
    uint64_t AccessIndex = CInfo.AccessIndex;

    DIType *BaseTy = nullptr;
    bool CheckElemType = false;
    if (const auto *CTy = dyn_cast<DICompositeType>(Ty)) {
      // array type
      assert(CTy->getTag() == dwarf::DW_TAG_array_type);

      FirstIndex += AccessIndex * calcArraySize(CTy, 1);
      BaseTy = stripQualifiers(CTy->getBaseType());
      CheckElemType = CTy->getElements().size() == 1;
    } else {
      // pointer type
      auto *DTy = cast<DIDerivedType>(Ty);
      assert(DTy->getTag() == dwarf::DW_TAG_pointer_type);

      BaseTy = stripQualifiers(DTy->getBaseType());
      CTy = dyn_cast<DICompositeType>(BaseTy);
      if (!CTy) {
        CheckElemType = true;
      } else if (CTy->getTag() != dwarf::DW_TAG_array_type) {
        FirstIndex += AccessIndex;
        CheckElemType = true;
      } else {
        FirstIndex += AccessIndex * calcArraySize(CTy, 0);
      }
    }

    if (CheckElemType) {
      auto *CTy = dyn_cast<DICompositeType>(BaseTy);
      if (!CTy) {
        if (HasPreserveFieldInfoCall(CallStack))
          report_fatal_error(
              "Invalid field access for llvm.preserve.field.info intrinsic");
        return nullptr;
      }

      unsigned CTag = CTy->getTag();
      if (CTag == dwarf::DW_TAG_structure_type ||
          CTag == dwarf::DW_TAG_union_type) {
        TypeName = std::string(CTy->getName());
      } else {
        if (HasPreserveFieldInfoCall(CallStack))
          report_fatal_error(
              "Invalid field access for llvm.preserve.field.info intrinsic");
        return nullptr;
      }
      TypeMeta = CTy;
      PatchImm += FirstIndex * (CTy->getSizeInBits() >> 3);
      break;
    }
  }
  assert(TypeName.size());
  AccessKey += std::to_string(FirstIndex);

  // Traverse the rest of access chain to complete offset calculation
  // and access key construction.
  while (CallStack.size()) {
    auto StackElem = CallStack.top();
    CInfo = StackElem.second;
    CallStack.pop();

    if (CInfo.Kind == BPFPreserveFieldInfoAI) {
      InfoKind = CInfo.AccessIndex;
      if (InfoKind == BPFCoreSharedInfo::FIELD_EXISTENCE)
        PatchImm = 1;
      break;
    }

    // If the next Call (the top of the stack) is a BPFPreserveFieldInfoAI,
    // the action will be extracting field info.
    if (CallStack.size()) {
      auto StackElem2 = CallStack.top();
      CallInfo CInfo2 = StackElem2.second;
      if (CInfo2.Kind == BPFPreserveFieldInfoAI) {
        InfoKind = CInfo2.AccessIndex;
        assert(CallStack.size() == 1);
      }
    }

    // Access Index
    uint64_t AccessIndex = CInfo.AccessIndex;
    AccessKey += ":" + std::to_string(AccessIndex);

    MDNode *MDN = CInfo.Metadata;
    // At this stage, it cannot be a pointer type.
    auto *CTy = cast<DICompositeType>(stripQualifiers(cast<DIType>(MDN)));
    PatchImm = GetFieldInfo(InfoKind, CTy, AccessIndex, PatchImm,
                            CInfo.RecordAlignment);
  }

  // The access key is
  //   "llvm." + type name + ":" + reloc type + ":" + patched imm + "$" +
  //   access string,
  // uniquely identifying one relocation.
  // The prefix "llvm." indicates this is a temporary global, which should
  // not be emitted to an ELF file.
  AccessKey = "llvm." + TypeName + ":" + std::to_string(InfoKind) + ":" +
              std::to_string(PatchImm) + "$" + AccessKey;

  return Base;
}

MDNode *BPFAbstractMemberAccess::computeAccessKey(CallInst *Call,
                                                  CallInfo &CInfo,
                                                  std::string &AccessKey,
                                                  bool &IsInt32Ret) {
  DIType *Ty = stripQualifiers(cast<DIType>(CInfo.Metadata), false);
  assert(!Ty->getName().empty());

  int64_t PatchImm;
  std::string AccessStr("0");
  if (CInfo.AccessIndex == BPFCoreSharedInfo::TYPE_EXISTENCE) {
    PatchImm = 1;
  } else if (CInfo.AccessIndex == BPFCoreSharedInfo::TYPE_SIZE) {
    // A typedef debuginfo type has size 0; get the eventual base type.
    DIType *BaseTy = stripQualifiers(Ty, true);
    PatchImm = BaseTy->getSizeInBits() / 8;
  } else {
    // ENUM_VALUE_EXISTENCE and ENUM_VALUE
    IsInt32Ret = false;

    const auto *CE = cast<ConstantExpr>(Call->getArgOperand(1));
    const GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
    assert(GV->hasInitializer());
    const ConstantDataArray *DA =
        cast<ConstantDataArray>(GV->getInitializer());
    assert(DA->isString());
    StringRef ValueStr = DA->getAsString();

    // ValueStr format: <EnumeratorStr>:<Value>
    size_t Separator = ValueStr.find_first_of(':');
    StringRef EnumeratorStr = ValueStr.substr(0, Separator);

    // Find the enumerator index in the debuginfo.
    DIType *BaseTy = stripQualifiers(Ty, true);
    const auto *CTy = cast<DICompositeType>(BaseTy);
    assert(CTy->getTag() == dwarf::DW_TAG_enumeration_type);
    int EnumIndex = 0;
    for (const auto Element : CTy->getElements()) {
      const auto *Enum = cast<DIEnumerator>(Element);
      if (Enum->getName() == EnumeratorStr) {
        AccessStr = std::to_string(EnumIndex);
        break;
      }
      EnumIndex++;
    }

    if (CInfo.AccessIndex == BPFCoreSharedInfo::ENUM_VALUE) {
      StringRef EValueStr = ValueStr.substr(Separator + 1);
      PatchImm = std::stoll(std::string(EValueStr));
    } else {
      PatchImm = 1;
    }
  }

  AccessKey = "llvm." + Ty->getName().str() + ":" +
              std::to_string(CInfo.AccessIndex) + std::string(":") +
              std::to_string(PatchImm) + std::string("$") + AccessStr;

  return Ty;
}

/// Call/Kind is the base preserve_*_access_index() call. Attempt to transform
/// it into a chain of relocatable GEPs.
bool BPFAbstractMemberAccess::transformGEPChain(CallInst *Call,
                                                CallInfo &CInfo) {
  std::string AccessKey;
  MDNode *TypeMeta;
  Value *Base = nullptr;
  bool IsInt32Ret;

  IsInt32Ret = CInfo.Kind == BPFPreserveFieldInfoAI;
  if (CInfo.Kind == BPFPreserveFieldInfoAI && CInfo.Metadata) {
    TypeMeta = computeAccessKey(Call, CInfo, AccessKey, IsInt32Ret);
  } else {
    Base = computeBaseAndAccessKey(Call, CInfo, AccessKey, TypeMeta);
    if (!Base)
      return false;
  }

  BasicBlock *BB = Call->getParent();
  GlobalVariable *GV;

  if (GEPGlobals.find(AccessKey) == GEPGlobals.end()) {
    IntegerType *VarType;
    if (IsInt32Ret)
      VarType = Type::getInt32Ty(BB->getContext()); // 32bit return value
    else
      VarType = Type::getInt64Ty(BB->getContext()); // 64bit ptr or enum value

    GV = new GlobalVariable(*M, VarType, false,
                            GlobalVariable::ExternalLinkage, nullptr,
                            AccessKey);
    GV->addAttribute(BPFCoreSharedInfo::AmaAttr);
    GV->setMetadata(LLVMContext::MD_preserve_access_index, TypeMeta);
    GEPGlobals[AccessKey] = GV;
  } else {
    GV = GEPGlobals[AccessKey];
  }

  if (CInfo.Kind == BPFPreserveFieldInfoAI) {
    // Load the global variable which represents the returned field info.
    LoadInst *LDInst;
    if (IsInt32Ret)
      LDInst = new LoadInst(Type::getInt32Ty(BB->getContext()), GV, "", Call);
    else
      LDInst = new LoadInst(Type::getInt64Ty(BB->getContext()), GV, "", Call);

    Instruction *PassThroughInst =
        BPFCoreSharedInfo::insertPassThrough(M, BB, LDInst, Call);
    Call->replaceAllUsesWith(PassThroughInst);
    Call->eraseFromParent();
    return true;
  }

  // For any original GEP Call and Base %2 like
  //   %4 = bitcast %struct.net_device** %dev1 to i64*
  // it is transformed to:
  //   %6 = load llvm.sk_buff:0:50$0:0:0:2:0
  //   %7 = bitcast %struct.sk_buff* %2 to i8*
  //   %8 = getelementptr i8, i8* %7, %6
  //   %9 = bitcast i8* %8 to i64*
  //   using %9 instead of %4
  // The original Call inst is removed.

  // Load the global variable.
  auto *LDInst =
      new LoadInst(Type::getInt64Ty(BB->getContext()), GV, "", Call);

  // Generate a BitCast
  auto *BCInst = new BitCastInst(Base, Type::getInt8PtrTy(BB->getContext()));
  BB->getInstList().insert(Call->getIterator(), BCInst);

  // Generate a GetElementPtr
  auto *GEP = GetElementPtrInst::Create(Type::getInt8Ty(BB->getContext()),
                                        BCInst, LDInst);
  BB->getInstList().insert(Call->getIterator(), GEP);

  // Generate a BitCast
  auto *BCInst2 = new BitCastInst(GEP, Call->getType());
  BB->getInstList().insert(Call->getIterator(), BCInst2);

  // For the following code,
  //   Block0:
  //     ...
  //     if (...) goto Block1 else ...
  //   Block1:
  //     %6 = load llvm.sk_buff:0:50$0:0:0:2:0
  //     %7 = bitcast %struct.sk_buff* %2 to i8*
  //     %8 = getelementptr i8, i8* %7, %6
  //     ...
  //     goto CommonExit
  //   Block2:
  //     ...
  //     if (...) goto Block3 else ...
  //   Block3:
  //     %6 = load llvm.bpf_map:0:40$0:0:0:2:0
  //     %7 = bitcast %struct.sk_buff* %2 to i8*
  //     %8 = getelementptr i8, i8* %7, %6
  //     ...
  //     goto CommonExit
  //   CommonExit
  // SimplifyCFG may generate:
  //   Block0:
  //     ...
  //     if (...) goto Block_Common else ...
  //   Block2:
  //     ...
  //     if (...) goto Block_Common else ...
  //   Block_Common:
  //     PHI = [llvm.sk_buff:0:50$0:0:0:2:0, llvm.bpf_map:0:40$0:0:0:2:0]
  //     %6 = load PHI
  //     %7 = bitcast %struct.sk_buff* %2 to i8*
  //     %8 = getelementptr i8, i8* %7, %6
  //     ...
  //     goto CommonExit
  // For the above code, we cannot perform proper relocation since
  // "load PHI" has two possible relocations.
  //
  // To prevent the above tail merging, we use __builtin_bpf_passthrough()
  // where one of its parameters is a seq_num. Since two
  // __builtin_bpf_passthrough() funcs will always have different seq_num,
  // tail merging cannot happen. The __builtin_bpf_passthrough() will be
  // removed at the beginning of the target IR passes.
  //
  // This approach is also used in other places when a global variable
  // representing a relocation is used.
  Instruction *PassThroughInst =
      BPFCoreSharedInfo::insertPassThrough(M, BB, BCInst2, Call);
  Call->replaceAllUsesWith(PassThroughInst);
  Call->eraseFromParent();

  return true;
}

bool BPFAbstractMemberAccess::doTransformation(Function &F) {
  bool Transformed = false;

  // Collect PreserveDIAccessIndex Intrinsic call chains.
  // The call chains will be used to generate the access
  // patterns similar to GEP.
  collectAICallChains(F);

  for (auto &C : BaseAICalls)
    Transformed = transformGEPChain(C.first, C.second) || Transformed;

  return removePreserveAccessIndexIntrinsic(F) || Transformed;
}

PreservedAnalyses
BPFAbstractMemberAccessPass::run(Function &F, FunctionAnalysisManager &AM) {
  return BPFAbstractMemberAccess(TM).run(F) ? PreservedAnalyses::none()
                                            : PreservedAnalyses::all();
}