//===- MemProfiler.cpp - memory allocation and access profiler ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler. Memory accesses are instrumented
// to increment the access count held in a shadow memory location, or
// alternatively to call into the runtime. Memory intrinsic calls (memmove,
// memcpy, memset) are changed to call the memory profiling runtime version
// instead.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/MemProfiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "memprof"

constexpr int LLVM_MEM_PROFILER_VERSION = 1;

// Size of memory mapped to a single shadow location.
constexpr uint64_t DefaultShadowGranularity = 64;

// Scale from granularity down to shadow size.
constexpr uint64_t DefaultShadowScale = 3;
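// With the two defaults above, each 64-byte granule of memory is covered by
// one 64 >> 3 = 8-byte shadow counter (a uint64_t), located at
//   Shadow = ((Mem & ~(DefaultShadowGranularity - 1)) >> DefaultShadowScale)
//            + DynamicShadowOffset
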
constexpr char MemProfModuleCtorName[] = "memprof.module_ctor";
constexpr uint64_t MemProfCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
constexpr uint64_t MemProfEmscriptenCtorAndDtorPriority = 50;
constexpr char MemProfInitName[] = "__memprof_init";
constexpr char MemProfVersionCheckNamePrefix[] =
    "__memprof_version_mismatch_check_v";

constexpr char MemProfShadowMemoryDynamicAddress[] =
    "__memprof_shadow_memory_dynamic_address";

constexpr char MemProfFilenameVar[] = "__memprof_profile_filename";

// Command-line flags.

static cl::opt<bool> ClInsertVersionCheck(
    "memprof-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
    cl::init(true));

// This flag may need to be replaced with -f[no-]memprof-reads.
static cl::opt<bool> ClInstrumentReads("memprof-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentWrites("memprof-instrument-writes",
                       cl::desc("instrument write instructions"), cl::Hidden,
                       cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "memprof-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClUseCalls(
    "memprof-use-callbacks",
    cl::desc("Use callbacks instead of inline instrumentation sequences."),
    cl::Hidden, cl::init(false));

static cl::opt<std::string>
    ClMemoryAccessCallbackPrefix("memprof-memory-access-callback-prefix",
                                 cl::desc("Prefix for memory access callbacks"),
                                 cl::Hidden, cl::init("__memprof_"));

// These flags allow changing the shadow mapping.
// The shadow mapping looks like
//   Shadow = ((Mem & mask) >> scale) + offset

static cl::opt<int> ClMappingScale("memprof-mapping-scale",
                                   cl::desc("scale of memprof shadow mapping"),
                                   cl::Hidden, cl::init(DefaultShadowScale));

static cl::opt<int>
    ClMappingGranularity("memprof-mapping-granularity",
                         cl::desc("granularity of memprof shadow mapping"),
                         cl::Hidden, cl::init(DefaultShadowGranularity));

static cl::opt<bool> ClStack("memprof-instrument-stack",
                             cl::desc("Instrument scalar stack variables"),
                             cl::Hidden, cl::init(false));

// Debug flags.

static cl::opt<int> ClDebug("memprof-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<std::string> ClDebugFunc("memprof-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("memprof-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("memprof-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumSkippedStackReads, "Number of non-instrumented stack reads");
STATISTIC(NumSkippedStackWrites, "Number of non-instrumented stack writes");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = ((mem & mask) >> Scale) + DynamicShadowOffset.
struct ShadowMapping {
  ShadowMapping() {
    Scale = ClMappingScale;
    Granularity = ClMappingGranularity;
    Mask = ~(Granularity - 1);
  }

  int Scale;
  int Granularity;
  uint64_t Mask; // Computed as ~(Granularity-1)
};

static uint64_t getCtorAndDtorPriority(Triple &TargetTriple) {
  return TargetTriple.isOSEmscripten() ? MemProfEmscriptenCtorAndDtorPriority
                                       : MemProfCtorAndDtorPriority;
}

struct InterestingMemoryAccess {
  Value *Addr = nullptr;
  bool IsWrite;
  Type *AccessTy;
  uint64_t TypeSize;
  Value *MaybeMask = nullptr;
};
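// An InterestingMemoryAccess is recorded for plain loads and stores, for
// atomicrmw/cmpxchg, and for the llvm.masked.load/llvm.masked.store
// intrinsics; MaybeMask is non-null only for the masked intrinsics. See
// isInterestingMemoryAccess() below.
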
/// Instrument the code in module to profile memory accesses.
class MemProfiler {
public:
  MemProfiler(Module &M) {
    C = &(M.getContext());
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
  }

  /// If it is an interesting memory access, populate information
  /// about the access and return an InterestingMemoryAccess struct.
  /// Otherwise return None.
  Optional<InterestingMemoryAccess>
  isInterestingMemoryAccess(Instruction *I) const;

  void instrumentMop(Instruction *I, const DataLayout &DL,
                     InterestingMemoryAccess &Access);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite);
  void instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
                                   Instruction *I, Value *Addr, Type *AccessTy,
                                   bool IsWrite);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool instrumentFunction(Function &F);
  bool maybeInsertMemProfInitAtFunctionEntry(Function &F);
  bool insertDynamicShadowAtFunctionEntry(Function &F);

private:
  void initializeCallbacks(Module &M);

  LLVMContext *C;
  int LongSize;
  Type *IntptrTy;
  ShadowMapping Mapping;

  // These arrays are indexed by AccessIsWrite.
  FunctionCallee MemProfMemoryAccessCallback[2];
  FunctionCallee MemProfMemoryAccessCallbackSized[2];

  FunctionCallee MemProfMemmove, MemProfMemcpy, MemProfMemset;
  Value *DynamicShadowOffset = nullptr;
};

class MemProfilerLegacyPass : public FunctionPass {
public:
  static char ID;

  explicit MemProfilerLegacyPass() : FunctionPass(ID) {
    initializeMemProfilerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "MemProfilerFunctionPass"; }

  bool runOnFunction(Function &F) override {
    MemProfiler Profiler(*F.getParent());
    return Profiler.instrumentFunction(F);
  }
};

class ModuleMemProfiler {
public:
  ModuleMemProfiler(Module &M) { TargetTriple = Triple(M.getTargetTriple()); }

  bool instrumentModule(Module &);

private:
  Triple TargetTriple;
  ShadowMapping Mapping;
  Function *MemProfCtorFunction = nullptr;
};

class ModuleMemProfilerLegacyPass : public ModulePass {
public:
  static char ID;

  explicit ModuleMemProfilerLegacyPass() : ModulePass(ID) {
    initializeModuleMemProfilerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "ModuleMemProfiler"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {}

  bool runOnModule(Module &M) override {
    ModuleMemProfiler MemProfiler(M);
    return MemProfiler.instrumentModule(M);
  }
};

} // end anonymous namespace

MemProfilerPass::MemProfilerPass() = default;

PreservedAnalyses MemProfilerPass::run(Function &F,
                                       AnalysisManager<Function> &AM) {
  Module &M = *F.getParent();
  MemProfiler Profiler(M);
  if (Profiler.instrumentFunction(F))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

ModuleMemProfilerPass::ModuleMemProfilerPass() = default;

PreservedAnalyses ModuleMemProfilerPass::run(Module &M,
                                             AnalysisManager<Module> &AM) {
  ModuleMemProfiler Profiler(M);
  if (Profiler.instrumentModule(M))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

char MemProfilerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(MemProfilerLegacyPass, "memprof",
                      "MemProfiler: profile memory allocations and accesses.",
                      false, false)
INITIALIZE_PASS_END(MemProfilerLegacyPass, "memprof",
                    "MemProfiler: profile memory allocations and accesses.",
                    false, false)

FunctionPass *llvm::createMemProfilerFunctionPass() {
  return new MemProfilerLegacyPass();
}

char ModuleMemProfilerLegacyPass::ID = 0;

INITIALIZE_PASS(ModuleMemProfilerLegacyPass, "memprof-module",
                "MemProfiler: profile memory allocations and accesses. "
                "ModulePass",
                false, false)

ModulePass *llvm::createModuleMemProfilerLegacyPassPass() {
  return new ModuleMemProfilerLegacyPass();
}

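// memToShadow below implements the mapping described earlier; every address
// within one granule collapses to the same counter. For example, with the
// default Mask = ~63 and Scale = 3, the addresses 0x1000 and 0x103f both map
// to shadow slot (0x1000 >> 3) + DynamicShadowOffset.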
Value *MemProfiler::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // (Shadow & mask) >> scale
  Shadow = IRB.CreateAnd(Shadow, Mapping.Mask);
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  // (Shadow >> scale) + offset
  assert(DynamicShadowOffset);
  return IRB.CreateAdd(Shadow, DynamicShadowOffset);
}

// Instrument memset/memmove/memcpy.
void MemProfiler::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? MemProfMemmove : MemProfMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall(
        MemProfMemset,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}

Optional<InterestingMemoryAccess>
MemProfiler::isInterestingMemoryAccess(Instruction *I) const {
  // Do not instrument the load fetching the dynamic shadow address.
  if (DynamicShadowOffset == I)
    return None;

  InterestingMemoryAccess Access;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads)
      return None;
    Access.IsWrite = false;
    Access.AccessTy = LI->getType();
    Access.Addr = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites)
      return None;
    Access.IsWrite = true;
    Access.AccessTy = SI->getValueOperand()->getType();
    Access.Addr = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics)
      return None;
    Access.IsWrite = true;
    Access.AccessTy = RMW->getValOperand()->getType();
    Access.Addr = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics)
      return None;
    Access.IsWrite = true;
    Access.AccessTy = XCHG->getCompareOperand()->getType();
    Access.Addr = XCHG->getPointerOperand();
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    auto *F = CI->getCalledFunction();
    if (F && (F->getIntrinsicID() == Intrinsic::masked_load ||
              F->getIntrinsicID() == Intrinsic::masked_store)) {
      unsigned OpOffset = 0;
      if (F->getIntrinsicID() == Intrinsic::masked_store) {
        if (!ClInstrumentWrites)
          return None;
        // Masked store has an initial operand for the value.
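        // (llvm.masked.store is (value, ptr, alignment, mask), so with
        // OpOffset == 1 the pointer is operand 1 and the mask operand 3;
        // llvm.masked.load is (ptr, alignment, mask, passthru) and keeps
        // OpOffset == 0.)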
        OpOffset = 1;
        Access.AccessTy = CI->getArgOperand(0)->getType();
        Access.IsWrite = true;
      } else {
        if (!ClInstrumentReads)
          return None;
        Access.AccessTy = CI->getType();
        Access.IsWrite = false;
      }

      auto *BasePtr = CI->getOperand(0 + OpOffset);
      Access.MaybeMask = CI->getOperand(2 + OpOffset);
      Access.Addr = BasePtr;
    }
  }

  if (!Access.Addr)
    return None;

  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  Type *PtrTy = cast<PointerType>(Access.Addr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0)
    return None;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Access.Addr->isSwiftError())
    return None;

  // Peel off GEPs and BitCasts.
  auto *Addr = Access.Addr->stripInBoundsOffsets();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    // Do not instrument PGO counter updates.
    if (GV->hasSection()) {
      StringRef SectionName = GV->getSection();
      // Check if the global is in the PGO counters section.
      auto OF = Triple(I->getModule()->getTargetTriple()).getObjectFormat();
      if (SectionName.endswith(
              getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
        return None;
    }

    // Do not instrument accesses to LLVM internal variables.
    if (GV->getName().startswith("__llvm"))
      return None;
  }

  const DataLayout &DL = I->getModule()->getDataLayout();
  Access.TypeSize = DL.getTypeStoreSizeInBits(Access.AccessTy);
  return Access;
}

void MemProfiler::instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
                                              Instruction *I, Value *Addr,
                                              Type *AccessTy, bool IsWrite) {
  auto *VTy = cast<FixedVectorType>(AccessTy);
  uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
  unsigned Num = VTy->getNumElements();
  auto *Zero = ConstantInt::get(IntptrTy, 0);
  for (unsigned Idx = 0; Idx < Num; ++Idx) {
    Value *InstrumentedAddress = nullptr;
    Instruction *InsertBefore = I;
    if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
      // dyn_cast as we might get UndefValue
      if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
        if (Masked->isZero())
          // Mask is constant false, so no instrumentation needed.
          continue;
        // If we have a true or undef value, fall through to instrumentAddress
        // with InsertBefore == I.
      }
    } else {
      IRBuilder<> IRB(I);
      Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
      Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
      InsertBefore = ThenTerm;
    }

    IRBuilder<> IRB(InsertBefore);
    InstrumentedAddress =
        IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
    instrumentAddress(I, InsertBefore, InstrumentedAddress, ElemTypeSize,
                      IsWrite);
  }
}

void MemProfiler::instrumentMop(Instruction *I, const DataLayout &DL,
                                InterestingMemoryAccess &Access) {
  // Skip instrumentation of stack accesses unless requested.
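  // (getUnderlyingObject looks through GEPs and casts, so accesses into local
  // arrays and structs are also treated as stack accesses here and skipped
  // unless -memprof-instrument-stack is set.)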
  if (!ClStack && isa<AllocaInst>(getUnderlyingObject(Access.Addr))) {
    if (Access.IsWrite)
      ++NumSkippedStackWrites;
    else
      ++NumSkippedStackReads;
    return;
  }

  if (Access.IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  if (Access.MaybeMask) {
    instrumentMaskedLoadOrStore(DL, Access.MaybeMask, I, Access.Addr,
                                Access.AccessTy, Access.IsWrite);
  } else {
    // Since the access counts will be accumulated across the entire allocation,
    // we only update the shadow access count for the first location and thus
    // don't need to worry about alignment and type size.
    instrumentAddress(I, I, Access.Addr, Access.TypeSize, Access.IsWrite);
  }
}

void MemProfiler::instrumentAddress(Instruction *OrigIns,
                                    Instruction *InsertBefore, Value *Addr,
                                    uint32_t TypeSize, bool IsWrite) {
  IRBuilder<> IRB(InsertBefore);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);

  if (ClUseCalls) {
    IRB.CreateCall(MemProfMemoryAccessCallback[IsWrite], AddrLong);
    return;
  }

  // Create an inline sequence to compute shadow location, and increment the
  // value by one.
  Type *ShadowTy = Type::getInt64Ty(*C);
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  Value *ShadowAddr = IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy);
  Value *ShadowValue = IRB.CreateLoad(ShadowTy, ShadowAddr);
  Value *Inc = ConstantInt::get(Type::getInt64Ty(*C), 1);
  ShadowValue = IRB.CreateAdd(ShadowValue, Inc);
  IRB.CreateStore(ShadowValue, ShadowAddr);
}

// Create the variable for the profile file name.
void createProfileFileNameVar(Module &M) {
  const MDString *MemProfFilename =
      dyn_cast_or_null<MDString>(M.getModuleFlag("MemProfProfileFilename"));
  if (!MemProfFilename)
    return;
  assert(!MemProfFilename->getString().empty() &&
         "Unexpected MemProfProfileFilename metadata with empty string");
  Constant *ProfileNameConst = ConstantDataArray::getString(
      M.getContext(), MemProfFilename->getString(), true);
  GlobalVariable *ProfileNameVar = new GlobalVariable(
      M, ProfileNameConst->getType(), /*isConstant=*/true,
      GlobalValue::WeakAnyLinkage, ProfileNameConst, MemProfFilenameVar);
  Triple TT(M.getTargetTriple());
  if (TT.supportsCOMDAT()) {
    ProfileNameVar->setLinkage(GlobalValue::ExternalLinkage);
    ProfileNameVar->setComdat(M.getOrInsertComdat(MemProfFilenameVar));
  }
}

bool ModuleMemProfiler::instrumentModule(Module &M) {
  // Create a module constructor.
  std::string MemProfVersion = std::to_string(LLVM_MEM_PROFILER_VERSION);
  std::string VersionCheckName =
      ClInsertVersionCheck ? (MemProfVersionCheckNamePrefix + MemProfVersion)
                           : "";
  std::tie(MemProfCtorFunction, std::ignore) =
      createSanitizerCtorAndInitFunctions(M, MemProfModuleCtorName,
                                          MemProfInitName, /*InitArgTypes=*/{},
                                          /*InitArgs=*/{}, VersionCheckName);

  const uint64_t Priority = getCtorAndDtorPriority(TargetTriple);
  appendToGlobalCtors(M, MemProfCtorFunction, Priority);

  createProfileFileNameVar(M);

  return true;
}

void MemProfiler::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);

  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";

    SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
    SmallVector<Type *, 2> Args1{1, IntptrTy};
    MemProfMemoryAccessCallbackSized[AccessIsWrite] =
        M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr + "N",
                              FunctionType::get(IRB.getVoidTy(), Args2, false));

    MemProfMemoryAccessCallback[AccessIsWrite] =
        M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr,
                              FunctionType::get(IRB.getVoidTy(), Args1, false));
  }
  MemProfMemmove = M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemProfMemcpy = M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memcpy",
                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                        IRB.getInt8PtrTy(), IntptrTy);
  MemProfMemset = M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memset",
                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                        IRB.getInt32Ty(), IntptrTy);
}
bool MemProfiler::maybeInsertMemProfInitAtFunctionEntry(Function &F) {
  // For each NSObject descendant having a +load method, this method is invoked
  // by the ObjC runtime before any of the static constructors is called.
  // Therefore we need to instrument such methods with a call to __memprof_init
  // at the beginning in order to initialize our runtime before any access to
  // the shadow memory.
  // We cannot just ignore these methods, because they may call other
  // instrumented functions.
  if (F.getName().find(" load]") != std::string::npos) {
    FunctionCallee MemProfInitFunction =
        declareSanitizerInitFunction(*F.getParent(), MemProfInitName, {});
    IRBuilder<> IRB(&F.front(), F.front().begin());
    IRB.CreateCall(MemProfInitFunction, {});
    return true;
  }
  return false;
}

bool MemProfiler::insertDynamicShadowAtFunctionEntry(Function &F) {
  IRBuilder<> IRB(&F.front().front());
  Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
      MemProfShadowMemoryDynamicAddress, IntptrTy);
  if (F.getParent()->getPICLevel() == PICLevel::NotPIC)
    cast<GlobalVariable>(GlobalDynamicAddress)->setDSOLocal(true);
  DynamicShadowOffset = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
  return true;
}

bool MemProfiler::instrumentFunction(Function &F) {
  if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
    return false;
  if (ClDebugFunc == F.getName())
    return false;
  if (F.getName().startswith("__memprof_"))
    return false;

  bool FunctionModified = false;

  // If needed, insert __memprof_init.
  // This function needs to be called even if the function body is not
  // instrumented.
  if (maybeInsertMemProfInitAtFunctionEntry(F))
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "MEMPROF instrumenting:\n" << F << "\n");

  initializeCallbacks(*F.getParent());

  SmallVector<Instruction *, 16> ToInstrument;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isInterestingMemoryAccess(&Inst) || isa<MemIntrinsic>(Inst))
        ToInstrument.push_back(&Inst);
    }
  }

  if (ToInstrument.empty()) {
    LLVM_DEBUG(dbgs() << "MEMPROF done instrumenting: " << FunctionModified
                      << " " << F << "\n");

    return FunctionModified;
  }

  FunctionModified |= insertDynamicShadowAtFunctionEntry(F);

  int NumInstrumented = 0;
  for (auto *Inst : ToInstrument) {
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      Optional<InterestingMemoryAccess> Access =
          isInterestingMemoryAccess(Inst);
      if (Access)
        instrumentMop(Inst, F.getParent()->getDataLayout(), *Access);
      else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  if (NumInstrumented > 0)
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "MEMPROF done instrumenting: " << FunctionModified << " "
                    << F << "\n");

  return FunctionModified;
}
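
// A typical way to exercise this instrumentation (assuming the new pass
// manager registers these passes under the same "memprof" and
// "memprof-module" names used for the legacy passes above):
//   opt -passes='function(memprof),memprof-module' in.ll -S -o out.ll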