xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp (revision 271171e0d97b88ba2a7c3bf750c9672b484c1c13)
1 //===- MemProfiler.cpp - memory allocation and access profiler ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of MemProfiler. Memory accesses are instrumented
10 // to increment the access count held in a shadow memory location, or
11 // alternatively to call into the runtime. Memory intrinsic calls (memmove,
12 // memcpy, memset) are changed to call the memory profiling runtime version
13 // instead.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "llvm/Transforms/Instrumentation/MemProfiler.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/ADT/Triple.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/Constant.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/Function.h"
26 #include "llvm/IR/GlobalValue.h"
27 #include "llvm/IR/IRBuilder.h"
28 #include "llvm/IR/Instruction.h"
29 #include "llvm/IR/IntrinsicInst.h"
30 #include "llvm/IR/LLVMContext.h"
31 #include "llvm/IR/Module.h"
32 #include "llvm/IR/Type.h"
33 #include "llvm/IR/Value.h"
34 #include "llvm/InitializePasses.h"
35 #include "llvm/Pass.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Transforms/Instrumentation.h"
39 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
40 #include "llvm/Transforms/Utils/ModuleUtils.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "memprof"
45 
// Version of the profiling runtime ABI this instrumentation emits; checked
// against the runtime via __memprof_version_mismatch_check_v<N> below.
constexpr int LLVM_MEM_PROFILER_VERSION = 1;

// Size of memory mapped to a single shadow location.
constexpr uint64_t DefaultShadowGranularity = 64;

// Scale from granularity down to shadow size.
constexpr uint64_t DefaultShadowScale = 3;

constexpr char MemProfModuleCtorName[] = "memprof.module_ctor";
constexpr uint64_t MemProfCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority level for
// constructors, so use a larger value there.
constexpr uint64_t MemProfEmscriptenCtorAndDtorPriority = 50;
constexpr char MemProfInitName[] = "__memprof_init";
constexpr char MemProfVersionCheckNamePrefix[] =
    "__memprof_version_mismatch_check_v";

// Runtime-provided global holding the base offset of the shadow region.
constexpr char MemProfShadowMemoryDynamicAddress[] =
    "__memprof_shadow_memory_dynamic_address";

// Name of the global carrying the profile output filename (see
// createProfileFileNameVar below).
constexpr char MemProfFilenameVar[] = "__memprof_profile_filename";
66 
// Command-line flags.

static cl::opt<bool> ClInsertVersionCheck(
    "memprof-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
    cl::init(true));

// This flag may need to be replaced with -f[no-]memprof-reads.
static cl::opt<bool> ClInstrumentReads("memprof-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentWrites("memprof-instrument-writes",
                       cl::desc("instrument write instructions"), cl::Hidden,
                       cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "memprof-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClUseCalls(
    "memprof-use-callbacks",
    cl::desc("Use callbacks instead of inline instrumentation sequences."),
    cl::Hidden, cl::init(false));

static cl::opt<std::string>
    ClMemoryAccessCallbackPrefix("memprof-memory-access-callback-prefix",
                                 cl::desc("Prefix for memory access callbacks"),
                                 cl::Hidden, cl::init("__memprof_"));

// These flags allow changing the shadow mapping.
// The shadow mapping looks like
//    Shadow = ((Mem & mask) >> scale) + offset

static cl::opt<int> ClMappingScale("memprof-mapping-scale",
                                   cl::desc("scale of memprof shadow mapping"),
                                   cl::Hidden, cl::init(DefaultShadowScale));

static cl::opt<int>
    ClMappingGranularity("memprof-mapping-granularity",
                         cl::desc("granularity of memprof shadow mapping"),
                         cl::Hidden, cl::init(DefaultShadowGranularity));

static cl::opt<bool> ClStack("memprof-instrument-stack",
                             cl::desc("Instrument scalar stack variables"),
                             cl::Hidden, cl::init(false));

// Debug flags.

static cl::opt<int> ClDebug("memprof-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<std::string> ClDebugFunc("memprof-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

// ClDebugMin/ClDebugMax bound (by index) which of the collected memory
// operations actually get instrumented in instrumentFunction(); both
// default to -1, meaning "instrument everything".
static cl::opt<int> ClDebugMin("memprof-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("memprof-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));
129 
// Pass statistics (reported with -stats).
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumSkippedStackReads, "Number of non-instrumented stack reads");
STATISTIC(NumSkippedStackWrites, "Number of non-instrumented stack writes");
134 
135 namespace {
136 
137 /// This struct defines the shadow mapping using the rule:
138 ///   shadow = ((mem & mask) >> Scale) ADD DynamicShadowOffset.
139 struct ShadowMapping {
140   ShadowMapping() {
141     Scale = ClMappingScale;
142     Granularity = ClMappingGranularity;
143     Mask = ~(Granularity - 1);
144   }
145 
146   int Scale;
147   int Granularity;
148   uint64_t Mask; // Computed as ~(Granularity-1)
149 };
150 
151 static uint64_t getCtorAndDtorPriority(Triple &TargetTriple) {
152   return TargetTriple.isOSEmscripten() ? MemProfEmscriptenCtorAndDtorPriority
153                                        : MemProfCtorAndDtorPriority;
154 }
155 
156 struct InterestingMemoryAccess {
157   Value *Addr = nullptr;
158   bool IsWrite;
159   unsigned Alignment;
160   Type *AccessTy;
161   uint64_t TypeSize;
162   Value *MaybeMask = nullptr;
163 };
164 
/// Instrument the code in module to profile memory accesses.
class MemProfiler {
public:
  // Caches the context, pointer width, and matching intptr type of M.
  MemProfiler(Module &M) {
    C = &(M.getContext());
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
  }

  /// If it is an interesting memory access, populate information
  /// about the access and return a InterestingMemoryAccess struct.
  /// Otherwise return None.
  Optional<InterestingMemoryAccess>
  isInterestingMemoryAccess(Instruction *I) const;

  // Instrument a single load/store/atomic (possibly masked) access.
  void instrumentMop(Instruction *I, const DataLayout &DL,
                     InterestingMemoryAccess &Access);
  // Emit the shadow-counter increment (or runtime callback) for one address.
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite);
  // Instrument each lane of a masked vector load/store individually.
  void instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
                                   Instruction *I, Value *Addr,
                                   unsigned Alignment, Type *AccessTy,
                                   bool IsWrite);
  // Replace memset/memmove/memcpy with profiling-runtime equivalents.
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  // Compute the shadow address for a (cast-to-intptr) application address.
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool instrumentFunction(Function &F);
  bool maybeInsertMemProfInitAtFunctionEntry(Function &F);
  bool insertDynamicShadowAtFunctionEntry(Function &F);

private:
  void initializeCallbacks(Module &M);

  LLVMContext *C;
  int LongSize;          // Pointer width in bits.
  Type *IntptrTy;        // Integer type of pointer width.
  ShadowMapping Mapping;

  // These arrays are indexed by AccessIsWrite
  FunctionCallee MemProfMemoryAccessCallback[2];
  FunctionCallee MemProfMemoryAccessCallbackSized[2];

  FunctionCallee MemProfMemmove, MemProfMemcpy, MemProfMemset;
  // Load of __memprof_shadow_memory_dynamic_address for the current function;
  // set by insertDynamicShadowAtFunctionEntry().
  Value *DynamicShadowOffset = nullptr;
};
209 
// Legacy pass manager wrapper running MemProfiler per function.
class MemProfilerLegacyPass : public FunctionPass {
public:
  static char ID;

  explicit MemProfilerLegacyPass() : FunctionPass(ID) {
    initializeMemProfilerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "MemProfilerFunctionPass"; }

  bool runOnFunction(Function &F) override {
    MemProfiler Profiler(*F.getParent());
    return Profiler.instrumentFunction(F);
  }
};
225 
// Module-level instrumentation: inserts the module constructor (runtime init
// plus optional version check) and the profile filename global.
class ModuleMemProfiler {
public:
  ModuleMemProfiler(Module &M) { TargetTriple = Triple(M.getTargetTriple()); }

  bool instrumentModule(Module &);

private:
  Triple TargetTriple;
  ShadowMapping Mapping;
  Function *MemProfCtorFunction = nullptr;
};
237 
// Legacy pass manager wrapper around ModuleMemProfiler.
class ModuleMemProfilerLegacyPass : public ModulePass {
public:
  static char ID;

  explicit ModuleMemProfilerLegacyPass() : ModulePass(ID) {
    initializeModuleMemProfilerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "ModuleMemProfiler"; }

  // No analyses are required or preserved declarations needed here.
  void getAnalysisUsage(AnalysisUsage &AU) const override {}

  bool runOnModule(Module &M) override {
    ModuleMemProfiler MemProfiler(M);
    return MemProfiler.instrumentModule(M);
  }
};
255 
256 } // end anonymous namespace
257 
258 MemProfilerPass::MemProfilerPass() {}
259 
260 PreservedAnalyses MemProfilerPass::run(Function &F,
261                                        AnalysisManager<Function> &AM) {
262   Module &M = *F.getParent();
263   MemProfiler Profiler(M);
264   if (Profiler.instrumentFunction(F))
265     return PreservedAnalyses::none();
266   return PreservedAnalyses::all();
267 }
268 
269 ModuleMemProfilerPass::ModuleMemProfilerPass() {}
270 
271 PreservedAnalyses ModuleMemProfilerPass::run(Module &M,
272                                              AnalysisManager<Module> &AM) {
273   ModuleMemProfiler Profiler(M);
274   if (Profiler.instrumentModule(M))
275     return PreservedAnalyses::none();
276   return PreservedAnalyses::all();
277 }
278 
char MemProfilerLegacyPass::ID = 0;

// Register the function-level legacy pass under the "memprof" name.
INITIALIZE_PASS_BEGIN(MemProfilerLegacyPass, "memprof",
                      "MemProfiler: profile memory allocations and accesses.",
                      false, false)
INITIALIZE_PASS_END(MemProfilerLegacyPass, "memprof",
                    "MemProfiler: profile memory allocations and accesses.",
                    false, false)

// Factory for the legacy pass manager.
FunctionPass *llvm::createMemProfilerFunctionPass() {
  return new MemProfilerLegacyPass();
}
291 
char ModuleMemProfilerLegacyPass::ID = 0;

// Register the module-level legacy pass under "memprof-module".
// NOTE(review): the two adjacent string literals concatenate to
// "...accesses.ModulePass" with no separator — looks like a missing space;
// confirm the intended pass description upstream.
INITIALIZE_PASS(ModuleMemProfilerLegacyPass, "memprof-module",
                "MemProfiler: profile memory allocations and accesses."
                "ModulePass",
                false, false)

// Factory for the legacy pass manager.
ModulePass *llvm::createModuleMemProfilerLegacyPassPass() {
  return new ModuleMemProfilerLegacyPass();
}
302 
303 Value *MemProfiler::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
304   // (Shadow & mask) >> scale
305   Shadow = IRB.CreateAnd(Shadow, Mapping.Mask);
306   Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
307   // (Shadow >> scale) | offset
308   assert(DynamicShadowOffset);
309   return IRB.CreateAdd(Shadow, DynamicShadowOffset);
310 }
311 
312 // Instrument memset/memmove/memcpy
313 void MemProfiler::instrumentMemIntrinsic(MemIntrinsic *MI) {
314   IRBuilder<> IRB(MI);
315   if (isa<MemTransferInst>(MI)) {
316     IRB.CreateCall(
317         isa<MemMoveInst>(MI) ? MemProfMemmove : MemProfMemcpy,
318         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
319          IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
320          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
321   } else if (isa<MemSetInst>(MI)) {
322     IRB.CreateCall(
323         MemProfMemset,
324         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
325          IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
326          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
327   }
328   MI->eraseFromParent();
329 }
330 
// Classify instruction I: if it is a memory access this pass should profile,
// return a populated InterestingMemoryAccess; otherwise return None.
// Handles plain loads/stores, atomic rmw/cmpxchg, and masked load/store
// intrinsics, each gated by its respective command-line flag.
Optional<InterestingMemoryAccess>
MemProfiler::isInterestingMemoryAccess(Instruction *I) const {
  // Do not instrument the load fetching the dynamic shadow address.
  if (DynamicShadowOffset == I)
    return None;

  InterestingMemoryAccess Access;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads)
      return None;
    Access.IsWrite = false;
    Access.AccessTy = LI->getType();
    Access.Alignment = LI->getAlignment();
    Access.Addr = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites)
      return None;
    Access.IsWrite = true;
    Access.AccessTy = SI->getValueOperand()->getType();
    Access.Alignment = SI->getAlignment();
    Access.Addr = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics)
      return None;
    // Atomics are treated as writes; alignment is left at 0 (unknown).
    Access.IsWrite = true;
    Access.AccessTy = RMW->getValOperand()->getType();
    Access.Alignment = 0;
    Access.Addr = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics)
      return None;
    Access.IsWrite = true;
    Access.AccessTy = XCHG->getCompareOperand()->getType();
    Access.Alignment = 0;
    Access.Addr = XCHG->getPointerOperand();
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    // Only the masked load/store intrinsics are interesting among calls.
    auto *F = CI->getCalledFunction();
    if (F && (F->getIntrinsicID() == Intrinsic::masked_load ||
              F->getIntrinsicID() == Intrinsic::masked_store)) {
      unsigned OpOffset = 0;
      if (F->getIntrinsicID() == Intrinsic::masked_store) {
        if (!ClInstrumentWrites)
          return None;
        // Masked store has an initial operand for the value.
        OpOffset = 1;
        Access.AccessTy = CI->getArgOperand(0)->getType();
        Access.IsWrite = true;
      } else {
        if (!ClInstrumentReads)
          return None;
        Access.AccessTy = CI->getType();
        Access.IsWrite = false;
      }

      // Operand layout (after OpOffset): base pointer, alignment, mask.
      auto *BasePtr = CI->getOperand(0 + OpOffset);
      if (auto *AlignmentConstant =
              dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        Access.Alignment = (unsigned)AlignmentConstant->getZExtValue();
      else
        Access.Alignment = 1; // No alignment guarantees. We probably got Undef
      Access.MaybeMask = CI->getOperand(2 + OpOffset);
      Access.Addr = BasePtr;
    }
  }

  // Addr stays null if I was none of the cases above.
  if (!Access.Addr)
    return None;

  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  Type *PtrTy = cast<PointerType>(Access.Addr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0)
    return None;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Access.Addr->isSwiftError())
    return None;

  const DataLayout &DL = I->getModule()->getDataLayout();
  Access.TypeSize = DL.getTypeStoreSizeInBits(Access.AccessTy);
  return Access;
}
417 
// Instrument a masked vector load/store by instrumenting each lane's
// address separately. For a constant mask, disabled lanes are skipped
// outright; for a variable mask, the block is split so the shadow update
// only executes when the lane's mask bit is set at runtime.
void MemProfiler::instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
                                              Instruction *I, Value *Addr,
                                              unsigned Alignment,
                                              Type *AccessTy, bool IsWrite) {
  auto *VTy = cast<FixedVectorType>(AccessTy);
  uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
  unsigned Num = VTy->getNumElements();
  auto *Zero = ConstantInt::get(IntptrTy, 0);
  for (unsigned Idx = 0; Idx < Num; ++Idx) {
    Value *InstrumentedAddress = nullptr;
    Instruction *InsertBefore = I;
    if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
      // dyn_cast as we might get UndefValue
      if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
        if (Masked->isZero())
          // Mask is constant false, so no instrumentation needed.
          continue;
        // If we have a true or undef value, fall through to instrumentAddress.
        // with InsertBefore == I
      }
    } else {
      // Variable mask: guard the lane's shadow update behind a runtime
      // check of its mask element.
      IRBuilder<> IRB(I);
      Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
      Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
      InsertBefore = ThenTerm;
    }

    // Address of lane Idx within the vector pointed to by Addr.
    IRBuilder<> IRB(InsertBefore);
    InstrumentedAddress =
        IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
    instrumentAddress(I, InsertBefore, InstrumentedAddress, ElemTypeSize,
                      IsWrite);
  }
}
452 
453 void MemProfiler::instrumentMop(Instruction *I, const DataLayout &DL,
454                                 InterestingMemoryAccess &Access) {
455   // Skip instrumentation of stack accesses unless requested.
456   if (!ClStack && isa<AllocaInst>(getUnderlyingObject(Access.Addr))) {
457     if (Access.IsWrite)
458       ++NumSkippedStackWrites;
459     else
460       ++NumSkippedStackReads;
461     return;
462   }
463 
464   if (Access.IsWrite)
465     NumInstrumentedWrites++;
466   else
467     NumInstrumentedReads++;
468 
469   if (Access.MaybeMask) {
470     instrumentMaskedLoadOrStore(DL, Access.MaybeMask, I, Access.Addr,
471                                 Access.Alignment, Access.AccessTy,
472                                 Access.IsWrite);
473   } else {
474     // Since the access counts will be accumulated across the entire allocation,
475     // we only update the shadow access count for the first location and thus
476     // don't need to worry about alignment and type size.
477     instrumentAddress(I, I, Access.Addr, Access.TypeSize, Access.IsWrite);
478   }
479 }
480 
481 void MemProfiler::instrumentAddress(Instruction *OrigIns,
482                                     Instruction *InsertBefore, Value *Addr,
483                                     uint32_t TypeSize, bool IsWrite) {
484   IRBuilder<> IRB(InsertBefore);
485   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
486 
487   if (ClUseCalls) {
488     IRB.CreateCall(MemProfMemoryAccessCallback[IsWrite], AddrLong);
489     return;
490   }
491 
492   // Create an inline sequence to compute shadow location, and increment the
493   // value by one.
494   Type *ShadowTy = Type::getInt64Ty(*C);
495   Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
496   Value *ShadowPtr = memToShadow(AddrLong, IRB);
497   Value *ShadowAddr = IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy);
498   Value *ShadowValue = IRB.CreateLoad(ShadowTy, ShadowAddr);
499   Value *Inc = ConstantInt::get(Type::getInt64Ty(*C), 1);
500   ShadowValue = IRB.CreateAdd(ShadowValue, Inc);
501   IRB.CreateStore(ShadowValue, ShadowAddr);
502 }
503 
504 // Create the variable for the profile file name.
505 void createProfileFileNameVar(Module &M) {
506   const MDString *MemProfFilename =
507       dyn_cast_or_null<MDString>(M.getModuleFlag("MemProfProfileFilename"));
508   if (!MemProfFilename)
509     return;
510   assert(!MemProfFilename->getString().empty() &&
511          "Unexpected MemProfProfileFilename metadata with empty string");
512   Constant *ProfileNameConst = ConstantDataArray::getString(
513       M.getContext(), MemProfFilename->getString(), true);
514   GlobalVariable *ProfileNameVar = new GlobalVariable(
515       M, ProfileNameConst->getType(), /*isConstant=*/true,
516       GlobalValue::WeakAnyLinkage, ProfileNameConst, MemProfFilenameVar);
517   Triple TT(M.getTargetTriple());
518   if (TT.supportsCOMDAT()) {
519     ProfileNameVar->setLinkage(GlobalValue::ExternalLinkage);
520     ProfileNameVar->setComdat(M.getOrInsertComdat(MemProfFilenameVar));
521   }
522 }
523 
524 bool ModuleMemProfiler::instrumentModule(Module &M) {
525   // Create a module constructor.
526   std::string MemProfVersion = std::to_string(LLVM_MEM_PROFILER_VERSION);
527   std::string VersionCheckName =
528       ClInsertVersionCheck ? (MemProfVersionCheckNamePrefix + MemProfVersion)
529                            : "";
530   std::tie(MemProfCtorFunction, std::ignore) =
531       createSanitizerCtorAndInitFunctions(M, MemProfModuleCtorName,
532                                           MemProfInitName, /*InitArgTypes=*/{},
533                                           /*InitArgs=*/{}, VersionCheckName);
534 
535   const uint64_t Priority = getCtorAndDtorPriority(TargetTriple);
536   appendToGlobalCtors(M, MemProfCtorFunction, Priority);
537 
538   createProfileFileNameVar(M);
539 
540   return true;
541 }
542 
543 void MemProfiler::initializeCallbacks(Module &M) {
544   IRBuilder<> IRB(*C);
545 
546   for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
547     const std::string TypeStr = AccessIsWrite ? "store" : "load";
548 
549     SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
550     SmallVector<Type *, 2> Args1{1, IntptrTy};
551     MemProfMemoryAccessCallbackSized[AccessIsWrite] =
552         M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr + "N",
553                               FunctionType::get(IRB.getVoidTy(), Args2, false));
554 
555     MemProfMemoryAccessCallback[AccessIsWrite] =
556         M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr,
557                               FunctionType::get(IRB.getVoidTy(), Args1, false));
558   }
559   MemProfMemmove = M.getOrInsertFunction(
560       ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
561       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
562   MemProfMemcpy = M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memcpy",
563                                         IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
564                                         IRB.getInt8PtrTy(), IntptrTy);
565   MemProfMemset = M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memset",
566                                         IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
567                                         IRB.getInt32Ty(), IntptrTy);
568 }
569 
570 bool MemProfiler::maybeInsertMemProfInitAtFunctionEntry(Function &F) {
571   // For each NSObject descendant having a +load method, this method is invoked
572   // by the ObjC runtime before any of the static constructors is called.
573   // Therefore we need to instrument such methods with a call to __memprof_init
574   // at the beginning in order to initialize our runtime before any access to
575   // the shadow memory.
576   // We cannot just ignore these methods, because they may call other
577   // instrumented functions.
578   if (F.getName().find(" load]") != std::string::npos) {
579     FunctionCallee MemProfInitFunction =
580         declareSanitizerInitFunction(*F.getParent(), MemProfInitName, {});
581     IRBuilder<> IRB(&F.front(), F.front().begin());
582     IRB.CreateCall(MemProfInitFunction, {});
583     return true;
584   }
585   return false;
586 }
587 
588 bool MemProfiler::insertDynamicShadowAtFunctionEntry(Function &F) {
589   IRBuilder<> IRB(&F.front().front());
590   Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
591       MemProfShadowMemoryDynamicAddress, IntptrTy);
592   if (F.getParent()->getPICLevel() == PICLevel::NotPIC)
593     cast<GlobalVariable>(GlobalDynamicAddress)->setDSOLocal(true);
594   DynamicShadowOffset = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
595   return true;
596 }
597 
// Top-level per-function driver: collect interesting memory operations and
// mem intrinsics first, then instrument them in a second pass (collection is
// separated from mutation so the iteration isn't invalidated by inserted IR).
bool MemProfiler::instrumentFunction(Function &F) {
  // Skip declarations-with-bodies owned elsewhere, an explicitly excluded
  // debug function, and our own runtime's functions.
  if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
    return false;
  if (ClDebugFunc == F.getName())
    return false;
  if (F.getName().startswith("__memprof_"))
    return false;

  bool FunctionModified = false;

  // If needed, insert __memprof_init.
  // This function needs to be called even if the function body is not
  // instrumented.
  if (maybeInsertMemProfInitAtFunctionEntry(F))
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "MEMPROF instrumenting:\n" << F << "\n");

  initializeCallbacks(*F.getParent());

  FunctionModified |= insertDynamicShadowAtFunctionEntry(F);

  SmallVector<Instruction *, 16> ToInstrument;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isInterestingMemoryAccess(&Inst) || isa<MemIntrinsic>(Inst))
        ToInstrument.push_back(&Inst);
    }
  }

  int NumInstrumented = 0;
  for (auto *Inst : ToInstrument) {
    // ClDebugMin/ClDebugMax (when both >= 0) restrict instrumentation to a
    // sub-range of the collected operations, for debugging the pass.
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      // Re-classify here: anything not an interesting access must be a
      // MemIntrinsic (the only other way it got into ToInstrument).
      Optional<InterestingMemoryAccess> Access =
          isInterestingMemoryAccess(Inst);
      if (Access)
        instrumentMop(Inst, F.getParent()->getDataLayout(), *Access);
      else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  // Note: counts collected operations, even those skipped by the debug
  // range above.
  if (NumInstrumented > 0)
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "MEMPROF done instrumenting: " << FunctionModified << " "
                    << F << "\n");

  return FunctionModified;
}
652