xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp (revision 924226fba12cc9a228c73b956e1b7fa24c60b055)
1 //===- MemorySanitizer.cpp - detector of uninitialized reads --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of MemorySanitizer, a detector of uninitialized
11 /// reads.
12 ///
13 /// The algorithm of the tool is similar to Memcheck
14 /// (http://goo.gl/QKbem). We associate a few shadow bits with every
15 /// byte of the application memory, poison the shadow of the malloc-ed
16 /// or alloca-ed memory, load the shadow bits on every memory read,
17 /// propagate the shadow bits through some of the arithmetic
18 /// instruction (including MOV), store the shadow bits on every memory
19 /// write, report a bug on some other instructions (e.g. JMP) if the
20 /// associated shadow is poisoned.
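///
/// For illustration only (a rough sketch, not the exact IR the pass emits):
/// for an addition c = a + b, the default propagation approximates the
/// result shadow as the bitwise OR of the operand shadows,
///   shadow(c) = shadow(a) | shadow(b)
/// so the result is considered poisoned whenever either operand is.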
21 ///
22 /// But there are differences too. The first and the major one:
23 /// compiler instrumentation instead of binary instrumentation. This
24 /// gives us much better register allocation, possible compiler
25 /// optimizations and a fast start-up. But this brings a major issue
26 /// as well: msan needs to see all program events, including system
27 /// calls and reads/writes in system libraries, so we either need to
28 /// compile *everything* with msan or use a binary translation
29 /// component (e.g. DynamoRIO) to instrument pre-built libraries.
30 /// Another difference from Memcheck is that we use 8 shadow bits per
31 /// byte of application memory and use a direct shadow mapping. This
32 /// greatly simplifies the instrumentation code and avoids races on
33 /// shadow updates (Memcheck is single-threaded so races are not a
34 /// concern there. Memcheck uses 2 shadow bits per byte with a slow
35 /// path storage that uses 8 bits per byte).
36 ///
37 /// The default value of shadow is 0, which means "clean" (not poisoned).
38 ///
39 /// Every module initializer should call __msan_init to ensure that the
40 /// shadow memory is ready. On error, __msan_warning is called. Since
41 /// parameters and return values may be passed via registers, we have a
42 /// specialized thread-local shadow for return values
43 /// (__msan_retval_tls) and parameters (__msan_param_tls).
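///
/// As a sketch of how these TLS slots are used (details vary by target and
/// calling convention): before a call, the caller writes each argument's
/// shadow into __msan_param_tls; the callee reads its parameter shadow from
/// the same slots and, on return, writes the return value's shadow into
/// __msan_retval_tls for the caller to read back.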
44 ///
45 ///                           Origin tracking.
46 ///
47 /// MemorySanitizer can track origins (allocation points) of all uninitialized
48 /// values. This behavior is controlled with a flag (msan-track-origins) and is
49 /// disabled by default.
50 ///
51 /// Origins are 4-byte values created and interpreted by the runtime library.
52 /// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
53 /// of application memory. Propagation of origins is basically a bunch of
54 /// "select" instructions that pick the origin of a dirty argument, if an
55 /// instruction has one.
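///
/// Roughly (an illustrative sketch, not the exact IR): for c = a + b with
/// origin tracking enabled, the propagated origin is
///   origin(c) = (shadow(b) != 0) ? origin(b) : origin(a)
/// i.e. the origin of a poisoned operand wins over that of a clean one.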
56 ///
57 /// Every 4 aligned, consecutive bytes of application memory have one origin
58 /// value associated with them. If these bytes contain uninitialized data
59 /// coming from 2 different allocations, the last store wins. Because of this,
60 /// MemorySanitizer reports can show unrelated origins, but this is unlikely in
61 /// practice.
62 ///
63 /// Origins are meaningless for fully initialized values, so MemorySanitizer
64 /// avoids storing origin to memory when a fully initialized value is stored.
65 /// This way it avoids needlessly overwriting the origin of the 4-byte region
66 /// on a short (e.g. 1-byte) clean store, and it is also good for performance.
67 ///
68 ///                            Atomic handling.
69 ///
70 /// Ideally, every atomic store of an application value should update the
71 /// corresponding shadow location in an atomic way. Unfortunately, atomic store
72 /// of two disjoint locations cannot be done without severe slowdown.
73 ///
74 /// Therefore, we implement an approximation that may err on the safe side.
75 /// In this implementation, every atomically accessed location in the program
76 /// may only change from (partially) uninitialized to fully initialized, but
77 /// not the other way around. We load the shadow _after_ the application load,
78 /// and we store the shadow _before_ the app store. Also, we always store clean
79 /// shadow (if the application store is atomic). This way, if the store-load
80 /// pair constitutes a happens-before arc, shadow store and load are correctly
81 /// ordered such that the load will get either the value that was stored, or
82 /// some later value (which is always clean).
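///
/// Schematically (a simplification of what the pass actually emits):
///   store clean shadow for p      ; shadow store comes first
///   atomic store v to p           ; app store, release or stronger ordering
///   ...
///   v2 = atomic load from p       ; app load, acquire or stronger ordering
///   load shadow of p              ; shadow load comes after the app load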
83 ///
84 /// This does not work very well with Compare-And-Swap (CAS) and
85 /// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
86 /// must store the new shadow before the app operation, and load the shadow
87 /// after the app operation. Computers don't work this way. Current
88 /// implementation ignores the load aspect of CAS/RMW, always returning a clean
89 /// value. It implements the store part as a simple atomic store by storing a
90 /// clean shadow.
91 ///
92 ///                      Instrumenting inline assembly.
93 ///
94 /// For inline assembly code LLVM has little idea about which memory locations
95 /// become initialized depending on the arguments. It may be possible to figure
96 /// out which arguments are meant to point to inputs and outputs, but the
97 /// actual semantics may only be visible at runtime. In the Linux kernel it's
98 /// also possible that the arguments only indicate the offset for a base taken
99 /// from a segment register, so it's dangerous to treat any asm() arguments as
100 /// pointers. We take a conservative approach, generating calls to
101 ///   __msan_instrument_asm_store(ptr, size),
102 /// which defers the memory unpoisoning to the runtime library.
103 /// The latter can perform more complex address checks to figure out whether
104 /// it's safe to touch the shadow memory.
105 /// Like with atomic operations, we call __msan_instrument_asm_store() before
106 /// the assembly call, so that changes to the shadow memory will be seen by
107 /// other threads together with main memory initialization.
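///
/// For instance (a sketch under the conservative handling described above),
/// for an asm statement writing through a pointer argument p of type T*, the
/// pass emits a call roughly equivalent to
///   __msan_instrument_asm_store(p, sizeof(T))
/// before the asm statement, and the runtime decides whether that range of
/// shadow can safely be touched.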
108 ///
109 ///                  KernelMemorySanitizer (KMSAN) implementation.
110 ///
111 /// The major differences between KMSAN and MSan instrumentation are:
112 ///  - KMSAN always tracks the origins and implies msan-keep-going=true;
113 ///  - KMSAN allocates shadow and origin memory for each page separately, so
114 ///    there are no explicit accesses to shadow and origin in the
115 ///    instrumentation.
116 ///    Shadow and origin values for a particular X-byte memory location
117 ///    (X=1,2,4,8) are accessed through pointers obtained via the
118 ///      __msan_metadata_ptr_for_load_X(ptr)
119 ///      __msan_metadata_ptr_for_store_X(ptr)
120 ///    functions. The corresponding functions check that the X-byte accesses
121 ///    are possible and return the pointers to shadow and origin memory
///    (a short sketch of this scheme follows this list).
122 ///    Arbitrary sized accesses are handled with:
123 ///      __msan_metadata_ptr_for_load_n(ptr, size)
124 ///      __msan_metadata_ptr_for_store_n(ptr, size);
125 ///  - TLS variables are stored in a single per-task struct. A call to a
126 ///    function __msan_get_context_state() returning a pointer to that struct
127 ///    is inserted into every instrumented function before the entry block;
128 ///  - __msan_warning() takes a 32-bit origin parameter;
129 ///  - local variables are poisoned with __msan_poison_alloca() upon function
130 ///    entry and unpoisoned with __msan_unpoison_alloca() before leaving the
131 ///    function;
132 ///  - the pass doesn't declare any global variables or add global constructors
133 ///    to the translation unit.
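///
/// As an illustrative sketch of the metadata-pointer scheme above (not the
/// exact IR), a 4-byte load from %addr is instrumented roughly as
///   %meta = call { i8*, i32* } @__msan_metadata_ptr_for_load_4(i8* %addr)
/// where the first element of the returned pair points at the shadow and the
/// second at the origin of the accessed bytes.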
134 ///
135 /// Also, KMSAN currently ignores uninitialized memory passed into inline asm
136 /// calls, making sure we're on the safe side wrt. possible false positives.
137 ///
138 ///  KernelMemorySanitizer only supports X86_64 at the moment.
139 ///
140 //
141 // FIXME: This sanitizer does not yet handle scalable vectors
142 //
143 //===----------------------------------------------------------------------===//
144 
145 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
146 #include "llvm/ADT/APInt.h"
147 #include "llvm/ADT/ArrayRef.h"
148 #include "llvm/ADT/DepthFirstIterator.h"
149 #include "llvm/ADT/SmallSet.h"
150 #include "llvm/ADT/SmallString.h"
151 #include "llvm/ADT/SmallVector.h"
152 #include "llvm/ADT/StringExtras.h"
153 #include "llvm/ADT/StringRef.h"
154 #include "llvm/ADT/Triple.h"
155 #include "llvm/Analysis/TargetLibraryInfo.h"
156 #include "llvm/Analysis/ValueTracking.h"
157 #include "llvm/IR/Argument.h"
158 #include "llvm/IR/Attributes.h"
159 #include "llvm/IR/BasicBlock.h"
160 #include "llvm/IR/CallingConv.h"
161 #include "llvm/IR/Constant.h"
162 #include "llvm/IR/Constants.h"
163 #include "llvm/IR/DataLayout.h"
164 #include "llvm/IR/DerivedTypes.h"
165 #include "llvm/IR/Function.h"
166 #include "llvm/IR/GlobalValue.h"
167 #include "llvm/IR/GlobalVariable.h"
168 #include "llvm/IR/IRBuilder.h"
169 #include "llvm/IR/InlineAsm.h"
170 #include "llvm/IR/InstVisitor.h"
171 #include "llvm/IR/InstrTypes.h"
172 #include "llvm/IR/Instruction.h"
173 #include "llvm/IR/Instructions.h"
174 #include "llvm/IR/IntrinsicInst.h"
175 #include "llvm/IR/Intrinsics.h"
176 #include "llvm/IR/IntrinsicsX86.h"
177 #include "llvm/IR/LLVMContext.h"
178 #include "llvm/IR/MDBuilder.h"
179 #include "llvm/IR/Module.h"
180 #include "llvm/IR/Type.h"
181 #include "llvm/IR/Value.h"
182 #include "llvm/IR/ValueMap.h"
183 #include "llvm/InitializePasses.h"
184 #include "llvm/Pass.h"
185 #include "llvm/Support/Alignment.h"
186 #include "llvm/Support/AtomicOrdering.h"
187 #include "llvm/Support/Casting.h"
188 #include "llvm/Support/CommandLine.h"
189 #include "llvm/Support/Compiler.h"
190 #include "llvm/Support/Debug.h"
191 #include "llvm/Support/ErrorHandling.h"
192 #include "llvm/Support/MathExtras.h"
193 #include "llvm/Support/raw_ostream.h"
194 #include "llvm/Transforms/Instrumentation.h"
195 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
196 #include "llvm/Transforms/Utils/Local.h"
197 #include "llvm/Transforms/Utils/ModuleUtils.h"
198 #include <algorithm>
199 #include <cassert>
200 #include <cstddef>
201 #include <cstdint>
202 #include <memory>
203 #include <string>
204 #include <tuple>
205 
206 using namespace llvm;
207 
208 #define DEBUG_TYPE "msan"
209 
210 static const unsigned kOriginSize = 4;
211 static const Align kMinOriginAlignment = Align(4);
212 static const Align kShadowTLSAlignment = Align(8);
213 
214 // These constants must be kept in sync with the ones in msan.h.
215 static const unsigned kParamTLSSize = 800;
216 static const unsigned kRetvalTLSSize = 800;
217 
218 // Access sizes are powers of two: 1, 2, 4, 8.
219 static const size_t kNumberOfAccessSizes = 4;
220 
221 /// Track origins of uninitialized values.
222 ///
223 /// Adds a section to the MemorySanitizer report that points to the allocation
224 /// (stack or heap) the uninitialized bits came from originally.
225 static cl::opt<int> ClTrackOrigins("msan-track-origins",
226        cl::desc("Track origins (allocation sites) of poisoned memory"),
227        cl::Hidden, cl::init(0));
228 
229 static cl::opt<bool> ClKeepGoing("msan-keep-going",
230        cl::desc("keep going after reporting a UMR"),
231        cl::Hidden, cl::init(false));
232 
233 static cl::opt<bool> ClPoisonStack("msan-poison-stack",
234        cl::desc("poison uninitialized stack variables"),
235        cl::Hidden, cl::init(true));
236 
237 static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
238        cl::desc("poison uninitialized stack variables with a call"),
239        cl::Hidden, cl::init(false));
240 
241 static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
242        cl::desc("poison uninitialized stack variables with the given pattern"),
243        cl::Hidden, cl::init(0xff));
244 
245 static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
246        cl::desc("poison undef temps"),
247        cl::Hidden, cl::init(true));
248 
249 static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
250        cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
251        cl::Hidden, cl::init(true));
252 
253 static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
254        cl::desc("exact handling of relational integer ICmp"),
255        cl::Hidden, cl::init(false));
256 
257 static cl::opt<bool> ClHandleLifetimeIntrinsics(
258     "msan-handle-lifetime-intrinsics",
259     cl::desc(
260         "when possible, poison scoped variables at the beginning of the scope "
261         "(slower, but more precise)"),
262     cl::Hidden, cl::init(true));
263 
264 // When compiling the Linux kernel, we sometimes see false positives related to
265 // MSan being unable to understand that inline assembly calls may initialize
266 // local variables.
267 // This flag makes the compiler conservatively unpoison every memory location
268 // passed into an assembly call. Note that this may cause false positives.
269 // Because it's impossible to figure out the array sizes, we can only unpoison
270 // the first sizeof(type) bytes for each type* pointer.
271 // The instrumentation is only enabled in KMSAN builds, and only if
272 // -msan-handle-asm-conservative is on. This is done because we may want to
273 // quickly disable assembly instrumentation when it breaks.
274 static cl::opt<bool> ClHandleAsmConservative(
275     "msan-handle-asm-conservative",
276     cl::desc("conservative handling of inline assembly"), cl::Hidden,
277     cl::init(true));
278 
279 // This flag controls whether we check the shadow of the address
280 // operand of a load or store. Such bugs are very rare, since a load from
281 // a garbage address typically results in SEGV, but they still happen
282 // (e.g. only the lower bits of the address are garbage, or the access
283 // happens early at program startup where malloc-ed memory is more likely
284 // to be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
285 static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
286        cl::desc("report accesses through a pointer which has poisoned shadow"),
287        cl::Hidden, cl::init(true));
288 
289 static cl::opt<bool> ClEagerChecks(
290     "msan-eager-checks",
291     cl::desc("check arguments and return values at function call boundaries"),
292     cl::Hidden, cl::init(false));
293 
294 static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
295        cl::desc("print out instructions with default strict semantics"),
296        cl::Hidden, cl::init(false));
297 
298 static cl::opt<int> ClInstrumentationWithCallThreshold(
299     "msan-instrumentation-with-call-threshold",
300     cl::desc(
301         "If the function being instrumented requires more than "
302         "this number of checks and origin stores, use callbacks instead of "
303         "inline checks (-1 means never use callbacks)."),
304     cl::Hidden, cl::init(3500));
305 
306 static cl::opt<bool>
307     ClEnableKmsan("msan-kernel",
308                   cl::desc("Enable KernelMemorySanitizer instrumentation"),
309                   cl::Hidden, cl::init(false));
310 
311 static cl::opt<bool>
312     ClDisableChecks("msan-disable-checks",
313                     cl::desc("Apply no_sanitize to the whole file"), cl::Hidden,
314                     cl::init(false));
315 
316 // This is an experiment to enable handling of cases where the shadow is a
317 // non-zero compile-time constant. For some unexplainable reason such cases
318 // were silently ignored in the instrumentation.
319 static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
320        cl::desc("Insert checks for constant shadow values"),
321        cl::Hidden, cl::init(false));
322 
323 // This is off by default because of a bug in gold:
324 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002
325 static cl::opt<bool> ClWithComdat("msan-with-comdat",
326        cl::desc("Place MSan constructors in comdat sections"),
327        cl::Hidden, cl::init(false));
328 
329 // These options allow specifying custom memory map parameters.
330 // See MemoryMapParams for details.
331 static cl::opt<uint64_t> ClAndMask("msan-and-mask",
332                                    cl::desc("Define custom MSan AndMask"),
333                                    cl::Hidden, cl::init(0));
334 
335 static cl::opt<uint64_t> ClXorMask("msan-xor-mask",
336                                    cl::desc("Define custom MSan XorMask"),
337                                    cl::Hidden, cl::init(0));
338 
339 static cl::opt<uint64_t> ClShadowBase("msan-shadow-base",
340                                       cl::desc("Define custom MSan ShadowBase"),
341                                       cl::Hidden, cl::init(0));
342 
343 static cl::opt<uint64_t> ClOriginBase("msan-origin-base",
344                                       cl::desc("Define custom MSan OriginBase"),
345                                       cl::Hidden, cl::init(0));
346 
347 const char kMsanModuleCtorName[] = "msan.module_ctor";
348 const char kMsanInitName[] = "__msan_init";
349 
350 namespace {
351 
352 // Memory map parameters used in application-to-shadow address calculation.
353 // Offset = (Addr & ~AndMask) ^ XorMask
354 // Shadow = ShadowBase + Offset
355 // Origin = OriginBase + Offset
356 struct MemoryMapParams {
357   uint64_t AndMask;
358   uint64_t XorMask;
359   uint64_t ShadowBase;
360   uint64_t OriginBase;
361 };
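
// For example (illustrative only), with the non-legacy x86_64 Linux
// parameters defined below (AndMask = 0, XorMask = 0x500000000000,
// ShadowBase = 0, OriginBase = 0x100000000000), the shadow of an application
// address Addr lives at Addr ^ 0x500000000000 and its origin at
// 0x100000000000 + (Addr ^ 0x500000000000).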
362 
363 struct PlatformMemoryMapParams {
364   const MemoryMapParams *bits32;
365   const MemoryMapParams *bits64;
366 };
367 
368 } // end anonymous namespace
369 
370 // i386 Linux
371 static const MemoryMapParams Linux_I386_MemoryMapParams = {
372   0x000080000000,  // AndMask
373   0,               // XorMask (not used)
374   0,               // ShadowBase (not used)
375   0x000040000000,  // OriginBase
376 };
377 
378 // x86_64 Linux
379 static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
380 #ifdef MSAN_LINUX_X86_64_OLD_MAPPING
381   0x400000000000,  // AndMask
382   0,               // XorMask (not used)
383   0,               // ShadowBase (not used)
384   0x200000000000,  // OriginBase
385 #else
386   0,               // AndMask (not used)
387   0x500000000000,  // XorMask
388   0,               // ShadowBase (not used)
389   0x100000000000,  // OriginBase
390 #endif
391 };
392 
393 // mips64 Linux
394 static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
395   0,               // AndMask (not used)
396   0x008000000000,  // XorMask
397   0,               // ShadowBase (not used)
398   0x002000000000,  // OriginBase
399 };
400 
401 // ppc64 Linux
402 static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
403   0xE00000000000,  // AndMask
404   0x100000000000,  // XorMask
405   0x080000000000,  // ShadowBase
406   0x1C0000000000,  // OriginBase
407 };
408 
409 // s390x Linux
410 static const MemoryMapParams Linux_S390X_MemoryMapParams = {
411     0xC00000000000, // AndMask
412     0,              // XorMask (not used)
413     0x080000000000, // ShadowBase
414     0x1C0000000000, // OriginBase
415 };
416 
417 // aarch64 Linux
418 static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
419   0,               // AndMask (not used)
420   0x06000000000,   // XorMask
421   0,               // ShadowBase (not used)
422   0x01000000000,   // OriginBase
423 };
424 
425 // i386 FreeBSD
426 static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
427   0x000180000000,  // AndMask
428   0x000040000000,  // XorMask
429   0x000020000000,  // ShadowBase
430   0x000700000000,  // OriginBase
431 };
432 
433 // x86_64 FreeBSD
434 static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
435   0xc00000000000,  // AndMask
436   0x200000000000,  // XorMask
437   0x100000000000,  // ShadowBase
438   0x380000000000,  // OriginBase
439 };
440 
441 // x86_64 NetBSD
442 static const MemoryMapParams NetBSD_X86_64_MemoryMapParams = {
443   0,               // AndMask
444   0x500000000000,  // XorMask
445   0,               // ShadowBase
446   0x100000000000,  // OriginBase
447 };
448 
449 static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
450   &Linux_I386_MemoryMapParams,
451   &Linux_X86_64_MemoryMapParams,
452 };
453 
454 static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
455   nullptr,
456   &Linux_MIPS64_MemoryMapParams,
457 };
458 
459 static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
460   nullptr,
461   &Linux_PowerPC64_MemoryMapParams,
462 };
463 
464 static const PlatformMemoryMapParams Linux_S390_MemoryMapParams = {
465     nullptr,
466     &Linux_S390X_MemoryMapParams,
467 };
468 
469 static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
470   nullptr,
471   &Linux_AArch64_MemoryMapParams,
472 };
473 
474 static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
475   &FreeBSD_I386_MemoryMapParams,
476   &FreeBSD_X86_64_MemoryMapParams,
477 };
478 
479 static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = {
480   nullptr,
481   &NetBSD_X86_64_MemoryMapParams,
482 };
483 
484 namespace {
485 
486 /// Instrument functions of a module to detect uninitialized reads.
487 ///
488 /// Instantiating MemorySanitizer inserts the msan runtime library API function
489 /// declarations into the module if they don't exist already. Instantiating
490 /// ensures the __msan_init function is in the list of global constructors for
491 /// the module.
492 class MemorySanitizer {
493 public:
494   MemorySanitizer(Module &M, MemorySanitizerOptions Options)
495       : CompileKernel(Options.Kernel), TrackOrigins(Options.TrackOrigins),
496         Recover(Options.Recover), EagerChecks(Options.EagerChecks) {
497     initializeModule(M);
498   }
499 
500   // MSan cannot be moved or copied because of MapParams.
501   MemorySanitizer(MemorySanitizer &&) = delete;
502   MemorySanitizer &operator=(MemorySanitizer &&) = delete;
503   MemorySanitizer(const MemorySanitizer &) = delete;
504   MemorySanitizer &operator=(const MemorySanitizer &) = delete;
505 
506   bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);
507 
508 private:
509   friend struct MemorySanitizerVisitor;
510   friend struct VarArgAMD64Helper;
511   friend struct VarArgMIPS64Helper;
512   friend struct VarArgAArch64Helper;
513   friend struct VarArgPowerPC64Helper;
514   friend struct VarArgSystemZHelper;
515 
516   void initializeModule(Module &M);
517   void initializeCallbacks(Module &M);
518   void createKernelApi(Module &M);
519   void createUserspaceApi(Module &M);
520 
521   /// True if we're compiling the Linux kernel.
522   bool CompileKernel;
523   /// Track origins (allocation points) of uninitialized values.
524   int TrackOrigins;
525   bool Recover;
526   bool EagerChecks;
527 
528   LLVMContext *C;
529   Type *IntptrTy;
530   Type *OriginTy;
531 
532   // XxxTLS variables represent the per-thread state in MSan and per-task state
533   // in KMSAN.
534   // For the userspace these point to thread-local globals. In the kernel land
535   // they point to the members of a per-task struct obtained via a call to
536   // __msan_get_context_state().
537 
538   /// Thread-local shadow storage for function parameters.
539   Value *ParamTLS;
540 
541   /// Thread-local origin storage for function parameters.
542   Value *ParamOriginTLS;
543 
544   /// Thread-local shadow storage for function return value.
545   Value *RetvalTLS;
546 
547   /// Thread-local origin storage for function return value.
548   Value *RetvalOriginTLS;
549 
550   /// Thread-local shadow storage for in-register va_arg function
551   /// parameters (x86_64-specific).
552   Value *VAArgTLS;
553 
554   /// Thread-local shadow storage for in-register va_arg function
555   /// parameters (x86_64-specific).
556   Value *VAArgOriginTLS;
557 
558   /// Thread-local shadow storage for va_arg overflow area
559   /// (x86_64-specific).
560   Value *VAArgOverflowSizeTLS;
561 
562   /// Are the instrumentation callbacks set up?
563   bool CallbacksInitialized = false;
564 
565   /// The run-time callback to print a warning.
566   FunctionCallee WarningFn;
567 
568   // These arrays are indexed by log2(AccessSize).
569   FunctionCallee MaybeWarningFn[kNumberOfAccessSizes];
570   FunctionCallee MaybeStoreOriginFn[kNumberOfAccessSizes];
571 
572   /// Run-time helper that generates a new origin value for a stack
573   /// allocation.
574   FunctionCallee MsanSetAllocaOrigin4Fn;
575 
576   /// Run-time helper that poisons stack on function entry.
577   FunctionCallee MsanPoisonStackFn;
578 
579   /// Run-time helper that records a store (or any event) of an
580   /// uninitialized value and returns an updated origin id encoding this info.
581   FunctionCallee MsanChainOriginFn;
582 
583   /// Run-time helper that paints an origin over a region.
584   FunctionCallee MsanSetOriginFn;
585 
586   /// MSan runtime replacements for memmove, memcpy and memset.
587   FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
588 
589   /// KMSAN callback for task-local function argument shadow.
590   StructType *MsanContextStateTy;
591   FunctionCallee MsanGetContextStateFn;
592 
593   /// Functions for poisoning/unpoisoning local variables
594   FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
595 
596   /// Each of the MsanMetadataPtrXxx functions returns a pair of shadow/origin
597   /// pointers.
598   FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
599   FunctionCallee MsanMetadataPtrForLoad_1_8[4];
600   FunctionCallee MsanMetadataPtrForStore_1_8[4];
601   FunctionCallee MsanInstrumentAsmStoreFn;
602 
603   /// Helper to choose between different MsanMetadataPtrXxx().
604   FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);
605 
606   /// Memory map parameters used in application-to-shadow calculation.
607   const MemoryMapParams *MapParams;
608 
609   /// Custom memory map parameters used when -msan-shadow-base or
610   /// -msan-origin-base is provided.
611   MemoryMapParams CustomMapParams;
612 
613   MDNode *ColdCallWeights;
614 
615   /// Branch weights for origin store.
616   MDNode *OriginStoreWeights;
617 };
618 
619 void insertModuleCtor(Module &M) {
620   getOrCreateSanitizerCtorAndInitFunctions(
621       M, kMsanModuleCtorName, kMsanInitName,
622       /*InitArgTypes=*/{},
623       /*InitArgs=*/{},
624       // This callback is invoked when the functions are created the first
625       // time. Hook them into the global ctors list in that case:
626       [&](Function *Ctor, FunctionCallee) {
627         if (!ClWithComdat) {
628           appendToGlobalCtors(M, Ctor, 0);
629           return;
630         }
631         Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
632         Ctor->setComdat(MsanCtorComdat);
633         appendToGlobalCtors(M, Ctor, 0, Ctor);
634       });
635 }
636 
637 /// A legacy function pass for msan instrumentation.
638 ///
639 /// Instruments functions to detect uninitialized reads.
640 struct MemorySanitizerLegacyPass : public FunctionPass {
641   // Pass identification, replacement for typeid.
642   static char ID;
643 
644   MemorySanitizerLegacyPass(MemorySanitizerOptions Options = {})
645       : FunctionPass(ID), Options(Options) {
646     initializeMemorySanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
647   }
648   StringRef getPassName() const override { return "MemorySanitizerLegacyPass"; }
649 
650   void getAnalysisUsage(AnalysisUsage &AU) const override {
651     AU.addRequired<TargetLibraryInfoWrapperPass>();
652   }
653 
654   bool runOnFunction(Function &F) override {
655     return MSan->sanitizeFunction(
656         F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F));
657   }
658   bool doInitialization(Module &M) override;
659 
660   Optional<MemorySanitizer> MSan;
661   MemorySanitizerOptions Options;
662 };
663 
664 template <class T> T getOptOrDefault(const cl::opt<T> &Opt, T Default) {
665   return (Opt.getNumOccurrences() > 0) ? Opt : Default;
666 }
667 
668 } // end anonymous namespace
669 
670 MemorySanitizerOptions::MemorySanitizerOptions(int TO, bool R, bool K,
671                                                bool EagerChecks)
672     : Kernel(getOptOrDefault(ClEnableKmsan, K)),
673       TrackOrigins(getOptOrDefault(ClTrackOrigins, Kernel ? 2 : TO)),
674       Recover(getOptOrDefault(ClKeepGoing, Kernel || R)),
675       EagerChecks(getOptOrDefault(ClEagerChecks, EagerChecks)) {}
676 
677 PreservedAnalyses MemorySanitizerPass::run(Function &F,
678                                            FunctionAnalysisManager &FAM) {
679   MemorySanitizer Msan(*F.getParent(), Options);
680   if (Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
681     return PreservedAnalyses::none();
682   return PreservedAnalyses::all();
683 }
684 
685 PreservedAnalyses
686 ModuleMemorySanitizerPass::run(Module &M, ModuleAnalysisManager &AM) {
687   if (Options.Kernel)
688     return PreservedAnalyses::all();
689   insertModuleCtor(M);
690   return PreservedAnalyses::none();
691 }
692 
693 void MemorySanitizerPass::printPipeline(
694     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
695   static_cast<PassInfoMixin<MemorySanitizerPass> *>(this)->printPipeline(
696       OS, MapClassName2PassName);
697   OS << "<";
698   if (Options.Recover)
699     OS << "recover;";
700   if (Options.Kernel)
701     OS << "kernel;";
702   if (Options.EagerChecks)
703     OS << "eager-checks;";
704   OS << "track-origins=" << Options.TrackOrigins;
705   OS << ">";
706 }
707 
708 char MemorySanitizerLegacyPass::ID = 0;
709 
710 INITIALIZE_PASS_BEGIN(MemorySanitizerLegacyPass, "msan",
711                       "MemorySanitizer: detects uninitialized reads.", false,
712                       false)
713 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
714 INITIALIZE_PASS_END(MemorySanitizerLegacyPass, "msan",
715                     "MemorySanitizer: detects uninitialized reads.", false,
716                     false)
717 
718 FunctionPass *
719 llvm::createMemorySanitizerLegacyPassPass(MemorySanitizerOptions Options) {
720   return new MemorySanitizerLegacyPass(Options);
721 }
722 
723 /// Create a non-const global initialized with the given string.
724 ///
725 /// Creates a writable global for Str so that we can pass it to the
726 /// run-time lib. The runtime uses the first 4 bytes of the string to store
727 /// the frame ID, so the string needs to be mutable.
728 static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
729                                                             StringRef Str) {
730   Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
731   return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
732                             GlobalValue::PrivateLinkage, StrConst, "");
733 }
734 
735 /// Create KMSAN API callbacks.
736 void MemorySanitizer::createKernelApi(Module &M) {
737   IRBuilder<> IRB(*C);
738 
739   // These will be initialized in insertKmsanPrologue().
740   RetvalTLS = nullptr;
741   RetvalOriginTLS = nullptr;
742   ParamTLS = nullptr;
743   ParamOriginTLS = nullptr;
744   VAArgTLS = nullptr;
745   VAArgOriginTLS = nullptr;
746   VAArgOverflowSizeTLS = nullptr;
747 
748   WarningFn = M.getOrInsertFunction("__msan_warning", IRB.getVoidTy(),
749                                     IRB.getInt32Ty());
750   // Requests the per-task context state (kmsan_context_state*) from the
751   // runtime library.
752   MsanContextStateTy = StructType::get(
753       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
754       ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8),
755       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
756       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), /* va_arg_origin */
757       IRB.getInt64Ty(), ArrayType::get(OriginTy, kParamTLSSize / 4), OriginTy,
758       OriginTy);
759   MsanGetContextStateFn = M.getOrInsertFunction(
760       "__msan_get_context_state", PointerType::get(MsanContextStateTy, 0));
761 
762   Type *RetTy = StructType::get(PointerType::get(IRB.getInt8Ty(), 0),
763                                 PointerType::get(IRB.getInt32Ty(), 0));
764 
765   for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
766     std::string name_load =
767         "__msan_metadata_ptr_for_load_" + std::to_string(size);
768     std::string name_store =
769         "__msan_metadata_ptr_for_store_" + std::to_string(size);
770     MsanMetadataPtrForLoad_1_8[ind] = M.getOrInsertFunction(
771         name_load, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
772     MsanMetadataPtrForStore_1_8[ind] = M.getOrInsertFunction(
773         name_store, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
774   }
775 
776   MsanMetadataPtrForLoadN = M.getOrInsertFunction(
777       "__msan_metadata_ptr_for_load_n", RetTy,
778       PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
779   MsanMetadataPtrForStoreN = M.getOrInsertFunction(
780       "__msan_metadata_ptr_for_store_n", RetTy,
781       PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
782 
783   // Functions for poisoning and unpoisoning memory.
784   MsanPoisonAllocaFn =
785       M.getOrInsertFunction("__msan_poison_alloca", IRB.getVoidTy(),
786                             IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy());
787   MsanUnpoisonAllocaFn = M.getOrInsertFunction(
788       "__msan_unpoison_alloca", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
789 }
790 
791 static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
792   return M.getOrInsertGlobal(Name, Ty, [&] {
793     return new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
794                               nullptr, Name, nullptr,
795                               GlobalVariable::InitialExecTLSModel);
796   });
797 }
798 
799 /// Insert declarations for userspace-specific functions and globals.
800 void MemorySanitizer::createUserspaceApi(Module &M) {
801   IRBuilder<> IRB(*C);
802 
803   // Create the callback.
804   // FIXME: this function should have "Cold" calling conv,
805   // which is not yet implemented.
806   StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
807                                     : "__msan_warning_with_origin_noreturn";
808   WarningFn =
809       M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), IRB.getInt32Ty());
810 
811   // Create the global TLS variables.
812   RetvalTLS =
813       getOrInsertGlobal(M, "__msan_retval_tls",
814                         ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8));
815 
816   RetvalOriginTLS = getOrInsertGlobal(M, "__msan_retval_origin_tls", OriginTy);
817 
818   ParamTLS =
819       getOrInsertGlobal(M, "__msan_param_tls",
820                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
821 
822   ParamOriginTLS =
823       getOrInsertGlobal(M, "__msan_param_origin_tls",
824                         ArrayType::get(OriginTy, kParamTLSSize / 4));
825 
826   VAArgTLS =
827       getOrInsertGlobal(M, "__msan_va_arg_tls",
828                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
829 
830   VAArgOriginTLS =
831       getOrInsertGlobal(M, "__msan_va_arg_origin_tls",
832                         ArrayType::get(OriginTy, kParamTLSSize / 4));
833 
834   VAArgOverflowSizeTLS =
835       getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls", IRB.getInt64Ty());
836 
837   for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
838        AccessSizeIndex++) {
839     unsigned AccessSize = 1 << AccessSizeIndex;
840     std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
841     SmallVector<std::pair<unsigned, Attribute>, 2> MaybeWarningFnAttrs;
842     MaybeWarningFnAttrs.push_back(std::make_pair(
843         AttributeList::FirstArgIndex, Attribute::get(*C, Attribute::ZExt)));
844     MaybeWarningFnAttrs.push_back(std::make_pair(
845         AttributeList::FirstArgIndex + 1, Attribute::get(*C, Attribute::ZExt)));
846     MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
847         FunctionName, AttributeList::get(*C, MaybeWarningFnAttrs),
848         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
849 
850     FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
851     SmallVector<std::pair<unsigned, Attribute>, 2> MaybeStoreOriginFnAttrs;
852     MaybeStoreOriginFnAttrs.push_back(std::make_pair(
853         AttributeList::FirstArgIndex, Attribute::get(*C, Attribute::ZExt)));
854     MaybeStoreOriginFnAttrs.push_back(std::make_pair(
855         AttributeList::FirstArgIndex + 2, Attribute::get(*C, Attribute::ZExt)));
856     MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
857         FunctionName, AttributeList::get(*C, MaybeStoreOriginFnAttrs),
858         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt8PtrTy(),
859         IRB.getInt32Ty());
860   }
861 
862   MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
863     "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
864     IRB.getInt8PtrTy(), IntptrTy);
865   MsanPoisonStackFn =
866       M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
867                             IRB.getInt8PtrTy(), IntptrTy);
868 }
869 
870 /// Insert extern declaration of runtime-provided functions and globals.
871 void MemorySanitizer::initializeCallbacks(Module &M) {
872   // Only do this once.
873   if (CallbacksInitialized)
874     return;
875 
876   IRBuilder<> IRB(*C);
877   // Initialize callbacks that are common for kernel and userspace
878   // instrumentation.
879   MsanChainOriginFn = M.getOrInsertFunction(
880     "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
881   MsanSetOriginFn =
882       M.getOrInsertFunction("__msan_set_origin", IRB.getVoidTy(),
883                             IRB.getInt8PtrTy(), IntptrTy, IRB.getInt32Ty());
884   MemmoveFn = M.getOrInsertFunction(
885     "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
886     IRB.getInt8PtrTy(), IntptrTy);
887   MemcpyFn = M.getOrInsertFunction(
888     "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
889     IntptrTy);
890   MemsetFn = M.getOrInsertFunction(
891     "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
892     IntptrTy);
893 
894   MsanInstrumentAsmStoreFn =
895       M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(),
896                             PointerType::get(IRB.getInt8Ty(), 0), IntptrTy);
897 
898   if (CompileKernel) {
899     createKernelApi(M);
900   } else {
901     createUserspaceApi(M);
902   }
903   CallbacksInitialized = true;
904 }
905 
906 FunctionCallee MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore,
907                                                              int size) {
908   FunctionCallee *Fns =
909       isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
910   switch (size) {
911   case 1:
912     return Fns[0];
913   case 2:
914     return Fns[1];
915   case 4:
916     return Fns[2];
917   case 8:
918     return Fns[3];
919   default:
920     return nullptr;
921   }
922 }
923 
924 /// Module-level initialization.
925 ///
926 /// Inserts a call to __msan_init into the module's constructor list.
927 void MemorySanitizer::initializeModule(Module &M) {
928   auto &DL = M.getDataLayout();
929 
930   bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
931   bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
932   // Check the overrides first
933   if (ShadowPassed || OriginPassed) {
934     CustomMapParams.AndMask = ClAndMask;
935     CustomMapParams.XorMask = ClXorMask;
936     CustomMapParams.ShadowBase = ClShadowBase;
937     CustomMapParams.OriginBase = ClOriginBase;
938     MapParams = &CustomMapParams;
939   } else {
940     Triple TargetTriple(M.getTargetTriple());
941     switch (TargetTriple.getOS()) {
942       case Triple::FreeBSD:
943         switch (TargetTriple.getArch()) {
944           case Triple::x86_64:
945             MapParams = FreeBSD_X86_MemoryMapParams.bits64;
946             break;
947           case Triple::x86:
948             MapParams = FreeBSD_X86_MemoryMapParams.bits32;
949             break;
950           default:
951             report_fatal_error("unsupported architecture");
952         }
953         break;
954       case Triple::NetBSD:
955         switch (TargetTriple.getArch()) {
956           case Triple::x86_64:
957             MapParams = NetBSD_X86_MemoryMapParams.bits64;
958             break;
959           default:
960             report_fatal_error("unsupported architecture");
961         }
962         break;
963       case Triple::Linux:
964         switch (TargetTriple.getArch()) {
965           case Triple::x86_64:
966             MapParams = Linux_X86_MemoryMapParams.bits64;
967             break;
968           case Triple::x86:
969             MapParams = Linux_X86_MemoryMapParams.bits32;
970             break;
971           case Triple::mips64:
972           case Triple::mips64el:
973             MapParams = Linux_MIPS_MemoryMapParams.bits64;
974             break;
975           case Triple::ppc64:
976           case Triple::ppc64le:
977             MapParams = Linux_PowerPC_MemoryMapParams.bits64;
978             break;
979           case Triple::systemz:
980             MapParams = Linux_S390_MemoryMapParams.bits64;
981             break;
982           case Triple::aarch64:
983           case Triple::aarch64_be:
984             MapParams = Linux_ARM_MemoryMapParams.bits64;
985             break;
986           default:
987             report_fatal_error("unsupported architecture");
988         }
989         break;
990       default:
991         report_fatal_error("unsupported operating system");
992     }
993   }
994 
995   C = &(M.getContext());
996   IRBuilder<> IRB(*C);
997   IntptrTy = IRB.getIntPtrTy(DL);
998   OriginTy = IRB.getInt32Ty();
999 
1000   ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
1001   OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
1002 
1003   if (!CompileKernel) {
1004     if (TrackOrigins)
1005       M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
1006         return new GlobalVariable(
1007             M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
1008             IRB.getInt32(TrackOrigins), "__msan_track_origins");
1009       });
1010 
1011     if (Recover)
1012       M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
1013         return new GlobalVariable(M, IRB.getInt32Ty(), true,
1014                                   GlobalValue::WeakODRLinkage,
1015                                   IRB.getInt32(Recover), "__msan_keep_going");
1016       });
1017 }
1018 }
1019 
1020 bool MemorySanitizerLegacyPass::doInitialization(Module &M) {
1021   if (!Options.Kernel)
1022     insertModuleCtor(M);
1023   MSan.emplace(M, Options);
1024   return true;
1025 }
1026 
1027 namespace {
1028 
1029 /// A helper class that handles instrumentation of VarArg
1030 /// functions on a particular platform.
1031 ///
1032 /// Implementations are expected to insert the instrumentation
1033 /// necessary to propagate argument shadow through VarArg function
1034 /// calls. Visit* methods are called during an InstVisitor pass over
1035 /// the function, and should avoid creating new basic blocks. A new
1036 /// instance of this class is created for each instrumented function.
1037 struct VarArgHelper {
1038   virtual ~VarArgHelper() = default;
1039 
1040   /// Visit a CallBase.
1041   virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;
1042 
1043   /// Visit a va_start call.
1044   virtual void visitVAStartInst(VAStartInst &I) = 0;
1045 
1046   /// Visit a va_copy call.
1047   virtual void visitVACopyInst(VACopyInst &I) = 0;
1048 
1049   /// Finalize function instrumentation.
1050   ///
1051   /// This method is called after visiting all interesting (see above)
1052   /// instructions in a function.
1053   virtual void finalizeInstrumentation() = 0;
1054 };
1055 
1056 struct MemorySanitizerVisitor;
1057 
1058 } // end anonymous namespace
1059 
1060 static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
1061                                         MemorySanitizerVisitor &Visitor);
1062 
1063 static unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
1064   if (TypeSize <= 8) return 0;
1065   return Log2_32_Ceil((TypeSize + 7) / 8);
1066 }
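
// For instance, TypeSizeToSizeIndex(32) == Log2_32_Ceil(4) == 2, which picks
// the 4-byte ("_4") variants of the size-indexed runtime callbacks above.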
1067 
1068 namespace {
1069 
1070 /// This class does all the work for a given function. Store and Load
1071 /// instructions store and load corresponding shadow and origin
1072 /// values. Most instructions propagate shadow from arguments to their
1073 /// return values. Certain instructions (most importantly, BranchInst)
1074 /// test their argument shadow and print reports (with a runtime call) if it's
1075 /// non-zero.
1076 struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
1077   Function &F;
1078   MemorySanitizer &MS;
1079   SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
1080   ValueMap<Value*, Value*> ShadowMap, OriginMap;
1081   std::unique_ptr<VarArgHelper> VAHelper;
1082   const TargetLibraryInfo *TLI;
1083   Instruction *FnPrologueEnd;
1084 
1085   // The following flags disable parts of MSan instrumentation based on
1086   // exclusion list contents and command-line options.
1087   bool InsertChecks;
1088   bool PropagateShadow;
1089   bool PoisonStack;
1090   bool PoisonUndef;
1091 
1092   struct ShadowOriginAndInsertPoint {
1093     Value *Shadow;
1094     Value *Origin;
1095     Instruction *OrigIns;
1096 
1097     ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
1098       : Shadow(S), Origin(O), OrigIns(I) {}
1099   };
1100   SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
1101   bool InstrumentLifetimeStart = ClHandleLifetimeIntrinsics;
1102   SmallSet<AllocaInst *, 16> AllocaSet;
1103   SmallVector<std::pair<IntrinsicInst *, AllocaInst *>, 16> LifetimeStartList;
1104   SmallVector<StoreInst *, 16> StoreList;
1105 
1106   MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
1107                          const TargetLibraryInfo &TLI)
1108       : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)), TLI(&TLI) {
1109     bool SanitizeFunction =
1110         F.hasFnAttribute(Attribute::SanitizeMemory) && !ClDisableChecks;
1111     InsertChecks = SanitizeFunction;
1112     PropagateShadow = SanitizeFunction;
1113     PoisonStack = SanitizeFunction && ClPoisonStack;
1114     PoisonUndef = SanitizeFunction && ClPoisonUndef;
1115 
1116     // In the presence of unreachable blocks, we may see Phi nodes with
1117     // incoming values from such blocks. Since InstVisitor skips unreachable
1118     // blocks, such nodes will not have any shadow value associated with them.
1119     // It's easier to remove unreachable blocks than deal with missing shadow.
1120     removeUnreachableBlocks(F);
1121 
1122     MS.initializeCallbacks(*F.getParent());
1123     FnPrologueEnd = IRBuilder<>(F.getEntryBlock().getFirstNonPHI())
1124                         .CreateIntrinsic(Intrinsic::donothing, {}, {});
1125 
1126     if (MS.CompileKernel) {
1127       IRBuilder<> IRB(FnPrologueEnd);
1128       insertKmsanPrologue(IRB);
1129     }
1130 
1131     LLVM_DEBUG(if (!InsertChecks) dbgs()
1132                << "MemorySanitizer is not inserting checks into '"
1133                << F.getName() << "'\n");
1134   }
1135 
1136   bool isInPrologue(Instruction &I) {
1137     return I.getParent() == FnPrologueEnd->getParent() &&
1138            (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));
1139   }
1140 
1141   Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
1142     if (MS.TrackOrigins <= 1) return V;
1143     return IRB.CreateCall(MS.MsanChainOriginFn, V);
1144   }
1145 
1146   Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
1147     const DataLayout &DL = F.getParent()->getDataLayout();
1148     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1149     if (IntptrSize == kOriginSize) return Origin;
1150     assert(IntptrSize == kOriginSize * 2);
1151     Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
1152     return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
1153   }
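  // (Illustrative note: with a 64-bit IntptrTy, originToIntptr turns origin
  // 0x1234 into 0x0000123400001234, so paintOrigin below can fill two 4-byte
  // origin slots with a single aligned 8-byte store.)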
1154 
1155   /// Fill memory range with the given origin value.
1156   void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
1157                    unsigned Size, Align Alignment) {
1158     const DataLayout &DL = F.getParent()->getDataLayout();
1159     const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
1160     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1161     assert(IntptrAlignment >= kMinOriginAlignment);
1162     assert(IntptrSize >= kOriginSize);
1163 
1164     unsigned Ofs = 0;
1165     Align CurrentAlignment = Alignment;
1166     if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
1167       Value *IntptrOrigin = originToIntptr(IRB, Origin);
1168       Value *IntptrOriginPtr =
1169           IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
1170       for (unsigned i = 0; i < Size / IntptrSize; ++i) {
1171         Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
1172                        : IntptrOriginPtr;
1173         IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
1174         Ofs += IntptrSize / kOriginSize;
1175         CurrentAlignment = IntptrAlignment;
1176       }
1177     }
1178 
1179     for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
1180       Value *GEP =
1181           i ? IRB.CreateConstGEP1_32(MS.OriginTy, OriginPtr, i) : OriginPtr;
1182       IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
1183       CurrentAlignment = kMinOriginAlignment;
1184     }
1185   }
1186 
1187   void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
1188                    Value *OriginPtr, Align Alignment, bool AsCall) {
1189     const DataLayout &DL = F.getParent()->getDataLayout();
1190     const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1191     unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
1192     Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1193     if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1194       if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
1195         paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1196                     OriginAlignment);
1197       return;
1198     }
1199 
1200     unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
1201     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1202     if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
1203       FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
1204       Value *ConvertedShadow2 =
1205           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1206       CallBase *CB = IRB.CreateCall(
1207           Fn, {ConvertedShadow2,
1208                IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()), Origin});
1209       CB->addParamAttr(0, Attribute::ZExt);
1210       CB->addParamAttr(2, Attribute::ZExt);
1211     } else {
1212       Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
1213       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1214           Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
1215       IRBuilder<> IRBNew(CheckTerm);
1216       paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1217                   OriginAlignment);
1218     }
1219   }
1220 
1221   void materializeStores(bool InstrumentWithCalls) {
1222     for (StoreInst *SI : StoreList) {
1223       IRBuilder<> IRB(SI);
1224       Value *Val = SI->getValueOperand();
1225       Value *Addr = SI->getPointerOperand();
1226       Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1227       Value *ShadowPtr, *OriginPtr;
1228       Type *ShadowTy = Shadow->getType();
1229       const Align Alignment = SI->getAlign();
1230       const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1231       std::tie(ShadowPtr, OriginPtr) =
1232           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);
1233 
1234       StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
1235       LLVM_DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
1236       (void)NewSI;
1237 
1238       if (SI->isAtomic())
1239         SI->setOrdering(addReleaseOrdering(SI->getOrdering()));
1240 
1241       if (MS.TrackOrigins && !SI->isAtomic())
1242         storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
1243                     OriginAlignment, InstrumentWithCalls);
1244     }
1245   }
1246 
1247   /// Helper function to insert a warning at IRB's current insert point.
1248   void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
1249     if (!Origin)
1250       Origin = (Value *)IRB.getInt32(0);
1251     assert(Origin->getType()->isIntegerTy());
1252     IRB.CreateCall(MS.WarningFn, Origin)->setCannotMerge();
1253     // FIXME: Insert UnreachableInst if !MS.Recover?
1254     // This may invalidate some of the following checks and needs to be done
1255     // at the very end.
1256   }
1257 
1258   void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
1259                            bool AsCall) {
1260     IRBuilder<> IRB(OrigIns);
1261     LLVM_DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
1262     Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1263     LLVM_DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
1264 
1265     if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1266       if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
1267         insertWarningFn(IRB, Origin);
1268       }
1269       return;
1270     }
1271 
1272     const DataLayout &DL = OrigIns->getModule()->getDataLayout();
1273 
1274     unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
1275     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1276     if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
1277       FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
1278       Value *ConvertedShadow2 =
1279           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1280       CallBase *CB = IRB.CreateCall(
1281           Fn, {ConvertedShadow2,
1282                MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
1283       CB->addParamAttr(0, Attribute::ZExt);
1284       CB->addParamAttr(1, Attribute::ZExt);
1285     } else {
1286       Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
1287       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1288           Cmp, OrigIns,
1289           /* Unreachable */ !MS.Recover, MS.ColdCallWeights);
1290 
1291       IRB.SetInsertPoint(CheckTerm);
1292       insertWarningFn(IRB, Origin);
1293       LLVM_DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
1294     }
1295   }
1296 
1297   void materializeChecks(bool InstrumentWithCalls) {
1298     for (const auto &ShadowData : InstrumentationList) {
1299       Instruction *OrigIns = ShadowData.OrigIns;
1300       Value *Shadow = ShadowData.Shadow;
1301       Value *Origin = ShadowData.Origin;
1302       materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
1303     }
1304     LLVM_DEBUG(dbgs() << "DONE:\n" << F);
1305   }
1306 
1307   // Initializes the KMSAN per-task context state pointers in the prologue.
1308   void insertKmsanPrologue(IRBuilder<> &IRB) {
1309     Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
1310     Constant *Zero = IRB.getInt32(0);
1311     MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1312                                 {Zero, IRB.getInt32(0)}, "param_shadow");
1313     MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1314                                  {Zero, IRB.getInt32(1)}, "retval_shadow");
1315     MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1316                                 {Zero, IRB.getInt32(2)}, "va_arg_shadow");
1317     MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1318                                       {Zero, IRB.getInt32(3)}, "va_arg_origin");
1319     MS.VAArgOverflowSizeTLS =
1320         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1321                       {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
1322     MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1323                                       {Zero, IRB.getInt32(5)}, "param_origin");
1324     MS.RetvalOriginTLS =
1325         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1326                       {Zero, IRB.getInt32(6)}, "retval_origin");
1327   }
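  // Rough sketch of the emitted prologue (KMSAN only; pointer types elided):
  //   %state         = call @__msan_get_context_state()
  //   %param_shadow  = getelementptr %state, 0, 0
  //   %retval_shadow = getelementptr %state, 0, 1
  //   ... and so on for the va_arg and origin slots, so that later accesses to
  //   the MS.*TLS members read the per-task state instead of real TLS globals.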
1328 
1329   /// Add MemorySanitizer instrumentation to a function.
1330   bool runOnFunction() {
1331     // Iterate all BBs in depth-first order and create shadow instructions
1332     // for all instructions (where applicable).
1333     // For PHI nodes we create dummy shadow PHIs which will be finalized later.
1334     for (BasicBlock *BB : depth_first(FnPrologueEnd->getParent()))
1335       visit(*BB);
1336 
1337     // Finalize PHI nodes.
1338     for (PHINode *PN : ShadowPHINodes) {
1339       PHINode *PNS = cast<PHINode>(getShadow(PN));
1340       PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
1341       size_t NumValues = PN->getNumIncomingValues();
1342       for (size_t v = 0; v < NumValues; v++) {
1343         PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1344         if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1345       }
1346     }
1347 
1348     VAHelper->finalizeInstrumentation();
1349 
1350     // Poison llvm.lifetime.start intrinsics, if we haven't fallen back to
1351     // instrumenting only allocas.
1352     if (InstrumentLifetimeStart) {
1353       for (auto Item : LifetimeStartList) {
1354         instrumentAlloca(*Item.second, Item.first);
1355         AllocaSet.erase(Item.second);
1356       }
1357     }
1358     // Poison the allocas for which we didn't instrument the corresponding
1359     // lifetime intrinsics.
1360     for (AllocaInst *AI : AllocaSet)
1361       instrumentAlloca(*AI);
1362 
1363     bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
1364                                InstrumentationList.size() + StoreList.size() >
1365                                    (unsigned)ClInstrumentationWithCallThreshold;
1366 
1367     // Insert shadow value checks.
1368     materializeChecks(InstrumentWithCalls);
1369 
1370     // Delayed instrumentation of StoreInst.
1371     // This may not add new address checks.
1372     materializeStores(InstrumentWithCalls);
1373 
1374     return true;
1375   }
1376 
1377   /// Compute the shadow type that corresponds to a given Value.
1378   Type *getShadowTy(Value *V) {
1379     return getShadowTy(V->getType());
1380   }
1381 
1382   /// Compute the shadow type that corresponds to a given Type.
1383   Type *getShadowTy(Type *OrigTy) {
1384     if (!OrigTy->isSized()) {
1385       return nullptr;
1386     }
1387     // For integer type, shadow is the same as the original type.
1388     // This may return weird-sized types like i1.
1389     if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
1390       return IT;
1391     const DataLayout &DL = F.getParent()->getDataLayout();
1392     if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1393       uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
1394       return FixedVectorType::get(IntegerType::get(*MS.C, EltSize),
1395                                   cast<FixedVectorType>(VT)->getNumElements());
1396     }
1397     if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1398       return ArrayType::get(getShadowTy(AT->getElementType()),
1399                             AT->getNumElements());
1400     }
1401     if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
1402       SmallVector<Type*, 4> Elements;
1403       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1404         Elements.push_back(getShadowTy(ST->getElementType(i)));
1405       StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
1406       LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
1407       return Res;
1408     }
1409     uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
1410     return IntegerType::get(*MS.C, TypeSize);
1411   }
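  // Examples of the mapping above: i32 -> i32, <4 x float> -> <4 x i32>,
  // {i32, double} -> {i32, i64}, [8 x i16] -> [8 x i16]; any other sized type
  // (e.g. a pointer) falls through to an integer of the same bit width.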
1412 
1413   /// Flatten a vector type.
1414   Type *getShadowTyNoVec(Type *ty) {
1415     if (VectorType *vt = dyn_cast<VectorType>(ty))
1416       return IntegerType::get(*MS.C,
1417                               vt->getPrimitiveSizeInBits().getFixedSize());
1418     return ty;
1419   }
1420 
1421   /// Extract combined shadow of struct elements as a bool
1422   Value *collapseStructShadow(StructType *Struct, Value *Shadow,
1423                               IRBuilder<> &IRB) {
1424     Value *FalseVal = IRB.getIntN(/* width */ 1, /* value */ 0);
1425     Value *Aggregator = FalseVal;
1426 
1427     for (unsigned Idx = 0; Idx < Struct->getNumElements(); Idx++) {
1428       // Combine by ORing together each element's bool shadow
1429       Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
1430       Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1431       Value *ShadowBool = convertToBool(ShadowInner, IRB);
1432 
1433       if (Aggregator != FalseVal)
1434         Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
1435       else
1436         Aggregator = ShadowBool;
1437     }
1438 
1439     return Aggregator;
1440   }
1441 
1442   // Extract combined shadow of array elements
1443   Value *collapseArrayShadow(ArrayType *Array, Value *Shadow,
1444                              IRBuilder<> &IRB) {
1445     if (!Array->getNumElements())
1446       return IRB.getIntN(/* width */ 1, /* value */ 0);
1447 
1448     Value *FirstItem = IRB.CreateExtractValue(Shadow, 0);
1449     Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
1450 
1451     for (unsigned Idx = 1; Idx < Array->getNumElements(); Idx++) {
1452       Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
1453       Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1454       Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
1455     }
1456     return Aggregator;
1457   }
1458 
1459   /// Convert a shadow value to its flattened variant. The resulting
1460   /// shadow may not necessarily have the same bit width as the input
1461   /// value, but it will always be comparable to zero.
1462   Value *convertShadowToScalar(Value *V, IRBuilder<> &IRB) {
1463     if (StructType *Struct = dyn_cast<StructType>(V->getType()))
1464       return collapseStructShadow(Struct, V, IRB);
1465     if (ArrayType *Array = dyn_cast<ArrayType>(V->getType()))
1466       return collapseArrayShadow(Array, V, IRB);
1467     Type *Ty = V->getType();
1468     Type *NoVecTy = getShadowTyNoVec(Ty);
1469     if (Ty == NoVecTy) return V;
1470     return IRB.CreateBitCast(V, NoVecTy);
1471   }
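  // For example, a <4 x i32> shadow is bitcast to an i128, a [3 x i8] shadow is
  // OR-folded into a single i8, and a struct shadow is folded down to an i1.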
1472 
1473   // Convert a scalar value to an i1 by comparing with 0
1474   Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &name = "") {
1475     Type *VTy = V->getType();
1476     assert(VTy->isIntegerTy());
1477     if (VTy->getIntegerBitWidth() == 1)
1478       // Just converting a bool to a bool, so do nothing.
1479       return V;
1480     return IRB.CreateICmpNE(V, ConstantInt::get(VTy, 0), name);
1481   }
1482 
1483   /// Compute the integer shadow offset that corresponds to a given
1484   /// application address.
1485   ///
1486   /// Offset = (Addr & ~AndMask) ^ XorMask
1487   Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
1488     Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);
1489 
1490     uint64_t AndMask = MS.MapParams->AndMask;
1491     if (AndMask)
1492       OffsetLong =
1493           IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));
1494 
1495     uint64_t XorMask = MS.MapParams->XorMask;
1496     if (XorMask)
1497       OffsetLong =
1498           IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
1499     return OffsetLong;
1500   }
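  // When the mapping has AndMask == 0 the AND is skipped and the offset reduces
  // to Addr ^ XorMask; with XorMask == 0 it is just Addr & ~AndMask.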
1501 
1502   /// Compute the shadow and origin addresses corresponding to a given
1503   /// application address.
1504   ///
1505   /// Shadow = ShadowBase + Offset
1506   /// Origin = (OriginBase + Offset) & ~3ULL
1507   std::pair<Value *, Value *>
1508   getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
1509                               MaybeAlign Alignment) {
1510     Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1511     Value *ShadowLong = ShadowOffset;
1512     uint64_t ShadowBase = MS.MapParams->ShadowBase;
1513     if (ShadowBase != 0) {
1514       ShadowLong =
1515         IRB.CreateAdd(ShadowLong,
1516                       ConstantInt::get(MS.IntptrTy, ShadowBase));
1517     }
1518     Value *ShadowPtr =
1519         IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
1520     Value *OriginPtr = nullptr;
1521     if (MS.TrackOrigins) {
1522       Value *OriginLong = ShadowOffset;
1523       uint64_t OriginBase = MS.MapParams->OriginBase;
1524       if (OriginBase != 0)
1525         OriginLong = IRB.CreateAdd(OriginLong,
1526                                    ConstantInt::get(MS.IntptrTy, OriginBase));
1527       if (!Alignment || *Alignment < kMinOriginAlignment) {
1528         uint64_t Mask = kMinOriginAlignment.value() - 1;
1529         OriginLong =
1530             IRB.CreateAnd(OriginLong, ConstantInt::get(MS.IntptrTy, ~Mask));
1531       }
1532       OriginPtr =
1533           IRB.CreateIntToPtr(OriginLong, PointerType::get(MS.OriginTy, 0));
1534     }
1535     return std::make_pair(ShadowPtr, OriginPtr);
1536   }
1537 
1538   std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
1539                                                        IRBuilder<> &IRB,
1540                                                        Type *ShadowTy,
1541                                                        bool isStore) {
1542     Value *ShadowOriginPtrs;
1543     const DataLayout &DL = F.getParent()->getDataLayout();
1544     int Size = DL.getTypeStoreSize(ShadowTy);
1545 
1546     FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
1547     Value *AddrCast =
1548         IRB.CreatePointerCast(Addr, PointerType::get(IRB.getInt8Ty(), 0));
1549     if (Getter) {
1550       ShadowOriginPtrs = IRB.CreateCall(Getter, AddrCast);
1551     } else {
1552       Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
1553       ShadowOriginPtrs = IRB.CreateCall(isStore ? MS.MsanMetadataPtrForStoreN
1554                                                 : MS.MsanMetadataPtrForLoadN,
1555                                         {AddrCast, SizeVal});
1556     }
1557     Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
1558     ShadowPtr = IRB.CreatePointerCast(ShadowPtr, PointerType::get(ShadowTy, 0));
1559     Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
1560 
1561     return std::make_pair(ShadowPtr, OriginPtr);
1562   }
1563 
1564   std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
1565                                                  Type *ShadowTy,
1566                                                  MaybeAlign Alignment,
1567                                                  bool isStore) {
1568     if (MS.CompileKernel)
1569       return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
1570     return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
1571   }
1572 
1573   /// Compute the shadow address for a given function argument.
1574   ///
1575   /// Shadow = ParamTLS+ArgOffset.
1576   Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
1577                                  int ArgOffset) {
1578     Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
1579     if (ArgOffset)
1580       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1581     return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
1582                               "_msarg");
1583   }
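  // Arguments are laid out back to back in ParamTLS, each slot padded to
  // kShadowTLSAlignment (see the ArgOffset bookkeeping in getShadow() below);
  // e.g. with the usual 8-byte alignment the shadow of a second i32 argument
  // starts at ParamTLS + 8.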
1584 
1585   /// Compute the origin address for a given function argument.
1586   Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
1587                                  int ArgOffset) {
1588     if (!MS.TrackOrigins)
1589       return nullptr;
1590     Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
1591     if (ArgOffset)
1592       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1593     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
1594                               "_msarg_o");
1595   }
1596 
1597   /// Compute the shadow address for a retval.
1598   Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
1599     return IRB.CreatePointerCast(MS.RetvalTLS,
1600                                  PointerType::get(getShadowTy(A), 0),
1601                                  "_msret");
1602   }
1603 
1604   /// Compute the origin address for a retval.
1605   Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
1606     // We keep a single origin for the entire retval. Might be too optimistic.
1607     return MS.RetvalOriginTLS;
1608   }
1609 
1610   /// Set SV to be the shadow value for V.
1611   void setShadow(Value *V, Value *SV) {
1612     assert(!ShadowMap.count(V) && "Values may only have one shadow");
1613     ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1614   }
1615 
1616   /// Set Origin to be the origin value for V.
1617   void setOrigin(Value *V, Value *Origin) {
1618     if (!MS.TrackOrigins) return;
1619     assert(!OriginMap.count(V) && "Values may only have one origin");
1620     LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
1621     OriginMap[V] = Origin;
1622   }
1623 
1624   Constant *getCleanShadow(Type *OrigTy) {
1625     Type *ShadowTy = getShadowTy(OrigTy);
1626     if (!ShadowTy)
1627       return nullptr;
1628     return Constant::getNullValue(ShadowTy);
1629   }
1630 
1631   /// Create a clean shadow value for a given value.
1632   ///
1633   /// Clean shadow (all zeroes) means all bits of the value are defined
1634   /// (initialized).
1635   Constant *getCleanShadow(Value *V) {
1636     return getCleanShadow(V->getType());
1637   }
1638 
1639   /// Create a dirty shadow of a given shadow type.
1640   Constant *getPoisonedShadow(Type *ShadowTy) {
1641     assert(ShadowTy);
1642     if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1643       return Constant::getAllOnesValue(ShadowTy);
1644     if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1645       SmallVector<Constant *, 4> Vals(AT->getNumElements(),
1646                                       getPoisonedShadow(AT->getElementType()));
1647       return ConstantArray::get(AT, Vals);
1648     }
1649     if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1650       SmallVector<Constant *, 4> Vals;
1651       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1652         Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
1653       return ConstantStruct::get(ST, Vals);
1654     }
1655     llvm_unreachable("Unexpected shadow type");
1656   }
1657 
1658   /// Create a dirty shadow for a given value.
1659   Constant *getPoisonedShadow(Value *V) {
1660     Type *ShadowTy = getShadowTy(V);
1661     if (!ShadowTy)
1662       return nullptr;
1663     return getPoisonedShadow(ShadowTy);
1664   }
1665 
1666   /// Create a clean (zero) origin.
1667   Value *getCleanOrigin() {
1668     return Constant::getNullValue(MS.OriginTy);
1669   }
1670 
1671   /// Get the shadow value for a given Value.
1672   ///
1673   /// This function either returns the value set earlier with setShadow,
1674   /// or extracts it from ParamTLS (for function arguments).
1675   Value *getShadow(Value *V) {
1676     if (Instruction *I = dyn_cast<Instruction>(V)) {
1677       if (!PropagateShadow || I->getMetadata("nosanitize"))
1678         return getCleanShadow(V);
1679       // For instructions the shadow is already stored in the map.
1680       Value *Shadow = ShadowMap[V];
1681       if (!Shadow) {
1682         LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
1683         (void)I;
1684         assert(Shadow && "No shadow for a value");
1685       }
1686       return Shadow;
1687     }
1688     if (UndefValue *U = dyn_cast<UndefValue>(V)) {
1689       Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
1690                                                         : getCleanShadow(V);
1691       LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
1692       (void)U;
1693       return AllOnes;
1694     }
1695     if (Argument *A = dyn_cast<Argument>(V)) {
1696       // For arguments we compute the shadow on demand and store it in the map.
1697       Value **ShadowPtr = &ShadowMap[V];
1698       if (*ShadowPtr)
1699         return *ShadowPtr;
1700       Function *F = A->getParent();
1701       IRBuilder<> EntryIRB(FnPrologueEnd);
1702       unsigned ArgOffset = 0;
1703       const DataLayout &DL = F->getParent()->getDataLayout();
1704       for (auto &FArg : F->args()) {
1705         if (!FArg.getType()->isSized()) {
1706           LLVM_DEBUG(dbgs() << "Arg is not sized\n");
1707           continue;
1708         }
1709 
1710         unsigned Size = FArg.hasByValAttr()
1711                             ? DL.getTypeAllocSize(FArg.getParamByValType())
1712                             : DL.getTypeAllocSize(FArg.getType());
1713 
1714         if (A == &FArg) {
1715           bool Overflow = ArgOffset + Size > kParamTLSSize;
1716           if (FArg.hasByValAttr()) {
1717             // ByVal pointer itself has clean shadow. We copy the actual
1718             // argument shadow to the underlying memory.
1719             // Figure out maximal valid memcpy alignment.
1720             const Align ArgAlign = DL.getValueOrABITypeAlignment(
1721                 MaybeAlign(FArg.getParamAlignment()), FArg.getParamByValType());
1722             Value *CpShadowPtr, *CpOriginPtr;
1723             std::tie(CpShadowPtr, CpOriginPtr) =
1724                 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
1725                                    /*isStore*/ true);
1726             if (!PropagateShadow || Overflow) {
1727               // ParamTLS overflow.
1728               EntryIRB.CreateMemSet(
1729                   CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
1730                   Size, ArgAlign);
1731             } else {
1732               Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
1733               const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
1734               Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
1735                                                  CopyAlign, Size);
1736               LLVM_DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
1737               (void)Cpy;
1738 
1739               if (MS.TrackOrigins) {
1740                 Value *OriginPtr =
1741                     getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
1742                 // FIXME: OriginSize should be:
1743                 // alignTo(V % kMinOriginAlignment + Size, kMinOriginAlignment)
1744                 unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
1745                 EntryIRB.CreateMemCpy(
1746                     CpOriginPtr,
1747                     /* by getShadowOriginPtr */ kMinOriginAlignment, OriginPtr,
1748                     /* by origin_tls[ArgOffset] */ kMinOriginAlignment,
1749                     OriginSize);
1750               }
1751             }
1752           }
1753 
1754           if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
1755               (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
1756             *ShadowPtr = getCleanShadow(V);
1757             setOrigin(A, getCleanOrigin());
1758           } else {
1759             // Shadow over TLS
1760             Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
1761             *ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
1762                                                     kShadowTLSAlignment);
1763             if (MS.TrackOrigins) {
1764               Value *OriginPtr =
1765                   getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
1766               setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
1767             }
1768           }
1769           LLVM_DEBUG(dbgs()
1770                      << "  ARG:    " << FArg << " ==> " << **ShadowPtr << "\n");
1771           break;
1772         }
1773 
1774         ArgOffset += alignTo(Size, kShadowTLSAlignment);
1775       }
1776       assert(*ShadowPtr && "Could not find shadow for an argument");
1777       return *ShadowPtr;
1778     }
1779     // For everything else the shadow is zero.
1780     return getCleanShadow(V);
1781   }
1782 
1783   /// Get the shadow for i-th argument of the instruction I.
1784   Value *getShadow(Instruction *I, int i) {
1785     return getShadow(I->getOperand(i));
1786   }
1787 
1788   /// Get the origin for a value.
1789   Value *getOrigin(Value *V) {
1790     if (!MS.TrackOrigins) return nullptr;
1791     if (!PropagateShadow) return getCleanOrigin();
1792     if (isa<Constant>(V)) return getCleanOrigin();
1793     assert((isa<Instruction>(V) || isa<Argument>(V)) &&
1794            "Unexpected value type in getOrigin()");
1795     if (Instruction *I = dyn_cast<Instruction>(V)) {
1796       if (I->getMetadata("nosanitize"))
1797         return getCleanOrigin();
1798     }
1799     Value *Origin = OriginMap[V];
1800     assert(Origin && "Missing origin");
1801     return Origin;
1802   }
1803 
1804   /// Get the origin for i-th argument of the instruction I.
1805   Value *getOrigin(Instruction *I, int i) {
1806     return getOrigin(I->getOperand(i));
1807   }
1808 
1809   /// Remember the place where a shadow check should be inserted.
1810   ///
1811   /// This location will be later instrumented with a check that will print a
1812   /// UMR warning at runtime if the shadow value is not 0.
1813   void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
1814     assert(Shadow);
1815     if (!InsertChecks) return;
1816 #ifndef NDEBUG
1817     Type *ShadowTy = Shadow->getType();
1818     assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
1819             isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
1820            "Can only insert checks for integer, vector, and aggregate shadow "
1821            "types");
1822 #endif
1823     InstrumentationList.push_back(
1824         ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
1825   }
1826 
1827   /// Remember the place where a shadow check should be inserted.
1828   ///
1829   /// This location will be later instrumented with a check that will print a
1830   /// UMR warning at runtime if the value is not fully defined.
1831   void insertShadowCheck(Value *Val, Instruction *OrigIns) {
1832     assert(Val);
1833     Value *Shadow, *Origin;
1834     if (ClCheckConstantShadow) {
1835       Shadow = getShadow(Val);
1836       if (!Shadow) return;
1837       Origin = getOrigin(Val);
1838     } else {
1839       Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
1840       if (!Shadow) return;
1841       Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
1842     }
1843     insertShadowCheck(Shadow, Origin, OrigIns);
1844   }
1845 
1846   AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
1847     switch (a) {
1848       case AtomicOrdering::NotAtomic:
1849         return AtomicOrdering::NotAtomic;
1850       case AtomicOrdering::Unordered:
1851       case AtomicOrdering::Monotonic:
1852       case AtomicOrdering::Release:
1853         return AtomicOrdering::Release;
1854       case AtomicOrdering::Acquire:
1855       case AtomicOrdering::AcquireRelease:
1856         return AtomicOrdering::AcquireRelease;
1857       case AtomicOrdering::SequentiallyConsistent:
1858         return AtomicOrdering::SequentiallyConsistent;
1859     }
1860     llvm_unreachable("Unknown ordering");
1861   }
1862 
1863   Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
1864     constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
1865     uint32_t OrderingTable[NumOrderings] = {};
1866 
1867     OrderingTable[(int)AtomicOrderingCABI::relaxed] =
1868         OrderingTable[(int)AtomicOrderingCABI::release] =
1869             (int)AtomicOrderingCABI::release;
1870     OrderingTable[(int)AtomicOrderingCABI::consume] =
1871         OrderingTable[(int)AtomicOrderingCABI::acquire] =
1872             OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
1873                 (int)AtomicOrderingCABI::acq_rel;
1874     OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
1875         (int)AtomicOrderingCABI::seq_cst;
1876 
1877     return ConstantDataVector::get(IRB.getContext(),
1878                                    makeArrayRef(OrderingTable, NumOrderings));
1879   }
1880 
1881   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
1882     switch (a) {
1883       case AtomicOrdering::NotAtomic:
1884         return AtomicOrdering::NotAtomic;
1885       case AtomicOrdering::Unordered:
1886       case AtomicOrdering::Monotonic:
1887       case AtomicOrdering::Acquire:
1888         return AtomicOrdering::Acquire;
1889       case AtomicOrdering::Release:
1890       case AtomicOrdering::AcquireRelease:
1891         return AtomicOrdering::AcquireRelease;
1892       case AtomicOrdering::SequentiallyConsistent:
1893         return AtomicOrdering::SequentiallyConsistent;
1894     }
1895     llvm_unreachable("Unknown ordering");
1896   }
1897 
1898   Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
1899     constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
1900     uint32_t OrderingTable[NumOrderings] = {};
1901 
1902     OrderingTable[(int)AtomicOrderingCABI::relaxed] =
1903         OrderingTable[(int)AtomicOrderingCABI::acquire] =
1904             OrderingTable[(int)AtomicOrderingCABI::consume] =
1905                 (int)AtomicOrderingCABI::acquire;
1906     OrderingTable[(int)AtomicOrderingCABI::release] =
1907         OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
1908             (int)AtomicOrderingCABI::acq_rel;
1909     OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
1910         (int)AtomicOrderingCABI::seq_cst;
1911 
1912     return ConstantDataVector::get(IRB.getContext(),
1913                                    makeArrayRef(OrderingTable, NumOrderings));
1914   }
1915 
1916   // ------------------- Visitors.
1917   using InstVisitor<MemorySanitizerVisitor>::visit;
1918   void visit(Instruction &I) {
1919     if (I.getMetadata("nosanitize"))
1920       return;
1921     // Don't want to visit if we're in the prologue
1922     if (isInPrologue(I))
1923       return;
1924     InstVisitor<MemorySanitizerVisitor>::visit(I);
1925   }
1926 
1927   /// Instrument LoadInst
1928   ///
1929   /// Loads the corresponding shadow and (optionally) origin.
1930   /// Optionally, checks that the load address is fully defined.
1931   void visitLoadInst(LoadInst &I) {
1932     assert(I.getType()->isSized() && "Load type must have size");
1933     assert(!I.getMetadata("nosanitize"));
1934     IRBuilder<> IRB(I.getNextNode());
1935     Type *ShadowTy = getShadowTy(&I);
1936     Value *Addr = I.getPointerOperand();
1937     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
1938     const Align Alignment = assumeAligned(I.getAlignment());
1939     if (PropagateShadow) {
1940       std::tie(ShadowPtr, OriginPtr) =
1941           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
1942       setShadow(&I,
1943                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
1944     } else {
1945       setShadow(&I, getCleanShadow(&I));
1946     }
1947 
1948     if (ClCheckAccessAddress)
1949       insertShadowCheck(I.getPointerOperand(), &I);
1950 
1951     if (I.isAtomic())
1952       I.setOrdering(addAcquireOrdering(I.getOrdering()));
1953 
1954     if (MS.TrackOrigins) {
1955       if (PropagateShadow) {
1956         const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1957         setOrigin(
1958             &I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr, OriginAlignment));
1959       } else {
1960         setOrigin(&I, getCleanOrigin());
1961       }
1962     }
1963   }
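  // Rough shape of the instrumentation for "%x = load i32, i32* %p"
  // (illustrative only):
  //   %sp    = <shadow address computed from %p>
  //   %_msld = load i32, i32* %sp                 ; shadow of %x
  //   %orig  = load i32, i32* <origin address>    ; only with origin tracking
  // plus an address-shadow check on %p when ClCheckAccessAddress is set.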
1964 
1965   /// Instrument StoreInst
1966   ///
1967   /// Stores the corresponding shadow and (optionally) origin.
1968   /// Optionally, checks that the store address is fully defined.
1969   void visitStoreInst(StoreInst &I) {
1970     StoreList.push_back(&I);
1971     if (ClCheckAccessAddress)
1972       insertShadowCheck(I.getPointerOperand(), &I);
1973   }
1974 
1975   void handleCASOrRMW(Instruction &I) {
1976     assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
1977 
1978     IRBuilder<> IRB(&I);
1979     Value *Addr = I.getOperand(0);
1980     Value *Val = I.getOperand(1);
1981     Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, Val->getType(), Align(1),
1982                                           /*isStore*/ true)
1983                            .first;
1984 
1985     if (ClCheckAccessAddress)
1986       insertShadowCheck(Addr, &I);
1987 
1988     // Only test the conditional argument of cmpxchg instruction.
1989     // The other argument can potentially be uninitialized, but we cannot
1990     // detect this situation reliably without possible false positives.
1991     if (isa<AtomicCmpXchgInst>(I))
1992       insertShadowCheck(Val, &I);
1993 
1994     IRB.CreateStore(getCleanShadow(Val), ShadowPtr);
1995 
1996     setShadow(&I, getCleanShadow(&I));
1997     setOrigin(&I, getCleanOrigin());
1998   }
1999 
2000   void visitAtomicRMWInst(AtomicRMWInst &I) {
2001     handleCASOrRMW(I);
2002     I.setOrdering(addReleaseOrdering(I.getOrdering()));
2003   }
2004 
2005   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2006     handleCASOrRMW(I);
2007     I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
2008   }
2009 
2010   // Vector manipulation.
2011   void visitExtractElementInst(ExtractElementInst &I) {
2012     insertShadowCheck(I.getOperand(1), &I);
2013     IRBuilder<> IRB(&I);
2014     setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
2015               "_msprop"));
2016     setOrigin(&I, getOrigin(&I, 0));
2017   }
2018 
2019   void visitInsertElementInst(InsertElementInst &I) {
2020     insertShadowCheck(I.getOperand(2), &I);
2021     IRBuilder<> IRB(&I);
2022     setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
2023               I.getOperand(2), "_msprop"));
2024     setOriginForNaryOp(I);
2025   }
2026 
2027   void visitShuffleVectorInst(ShuffleVectorInst &I) {
2028     IRBuilder<> IRB(&I);
2029     setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
2030                                           I.getShuffleMask(), "_msprop"));
2031     setOriginForNaryOp(I);
2032   }
2033 
2034   // Casts.
2035   void visitSExtInst(SExtInst &I) {
2036     IRBuilder<> IRB(&I);
2037     setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
2038     setOrigin(&I, getOrigin(&I, 0));
2039   }
2040 
2041   void visitZExtInst(ZExtInst &I) {
2042     IRBuilder<> IRB(&I);
2043     setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
2044     setOrigin(&I, getOrigin(&I, 0));
2045   }
2046 
2047   void visitTruncInst(TruncInst &I) {
2048     IRBuilder<> IRB(&I);
2049     setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
2050     setOrigin(&I, getOrigin(&I, 0));
2051   }
2052 
2053   void visitBitCastInst(BitCastInst &I) {
2054     // Special case: if this is the bitcast (there is exactly 1 allowed) between
2055     // a musttail call and a ret, don't instrument. New instructions are not
2056     // allowed after a musttail call.
2057     if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
2058       if (CI->isMustTailCall())
2059         return;
2060     IRBuilder<> IRB(&I);
2061     setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
2062     setOrigin(&I, getOrigin(&I, 0));
2063   }
2064 
2065   void visitPtrToIntInst(PtrToIntInst &I) {
2066     IRBuilder<> IRB(&I);
2067     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
2068              "_msprop_ptrtoint"));
2069     setOrigin(&I, getOrigin(&I, 0));
2070   }
2071 
2072   void visitIntToPtrInst(IntToPtrInst &I) {
2073     IRBuilder<> IRB(&I);
2074     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
2075              "_msprop_inttoptr"));
2076     setOrigin(&I, getOrigin(&I, 0));
2077   }
2078 
2079   void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
2080   void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
2081   void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
2082   void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
2083   void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
2084   void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
2085 
2086   /// Propagate shadow for bitwise AND.
2087   ///
2088   /// This code is exact, i.e. if, for example, a bit in the left argument
2089   /// is defined and 0, then neither the value nor the definedness of the
2090   /// corresponding bit in B affects the resulting shadow.
2091   void visitAnd(BinaryOperator &I) {
2092     IRBuilder<> IRB(&I);
2093     //  "And" of 0 and a poisoned value results in an unpoisoned value.
2094     //  1&1 => 1;     0&1 => 0;     p&1 => p;
2095     //  1&0 => 0;     0&0 => 0;     p&0 => 0;
2096     //  1&p => p;     0&p => 0;     p&p => p;
2097     //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
2098     Value *S1 = getShadow(&I, 0);
2099     Value *S2 = getShadow(&I, 1);
2100     Value *V1 = I.getOperand(0);
2101     Value *V2 = I.getOperand(1);
2102     if (V1->getType() != S1->getType()) {
2103       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
2104       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
2105     }
2106     Value *S1S2 = IRB.CreateAnd(S1, S2);
2107     Value *V1S2 = IRB.CreateAnd(V1, S2);
2108     Value *S1V2 = IRB.CreateAnd(S1, V2);
2109     setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
2110     setOriginForNaryOp(I);
2111   }
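  // Worked example: if V1 = 0b0011 is fully defined (S1 = 0) and V2 is fully
  // poisoned (S2 = 0b1111), then S = 0 | (0b0011 & 0b1111) | 0 = 0b0011, i.e.
  // only the bits that V1 does not force to zero remain poisoned.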
2112 
2113   void visitOr(BinaryOperator &I) {
2114     IRBuilder<> IRB(&I);
2115     //  "Or" of 1 and a poisoned value results in an unpoisoned value.
2116     //  1|1 => 1;     0|1 => 1;     p|1 => 1;
2117     //  1|0 => 1;     0|0 => 0;     p|0 => p;
2118     //  1|p => 1;     0|p => p;     p|p => p;
2119     //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
2120     Value *S1 = getShadow(&I, 0);
2121     Value *S2 = getShadow(&I, 1);
2122     Value *V1 = IRB.CreateNot(I.getOperand(0));
2123     Value *V2 = IRB.CreateNot(I.getOperand(1));
2124     if (V1->getType() != S1->getType()) {
2125       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
2126       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
2127     }
2128     Value *S1S2 = IRB.CreateAnd(S1, S2);
2129     Value *V1S2 = IRB.CreateAnd(V1, S2);
2130     Value *S1V2 = IRB.CreateAnd(S1, V2);
2131     setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
2132     setOriginForNaryOp(I);
2133   }
2134 
2135   /// Default propagation of shadow and/or origin.
2136   ///
2137   /// This class implements the general case of shadow propagation, used in all
2138   /// cases where we don't know and/or don't care about what the operation
2139   /// actually does. It converts all input shadow values to a common type
2140   /// (extending or truncating as necessary), and bitwise OR's them.
2141   ///
2142   /// This is much cheaper than inserting checks (i.e. requiring inputs to be
2143   /// fully initialized), and less prone to false positives.
2144   ///
2145   /// This class also implements the general case of origin propagation. For a
2146   /// Nary operation, result origin is set to the origin of an argument that is
2147   /// not entirely initialized. If there is more than one such argument, the
2148   /// rightmost of them is picked. It does not matter which one is picked if all
2149   /// arguments are initialized.
2150   template <bool CombineShadow>
2151   class Combiner {
2152     Value *Shadow = nullptr;
2153     Value *Origin = nullptr;
2154     IRBuilder<> &IRB;
2155     MemorySanitizerVisitor *MSV;
2156 
2157   public:
2158     Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
2159         : IRB(IRB), MSV(MSV) {}
2160 
2161     /// Add a pair of shadow and origin values to the mix.
2162     Combiner &Add(Value *OpShadow, Value *OpOrigin) {
2163       if (CombineShadow) {
2164         assert(OpShadow);
2165         if (!Shadow)
2166           Shadow = OpShadow;
2167         else {
2168           OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2169           Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
2170         }
2171       }
2172 
2173       if (MSV->MS.TrackOrigins) {
2174         assert(OpOrigin);
2175         if (!Origin) {
2176           Origin = OpOrigin;
2177         } else {
2178           Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
2179           // No point in adding something that might result in 0 origin value.
2180           if (!ConstOrigin || !ConstOrigin->isNullValue()) {
2181             Value *FlatShadow = MSV->convertShadowToScalar(OpShadow, IRB);
2182             Value *Cond =
2183                 IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
2184             Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
2185           }
2186         }
2187       }
2188       return *this;
2189     }
2190 
2191     /// Add an application value to the mix.
2192     Combiner &Add(Value *V) {
2193       Value *OpShadow = MSV->getShadow(V);
2194       Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
2195       return Add(OpShadow, OpOrigin);
2196     }
2197 
2198     /// Set the current combined values as the given instruction's shadow
2199     /// and origin.
2200     void Done(Instruction *I) {
2201       if (CombineShadow) {
2202         assert(Shadow);
2203         Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
2204         MSV->setShadow(I, Shadow);
2205       }
2206       if (MSV->MS.TrackOrigins) {
2207         assert(Origin);
2208         MSV->setOrigin(I, Origin);
2209       }
2210     }
2211   };
2212 
2213   using ShadowAndOriginCombiner = Combiner<true>;
2214   using OriginCombiner = Combiner<false>;
2215 
2216   /// Propagate origin for arbitrary operation.
2217   void setOriginForNaryOp(Instruction &I) {
2218     if (!MS.TrackOrigins) return;
2219     IRBuilder<> IRB(&I);
2220     OriginCombiner OC(this, IRB);
2221     for (Use &Op : I.operands())
2222       OC.Add(Op.get());
2223     OC.Done(&I);
2224   }
2225 
2226   size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
2227     assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
2228            "Vector of pointers is not a valid shadow type");
2229     return Ty->isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *
2230                                   Ty->getScalarSizeInBits()
2231                             : Ty->getPrimitiveSizeInBits();
2232   }
2233 
2234   /// Cast between two shadow types, extending or truncating as
2235   /// necessary.
2236   Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
2237                           bool Signed = false) {
2238     Type *srcTy = V->getType();
2239     size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2240     size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2241     if (srcSizeInBits > 1 && dstSizeInBits == 1)
2242       return IRB.CreateICmpNE(V, getCleanShadow(V));
2243 
2244     if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
2245       return IRB.CreateIntCast(V, dstTy, Signed);
2246     if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
2247         cast<FixedVectorType>(dstTy)->getNumElements() ==
2248             cast<FixedVectorType>(srcTy)->getNumElements())
2249       return IRB.CreateIntCast(V, dstTy, Signed);
2250     Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
2251     Value *V2 =
2252       IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
2253     return IRB.CreateBitCast(V2, dstTy);
2254     // TODO: handle struct types.
2255   }
2256 
2257   /// Cast an application value to the type of its own shadow.
2258   Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
2259     Type *ShadowTy = getShadowTy(V);
2260     if (V->getType() == ShadowTy)
2261       return V;
2262     if (V->getType()->isPtrOrPtrVectorTy())
2263       return IRB.CreatePtrToInt(V, ShadowTy);
2264     else
2265       return IRB.CreateBitCast(V, ShadowTy);
2266   }
2267 
2268   /// Propagate shadow for arbitrary operation.
2269   void handleShadowOr(Instruction &I) {
2270     IRBuilder<> IRB(&I);
2271     ShadowAndOriginCombiner SC(this, IRB);
2272     for (Use &Op : I.operands())
2273       SC.Add(Op.get());
2274     SC.Done(&I);
2275   }
2276 
2277   void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }
2278 
2279   // Handle multiplication by constant.
2280   //
2281   // Handle a special case of multiplication by constant that may have one or
2282   // more zeros in the lower bits. This makes the corresponding number of lower bits
2283   // of the result zero as well. We model it by shifting the other operand
2284   // shadow left by the required number of bits. Effectively, we transform
2285   // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
2286   // We use multiplication by 2**N instead of shift to cover the case of
2287   // multiplication by 0, which may occur in some elements of a vector operand.
2288   void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
2289                            Value *OtherArg) {
2290     Constant *ShadowMul;
2291     Type *Ty = ConstArg->getType();
2292     if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2293       unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
2294       Type *EltTy = VTy->getElementType();
2295       SmallVector<Constant *, 16> Elements;
2296       for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
2297         if (ConstantInt *Elt =
2298                 dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
2299           const APInt &V = Elt->getValue();
2300           APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
2301           Elements.push_back(ConstantInt::get(EltTy, V2));
2302         } else {
2303           Elements.push_back(ConstantInt::get(EltTy, 1));
2304         }
2305       }
2306       ShadowMul = ConstantVector::get(Elements);
2307     } else {
2308       if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
2309         const APInt &V = Elt->getValue();
2310         APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
2311         ShadowMul = ConstantInt::get(Ty, V2);
2312       } else {
2313         ShadowMul = ConstantInt::get(Ty, 1);
2314       }
2315     }
2316 
2317     IRBuilder<> IRB(&I);
2318     setShadow(&I,
2319               IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
2320     setOrigin(&I, getOrigin(OtherArg));
2321   }
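  // Example: for X * 24 (24 == 3 * 2**3) the constant has 3 trailing zero bits,
  // so ShadowMul == 8 and the result shadow is Sx * 8, i.e. Sx shifted left by 3.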
2322 
2323   void visitMul(BinaryOperator &I) {
2324     Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
2325     Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
2326     if (constOp0 && !constOp1)
2327       handleMulByConstant(I, constOp0, I.getOperand(1));
2328     else if (constOp1 && !constOp0)
2329       handleMulByConstant(I, constOp1, I.getOperand(0));
2330     else
2331       handleShadowOr(I);
2332   }
2333 
2334   void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
2335   void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
2336   void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
2337   void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
2338   void visitSub(BinaryOperator &I) { handleShadowOr(I); }
2339   void visitXor(BinaryOperator &I) { handleShadowOr(I); }
2340 
2341   void handleIntegerDiv(Instruction &I) {
2342     IRBuilder<> IRB(&I);
2343     // Strict on the second argument.
2344     insertShadowCheck(I.getOperand(1), &I);
2345     setShadow(&I, getShadow(&I, 0));
2346     setOrigin(&I, getOrigin(&I, 0));
2347   }
2348 
2349   void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2350   void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2351   void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
2352   void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }
2353 
2354   // Floating point division is side-effect free. We cannot require that the
2355   // divisor is fully initialized and must propagate shadow. See PR37523.
2356   void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
2357   void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
2358 
2359   /// Instrument == and != comparisons.
2360   ///
2361   /// Sometimes the comparison result is known even if some of the bits of the
2362   /// arguments are not.
2363   void handleEqualityComparison(ICmpInst &I) {
2364     IRBuilder<> IRB(&I);
2365     Value *A = I.getOperand(0);
2366     Value *B = I.getOperand(1);
2367     Value *Sa = getShadow(A);
2368     Value *Sb = getShadow(B);
2369 
2370     // Get rid of pointers and vectors of pointers.
2371     // For ints (and vectors of ints), types of A and Sa match,
2372     // and this is a no-op.
2373     A = IRB.CreatePointerCast(A, Sa->getType());
2374     B = IRB.CreatePointerCast(B, Sb->getType());
2375 
2376     // A == B  <==>  (C = A^B) == 0
2377     // A != B  <==>  (C = A^B) != 0
2378     // Sc = Sa | Sb
2379     Value *C = IRB.CreateXor(A, B);
2380     Value *Sc = IRB.CreateOr(Sa, Sb);
2381     // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
2382     // Result is defined if one of the following is true
2383     // * there is a defined 1 bit in C
2384     // * C is fully defined
2385     // Si = !(C & ~Sc) && Sc
2386     Value *Zero = Constant::getNullValue(Sc->getType());
2387     Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
2388     Value *Si =
2389       IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
2390                     IRB.CreateICmpEQ(
2391                       IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
2392     Si->setName("_msprop_icmp");
2393     setShadow(&I, Si);
2394     setOriginForNaryOp(I);
2395   }
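  // Example: comparing A = 0b10?? (low two bits poisoned, Sa = 0b0011) against
  // B = 0 gives C = A ^ B with a defined, set bit 3, so (C & ~Sc) != 0 and the
  // comparison result is considered defined despite the poisoned low bits.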
2396 
2397   /// Build the lowest possible value of V, taking into account V's
2398   ///        uninitialized bits.
2399   Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
2400                                 bool isSigned) {
2401     if (isSigned) {
2402       // Split shadow into sign bit and other bits.
2403       Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
2404       Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
2405       // Set an undefined sign bit (minimizes A), clear other undefined bits.
2406       return
2407         IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
2408     } else {
2409       // Minimize undefined bits.
2410       return IRB.CreateAnd(A, IRB.CreateNot(Sa));
2411     }
2412   }
2413 
2414   /// Build the highest possible value of V, taking into account V's
2415   ///        uninitialized bits.
2416   Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
2417                                 bool isSigned) {
2418     if (isSigned) {
2419       // Split shadow into sign bit and other bits.
2420       Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
2421       Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
2422       // Clear an undefined sign bit (maximizes A), set other undefined bits.
2423       return
2424         IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
2425     } else {
2426       // Maximize undefined bits.
2427       return IRB.CreateOr(A, Sa);
2428     }
2429   }
2430 
2431   /// Instrument relational comparisons.
2432   ///
2433   /// This function does exact shadow propagation for all relational
2434   /// comparisons of integers, pointers and vectors of those.
2435   /// FIXME: output seems suboptimal when one of the operands is a constant
2436   void handleRelationalComparisonExact(ICmpInst &I) {
2437     IRBuilder<> IRB(&I);
2438     Value *A = I.getOperand(0);
2439     Value *B = I.getOperand(1);
2440     Value *Sa = getShadow(A);
2441     Value *Sb = getShadow(B);
2442 
2443     // Get rid of pointers and vectors of pointers.
2444     // For ints (and vectors of ints), types of A and Sa match,
2445     // and this is a no-op.
2446     A = IRB.CreatePointerCast(A, Sa->getType());
2447     B = IRB.CreatePointerCast(B, Sb->getType());
2448 
2449     // Let [a0, a1] be the interval of possible values of A, taking into account
2450     // its undefined bits. Let [b0, b1] be the interval of possible values of B.
2451     // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
2452     bool IsSigned = I.isSigned();
2453     Value *S1 = IRB.CreateICmp(I.getPredicate(),
2454                                getLowestPossibleValue(IRB, A, Sa, IsSigned),
2455                                getHighestPossibleValue(IRB, B, Sb, IsSigned));
2456     Value *S2 = IRB.CreateICmp(I.getPredicate(),
2457                                getHighestPossibleValue(IRB, A, Sa, IsSigned),
2458                                getLowestPossibleValue(IRB, B, Sb, IsSigned));
2459     Value *Si = IRB.CreateXor(S1, S2);
2460     setShadow(&I, Si);
2461     setOriginForNaryOp(I);
2462   }
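  // Example (unsigned): A = 0b1??? (Sa = 0b0111) spans [0b1000, 0b1111]. For a
  // fully defined B = 0b0100 both interval endpoints compare greater, so A > B
  // gets a clean shadow; for B = 0b1010 the endpoints disagree and the result
  // is poisoned.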
2463 
2464   /// Instrument signed relational comparisons.
2465   ///
2466   /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
2467   /// bit of the shadow. Everything else is delegated to handleShadowOr().
2468   void handleSignedRelationalComparison(ICmpInst &I) {
2469     Constant *constOp;
2470     Value *op = nullptr;
2471     CmpInst::Predicate pre;
2472     if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
2473       op = I.getOperand(0);
2474       pre = I.getPredicate();
2475     } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
2476       op = I.getOperand(1);
2477       pre = I.getSwappedPredicate();
2478     } else {
2479       handleShadowOr(I);
2480       return;
2481     }
2482 
2483     if ((constOp->isNullValue() &&
2484          (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
2485         (constOp->isAllOnesValue() &&
2486          (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
2487       IRBuilder<> IRB(&I);
2488       Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
2489                                         "_msprop_icmp_s");
2490       setShadow(&I, Shadow);
2491       setOrigin(&I, getOrigin(op));
2492     } else {
2493       handleShadowOr(I);
2494     }
2495   }
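  // Example: for "x < 0" only the sign bit of x matters, so the shadow emitted
  // above is "icmp slt Sx, 0": the result is poisoned iff the sign bit of the
  // shadow (and hence of x) is poisoned.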
2496 
2497   void visitICmpInst(ICmpInst &I) {
2498     if (!ClHandleICmp) {
2499       handleShadowOr(I);
2500       return;
2501     }
2502     if (I.isEquality()) {
2503       handleEqualityComparison(I);
2504       return;
2505     }
2506 
2507     assert(I.isRelational());
2508     if (ClHandleICmpExact) {
2509       handleRelationalComparisonExact(I);
2510       return;
2511     }
2512     if (I.isSigned()) {
2513       handleSignedRelationalComparison(I);
2514       return;
2515     }
2516 
2517     assert(I.isUnsigned());
2518     if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
2519       handleRelationalComparisonExact(I);
2520       return;
2521     }
2522 
2523     handleShadowOr(I);
2524   }
2525 
2526   void visitFCmpInst(FCmpInst &I) {
2527     handleShadowOr(I);
2528   }
2529 
2530   void handleShift(BinaryOperator &I) {
2531     IRBuilder<> IRB(&I);
2532     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2533     // Otherwise perform the same shift on S1.
2534     Value *S1 = getShadow(&I, 0);
2535     Value *S2 = getShadow(&I, 1);
2536     Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
2537                                    S2->getType());
2538     Value *V2 = I.getOperand(1);
2539     Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
2540     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2541     setOriginForNaryOp(I);
2542   }
2543 
2544   void visitShl(BinaryOperator &I) { handleShift(I); }
2545   void visitAShr(BinaryOperator &I) { handleShift(I); }
2546   void visitLShr(BinaryOperator &I) { handleShift(I); }
2547 
2548   void handleFunnelShift(IntrinsicInst &I) {
2549     IRBuilder<> IRB(&I);
2550     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2551     // Otherwise perform the same shift on S0 and S1.
2552     Value *S0 = getShadow(&I, 0);
2553     Value *S1 = getShadow(&I, 1);
2554     Value *S2 = getShadow(&I, 2);
2555     Value *S2Conv =
2556         IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
2557     Value *V2 = I.getOperand(2);
2558     Function *Intrin = Intrinsic::getDeclaration(
2559         I.getModule(), I.getIntrinsicID(), S2Conv->getType());
2560     Value *Shift = IRB.CreateCall(Intrin, {S0, S1, V2});
2561     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2562     setOriginForNaryOp(I);
2563   }
2564 
2565   /// Instrument llvm.memmove
2566   ///
2567   /// At this point we don't know if llvm.memmove will be inlined or not.
2568   /// If we don't instrument it and it gets inlined,
2569   /// our interceptor will not kick in and we will lose the memmove.
2570   /// If we instrument the call here, but it does not get inlined,
2571   /// we will memmove the shadow twice, which is bad in the case
2572   /// of overlapping regions. So, we simply lower the intrinsic to a call.
2573   ///
2574   /// Similar situation exists for memcpy and memset.
2575   void visitMemMoveInst(MemMoveInst &I) {
2576     IRBuilder<> IRB(&I);
2577     IRB.CreateCall(
2578         MS.MemmoveFn,
2579         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2580          IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2581          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2582     I.eraseFromParent();
2583   }
2584 
2585   // Similar to memmove: avoid copying shadow twice.
2586   // This is somewhat unfortunate as it may slow down small constant memcpys.
2587   // FIXME: consider doing manual inline for small constant sizes and proper
2588   // alignment.
2589   void visitMemCpyInst(MemCpyInst &I) {
2590     IRBuilder<> IRB(&I);
2591     IRB.CreateCall(
2592         MS.MemcpyFn,
2593         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2594          IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2595          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2596     I.eraseFromParent();
2597   }
2598 
2599   // Same as memcpy.
2600   void visitMemSetInst(MemSetInst &I) {
2601     IRBuilder<> IRB(&I);
2602     IRB.CreateCall(
2603         MS.MemsetFn,
2604         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2605          IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2606          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2607     I.eraseFromParent();
2608   }
2609 
2610   void visitVAStartInst(VAStartInst &I) {
2611     VAHelper->visitVAStartInst(I);
2612   }
2613 
2614   void visitVACopyInst(VACopyInst &I) {
2615     VAHelper->visitVACopyInst(I);
2616   }
2617 
2618   /// Handle vector store-like intrinsics.
2619   ///
2620   /// Instrument intrinsics that look like a simple SIMD store: writes memory,
2621   /// has 1 pointer argument and 1 vector argument, returns void.
2622   bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
2623     IRBuilder<> IRB(&I);
2624     Value* Addr = I.getArgOperand(0);
2625     Value *Shadow = getShadow(&I, 1);
2626     Value *ShadowPtr, *OriginPtr;
2627 
2628     // We don't know the pointer alignment (could be unaligned SSE store!).
2629     // Have to assume the worst case.
2630     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2631         Addr, IRB, Shadow->getType(), Align(1), /*isStore*/ true);
2632     IRB.CreateAlignedStore(Shadow, ShadowPtr, Align(1));
2633 
2634     if (ClCheckAccessAddress)
2635       insertShadowCheck(Addr, &I);
2636 
2637     // FIXME: factor out common code from materializeStores
2638     if (MS.TrackOrigins) IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
2639     return true;
2640   }
2641 
2642   /// Handle vector load-like intrinsics.
2643   ///
2644   /// Instrument intrinsics that look like a simple SIMD load: reads memory,
2645   /// has 1 pointer argument, returns a vector.
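  ///
  /// A minimal sketch of the shadow propagation, assuming the result is a
  /// <4 x i32> loaded through i8* %p (names here are illustrative only):
  ///   %sp = <shadow address for %p, computed by getShadowOriginPtr>
  ///   %_msld = load <4 x i32>, <4 x i32>* %sp, align 1
  /// and the loaded value becomes the shadow of the intrinsic's result.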
2646   bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
2647     IRBuilder<> IRB(&I);
2648     Value *Addr = I.getArgOperand(0);
2649 
2650     Type *ShadowTy = getShadowTy(&I);
2651     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
2652     if (PropagateShadow) {
2653       // We don't know the pointer alignment (could be unaligned SSE load!).
2654       // Have to assume the worst case.
2655       const Align Alignment = Align(1);
2656       std::tie(ShadowPtr, OriginPtr) =
2657           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2658       setShadow(&I,
2659                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
2660     } else {
2661       setShadow(&I, getCleanShadow(&I));
2662     }
2663 
2664     if (ClCheckAccessAddress)
2665       insertShadowCheck(Addr, &I);
2666 
2667     if (MS.TrackOrigins) {
2668       if (PropagateShadow)
2669         setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
2670       else
2671         setOrigin(&I, getCleanOrigin());
2672     }
2673     return true;
2674   }
2675 
2676   /// Handle (SIMD arithmetic)-like intrinsics.
2677   ///
2678   /// Instrument intrinsics with any number of arguments of the same type,
2679   /// equal to the return type. The type should be simple (no aggregates or
2680   /// pointers; vectors are fine).
2681   /// Caller guarantees that this intrinsic does not access memory.
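  ///
  /// Shadow propagation here is a plain OR of the argument shadows; e.g. for a
  /// hypothetical %r = call <4 x i32> @llvm.some.simd.op(<4 x i32> %a, <4 x i32> %b)
  /// the result shadow is roughly Sr = Sa | Sb, with the origin taken from one
  /// of the poisoned arguments.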
2682   bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
2683     Type *RetTy = I.getType();
2684     if (!(RetTy->isIntOrIntVectorTy() ||
2685           RetTy->isFPOrFPVectorTy() ||
2686           RetTy->isX86_MMXTy()))
2687       return false;
2688 
2689     unsigned NumArgOperands = I.arg_size();
2690     for (unsigned i = 0; i < NumArgOperands; ++i) {
2691       Type *Ty = I.getArgOperand(i)->getType();
2692       if (Ty != RetTy)
2693         return false;
2694     }
2695 
2696     IRBuilder<> IRB(&I);
2697     ShadowAndOriginCombiner SC(this, IRB);
2698     for (unsigned i = 0; i < NumArgOperands; ++i)
2699       SC.Add(I.getArgOperand(i));
2700     SC.Done(&I);
2701 
2702     return true;
2703   }
2704 
2705   /// Heuristically instrument unknown intrinsics.
2706   ///
2707   /// The main purpose of this code is to do something reasonable with all
2708   /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
2709   /// We recognize several classes of intrinsics by their argument types and
2710   /// ModRefBehavior and apply special instrumentation when we are reasonably
2711   /// sure that we know what the intrinsic does.
2712   ///
2713   /// We special-case intrinsics where this approach fails. See llvm.bswap
2714   /// handling as an example of that.
2715   bool handleUnknownIntrinsic(IntrinsicInst &I) {
2716     unsigned NumArgOperands = I.arg_size();
2717     if (NumArgOperands == 0)
2718       return false;
2719 
2720     if (NumArgOperands == 2 &&
2721         I.getArgOperand(0)->getType()->isPointerTy() &&
2722         I.getArgOperand(1)->getType()->isVectorTy() &&
2723         I.getType()->isVoidTy() &&
2724         !I.onlyReadsMemory()) {
2725       // This looks like a vector store.
2726       return handleVectorStoreIntrinsic(I);
2727     }
2728 
2729     if (NumArgOperands == 1 &&
2730         I.getArgOperand(0)->getType()->isPointerTy() &&
2731         I.getType()->isVectorTy() &&
2732         I.onlyReadsMemory()) {
2733       // This looks like a vector load.
2734       return handleVectorLoadIntrinsic(I);
2735     }
2736 
2737     if (I.doesNotAccessMemory())
2738       if (maybeHandleSimpleNomemIntrinsic(I))
2739         return true;
2740 
2741     // FIXME: detect and handle SSE maskstore/maskload
2742     return false;
2743   }
2744 
2745   void handleInvariantGroup(IntrinsicInst &I) {
2746     setShadow(&I, getShadow(&I, 0));
2747     setOrigin(&I, getOrigin(&I, 0));
2748   }
2749 
2750   void handleLifetimeStart(IntrinsicInst &I) {
2751     if (!PoisonStack)
2752       return;
2753     AllocaInst *AI = llvm::findAllocaForValue(I.getArgOperand(1));
2754     if (!AI)
2755       InstrumentLifetimeStart = false;
2756     LifetimeStartList.push_back(std::make_pair(&I, AI));
2757   }
2758 
2759   void handleBswap(IntrinsicInst &I) {
2760     IRBuilder<> IRB(&I);
2761     Value *Op = I.getArgOperand(0);
2762     Type *OpType = Op->getType();
2763     Function *BswapFunc = Intrinsic::getDeclaration(
2764       F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
2765     setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
2766     setOrigin(&I, getOrigin(Op));
2767   }
2768 
2769   // Instrument vector convert intrinsic.
2770   //
2771   // This function instruments intrinsics like cvtsi2ss:
2772   // %Out = int_xxx_cvtyyy(%ConvertOp)
2773   // or
2774   // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
2775   // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
2776   // number of \p Out elements, and (if it has 2 arguments) copies the rest of the
2777   // elements from \p CopyOp.
2778   // In most cases conversion involves a floating-point value which may trigger a
2779   // hardware exception when not fully initialized. For this reason we require
2780   // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
2781   // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
2782   // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
2783   // return a fully initialized value.
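  //
  // Worked example (illustrative): for
  //   %Out = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a, <2 x double> %b)
  // NumUsedElements is 1, so we require %b[0] to be fully initialized (a check
  // is inserted on its shadow) and the result shadow is the shadow of %a with
  // element 0 replaced by zero.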
2784   void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
2785                                     bool HasRoundingMode = false) {
2786     IRBuilder<> IRB(&I);
2787     Value *CopyOp, *ConvertOp;
2788 
2789     assert((!HasRoundingMode ||
2790             isa<ConstantInt>(I.getArgOperand(I.arg_size() - 1))) &&
2791            "Invalid rounding mode");
2792 
2793     switch (I.arg_size() - HasRoundingMode) {
2794     case 2:
2795       CopyOp = I.getArgOperand(0);
2796       ConvertOp = I.getArgOperand(1);
2797       break;
2798     case 1:
2799       ConvertOp = I.getArgOperand(0);
2800       CopyOp = nullptr;
2801       break;
2802     default:
2803       llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
2804     }
2805 
2806     // The first *NumUsedElements* elements of ConvertOp are converted to the
2807     // same number of output elements. The rest of the output is copied from
2808     // CopyOp, or (if not available) filled with zeroes.
2809     // Combine shadow for elements of ConvertOp that are used in this operation,
2810     // and insert a check.
2811     // FIXME: consider propagating shadow of ConvertOp, at least in the case of
2812     // int->any conversion.
2813     Value *ConvertShadow = getShadow(ConvertOp);
2814     Value *AggShadow = nullptr;
2815     if (ConvertOp->getType()->isVectorTy()) {
2816       AggShadow = IRB.CreateExtractElement(
2817           ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2818       for (int i = 1; i < NumUsedElements; ++i) {
2819         Value *MoreShadow = IRB.CreateExtractElement(
2820             ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2821         AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
2822       }
2823     } else {
2824       AggShadow = ConvertShadow;
2825     }
2826     assert(AggShadow->getType()->isIntegerTy());
2827     insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2828 
2829     // Build result shadow by zero-filling parts of CopyOp shadow that come from
2830     // ConvertOp.
2831     if (CopyOp) {
2832       assert(CopyOp->getType() == I.getType());
2833       assert(CopyOp->getType()->isVectorTy());
2834       Value *ResultShadow = getShadow(CopyOp);
2835       Type *EltTy = cast<VectorType>(ResultShadow->getType())->getElementType();
2836       for (int i = 0; i < NumUsedElements; ++i) {
2837         ResultShadow = IRB.CreateInsertElement(
2838             ResultShadow, ConstantInt::getNullValue(EltTy),
2839             ConstantInt::get(IRB.getInt32Ty(), i));
2840       }
2841       setShadow(&I, ResultShadow);
2842       setOrigin(&I, getOrigin(CopyOp));
2843     } else {
2844       setShadow(&I, getCleanShadow(&I));
2845       setOrigin(&I, getCleanOrigin());
2846     }
2847   }
2848 
2849   // Given a scalar or vector, extract lower 64 bits (or less), and return all
2850   // zeroes if it is zero, and all ones otherwise.
2851   Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2852     if (S->getType()->isVectorTy())
2853       S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
2854     assert(S->getType()->getPrimitiveSizeInBits() <= 64);
2855     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2856     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2857   }
2858 
2859   // Given a vector, extract its first element, and return all
2860   // zeroes if it is zero, and all ones otherwise.
2861   Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2862     Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
2863     Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
2864     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2865   }
2866 
2867   Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
2868     Type *T = S->getType();
2869     assert(T->isVectorTy());
2870     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2871     return IRB.CreateSExt(S2, T);
2872   }
2873 
2874   // Instrument vector shift intrinsic.
2875   //
2876   // This function instruments intrinsics like int_x86_avx2_psll_w.
2877   // Intrinsic shifts %In by %ShiftSize bits.
2878   // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
2879   // size, and the rest is ignored. Behavior is defined even if shift size is
2880   // greater than register (or field) width.
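  //
  // A rough sketch of the propagated shadow, e.g. for
  //   %r = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a, <8 x i16> %count)
  //   Sr = bitcast(psrl.w(bitcast(Sa), %count)) | (low 64 bits of Scount != 0 ? all-ones : 0)
  // i.e. the first operand's shadow is shifted the same way, and any poison in
  // the (used part of the) shift count poisons the entire result.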
2881   void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
2882     assert(I.arg_size() == 2);
2883     IRBuilder<> IRB(&I);
2884     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2885     // Otherwise perform the same shift on S1.
2886     Value *S1 = getShadow(&I, 0);
2887     Value *S2 = getShadow(&I, 1);
2888     Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2889                              : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2890     Value *V1 = I.getOperand(0);
2891     Value *V2 = I.getOperand(1);
2892     Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
2893                                   {IRB.CreateBitCast(S1, V1->getType()), V2});
2894     Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2895     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2896     setOriginForNaryOp(I);
2897   }
2898 
2899   // Get an X86_MMX-sized vector type.
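  // For illustration: getMMXVectorTy(16) yields <4 x i16> and getMMXVectorTy(32)
  // yields <2 x i32>, each 64 bits wide.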
2900   Type *getMMXVectorTy(unsigned EltSizeInBits) {
2901     const unsigned X86_MMXSizeInBits = 64;
2902     assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
2903            "Illegal MMX vector element size");
2904     return FixedVectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
2905                                 X86_MMXSizeInBits / EltSizeInBits);
2906   }
2907 
2908   // Returns a signed counterpart for an (un)signed-saturate-and-pack
2909   // intrinsic.
2910   Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
2911     switch (id) {
2912       case Intrinsic::x86_sse2_packsswb_128:
2913       case Intrinsic::x86_sse2_packuswb_128:
2914         return Intrinsic::x86_sse2_packsswb_128;
2915 
2916       case Intrinsic::x86_sse2_packssdw_128:
2917       case Intrinsic::x86_sse41_packusdw:
2918         return Intrinsic::x86_sse2_packssdw_128;
2919 
2920       case Intrinsic::x86_avx2_packsswb:
2921       case Intrinsic::x86_avx2_packuswb:
2922         return Intrinsic::x86_avx2_packsswb;
2923 
2924       case Intrinsic::x86_avx2_packssdw:
2925       case Intrinsic::x86_avx2_packusdw:
2926         return Intrinsic::x86_avx2_packssdw;
2927 
2928       case Intrinsic::x86_mmx_packsswb:
2929       case Intrinsic::x86_mmx_packuswb:
2930         return Intrinsic::x86_mmx_packsswb;
2931 
2932       case Intrinsic::x86_mmx_packssdw:
2933         return Intrinsic::x86_mmx_packssdw;
2934       default:
2935         llvm_unreachable("unexpected intrinsic id");
2936     }
2937   }
2938 
2939   // Instrument vector pack intrinsic.
2940   //
2941   // This function instruments intrinsics like x86_mmx_packsswb, which
2942   // pack elements of 2 input vectors into half as many bits with saturation.
2943   // Shadow is propagated with the signed variant of the same intrinsic applied
2944   // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2945   // EltSizeInBits is used only for x86mmx arguments.
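  //
  // Illustrative example: for
  //   %r = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a, <8 x i16> %b)
  // the shadow is computed as
  //   Sr = packsswb.128(sext(Sa != 0), sext(Sb != 0))
  // so every result byte whose source element has any poisoned bit ends up
  // fully poisoned (signed saturation keeps the all-ones lanes all-ones).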
2946   void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2947     assert(I.arg_size() == 2);
2948     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2949     IRBuilder<> IRB(&I);
2950     Value *S1 = getShadow(&I, 0);
2951     Value *S2 = getShadow(&I, 1);
2952     assert(isX86_MMX || S1->getType()->isVectorTy());
2953 
2954     // SExt and ICmpNE below must apply to individual elements of input vectors.
2955     // In case of x86mmx arguments, cast them to appropriate vector types and
2956     // back.
2957     Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2958     if (isX86_MMX) {
2959       S1 = IRB.CreateBitCast(S1, T);
2960       S2 = IRB.CreateBitCast(S2, T);
2961     }
2962     Value *S1_ext = IRB.CreateSExt(
2963         IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
2964     Value *S2_ext = IRB.CreateSExt(
2965         IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
2966     if (isX86_MMX) {
2967       Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2968       S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2969       S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2970     }
2971 
2972     Function *ShadowFn = Intrinsic::getDeclaration(
2973         F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2974 
2975     Value *S =
2976         IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
2977     if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2978     setShadow(&I, S);
2979     setOriginForNaryOp(I);
2980   }
2981 
2982   // Instrument sum-of-absolute-differences intrinsic.
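  //
  // A rough sketch of the shadow computation: per 64-bit result element,
  //   S = or(Sa, Sb); S = sext(S != 0) lshr 48
  // so if any input byte feeding a psad.bw sum is poisoned, the 16 significant
  // bits of that sum are poisoned and the zero-filled high bits stay clean.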
2983   void handleVectorSadIntrinsic(IntrinsicInst &I) {
2984     const unsigned SignificantBitsPerResultElement = 16;
2985     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2986     Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
2987     unsigned ZeroBitsPerResultElement =
2988         ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
2989 
2990     IRBuilder<> IRB(&I);
2991     Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2992     S = IRB.CreateBitCast(S, ResTy);
2993     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2994                        ResTy);
2995     S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
2996     S = IRB.CreateBitCast(S, getShadowTy(&I));
2997     setShadow(&I, S);
2998     setOriginForNaryOp(I);
2999   }
3000 
3001   // Instrument multiply-add intrinsic.
3002   void handleVectorPmaddIntrinsic(IntrinsicInst &I,
3003                                   unsigned EltSizeInBits = 0) {
3004     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
3005     Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
3006     IRBuilder<> IRB(&I);
3007     Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
3008     S = IRB.CreateBitCast(S, ResTy);
3009     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
3010                        ResTy);
3011     S = IRB.CreateBitCast(S, getShadowTy(&I));
3012     setShadow(&I, S);
3013     setOriginForNaryOp(I);
3014   }
3015 
3016   // Instrument compare-packed intrinsic.
3017   // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
3018   // all-ones shadow.
3019   void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
3020     IRBuilder<> IRB(&I);
3021     Type *ResTy = getShadowTy(&I);
3022     Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
3023     Value *S = IRB.CreateSExt(
3024         IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
3025     setShadow(&I, S);
3026     setOriginForNaryOp(I);
3027   }
3028 
3029   // Instrument compare-scalar intrinsic.
3030   // This handles both cmp* intrinsics which return the result in the first
3031   // element of a vector, and comi* which return the result as i32.
3032   void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
3033     IRBuilder<> IRB(&I);
3034     Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
3035     Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
3036     setShadow(&I, S);
3037     setOriginForNaryOp(I);
3038   }
3039 
3040   // Instrument generic vector reduction intrinsics
3041   // by ORing together all their fields.
3042   void handleVectorReduceIntrinsic(IntrinsicInst &I) {
3043     IRBuilder<> IRB(&I);
3044     Value *S = IRB.CreateOrReduce(getShadow(&I, 0));
3045     setShadow(&I, S);
3046     setOrigin(&I, getOrigin(&I, 0));
3047   }
3048 
3049   // Instrument vector.reduce.or intrinsic.
3050   // Valid (non-poisoned) set bits in the operand pull low the
3051   // corresponding shadow bits.
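  //
  // Small worked example with illustrative i8 values: for
  //   reduce_or(<2 x i8> <0x01, ?>) where the shadows are <0x00, 0xFF>
  // OperandUnsetOrPoison = <0xFE, 0xFF>, so OutShadowMask = 0xFE, OrShadow =
  // 0xFF, and the result shadow is 0xFE: bit 0 is provably 1 thanks to the
  // known-set bit in the first element, while all other bits stay poisoned.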
3052   void handleVectorReduceOrIntrinsic(IntrinsicInst &I) {
3053     IRBuilder<> IRB(&I);
3054     Value *OperandShadow = getShadow(&I, 0);
3055     Value *OperandUnsetBits = IRB.CreateNot(I.getOperand(0));
3056     Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);
3057     // Bit N is clean if any field's bit N is 1 and unpoisoned
3058     Value *OutShadowMask = IRB.CreateAndReduce(OperandUnsetOrPoison);
3059     // Otherwise, it is clean if every field's bit N is unpoisoned
3060     Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
3061     Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);
3062 
3063     setShadow(&I, S);
3064     setOrigin(&I, getOrigin(&I, 0));
3065   }
3066 
3067   // Instrument vector.reduce.and intrinsic.
3068   // Valid (non-poisoned) unset bits in the operand pull down the
3069   // corresponding shadow bits.
3070   void handleVectorReduceAndIntrinsic(IntrinsicInst &I) {
3071     IRBuilder<> IRB(&I);
3072     Value *OperandShadow = getShadow(&I, 0);
3073     Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);
3074     // Bit N is clean if any field's bit N is 0 and unpoisoned
3075     Value *OutShadowMask = IRB.CreateAndReduce(OperandSetOrPoison);
3076     // Otherwise, it is clean if every field's bit N is unpoisoned
3077     Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
3078     Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);
3079 
3080     setShadow(&I, S);
3081     setOrigin(&I, getOrigin(&I, 0));
3082   }
3083 
3084   void handleStmxcsr(IntrinsicInst &I) {
3085     IRBuilder<> IRB(&I);
3086     Value* Addr = I.getArgOperand(0);
3087     Type *Ty = IRB.getInt32Ty();
3088     Value *ShadowPtr =
3089         getShadowOriginPtr(Addr, IRB, Ty, Align(1), /*isStore*/ true).first;
3090 
3091     IRB.CreateStore(getCleanShadow(Ty),
3092                     IRB.CreatePointerCast(ShadowPtr, Ty->getPointerTo()));
3093 
3094     if (ClCheckAccessAddress)
3095       insertShadowCheck(Addr, &I);
3096   }
3097 
3098   void handleLdmxcsr(IntrinsicInst &I) {
3099     if (!InsertChecks) return;
3100 
3101     IRBuilder<> IRB(&I);
3102     Value *Addr = I.getArgOperand(0);
3103     Type *Ty = IRB.getInt32Ty();
3104     const Align Alignment = Align(1);
3105     Value *ShadowPtr, *OriginPtr;
3106     std::tie(ShadowPtr, OriginPtr) =
3107         getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false);
3108 
3109     if (ClCheckAccessAddress)
3110       insertShadowCheck(Addr, &I);
3111 
3112     Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
3113     Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
3114                                     : getCleanOrigin();
3115     insertShadowCheck(Shadow, Origin, &I);
3116   }
3117 
3118   void handleMaskedStore(IntrinsicInst &I) {
3119     IRBuilder<> IRB(&I);
3120     Value *V = I.getArgOperand(0);
3121     Value *Addr = I.getArgOperand(1);
3122     const Align Alignment(
3123         cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
3124     Value *Mask = I.getArgOperand(3);
3125     Value *Shadow = getShadow(V);
3126 
3127     Value *ShadowPtr;
3128     Value *OriginPtr;
3129     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3130         Addr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);
3131 
3132     if (ClCheckAccessAddress) {
3133       insertShadowCheck(Addr, &I);
3134       // An uninitialized mask is kind of like an uninitialized address, but
3135       // not as scary.
3136       insertShadowCheck(Mask, &I);
3137     }
3138 
3139     IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);
3140 
3141     if (MS.TrackOrigins) {
3142       auto &DL = F.getParent()->getDataLayout();
3143       paintOrigin(IRB, getOrigin(V), OriginPtr,
3144                   DL.getTypeStoreSize(Shadow->getType()),
3145                   std::max(Alignment, kMinOriginAlignment));
3146     }
3147   }
3148 
3149   bool handleMaskedLoad(IntrinsicInst &I) {
3150     IRBuilder<> IRB(&I);
3151     Value *Addr = I.getArgOperand(0);
3152     const Align Alignment(
3153         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
3154     Value *Mask = I.getArgOperand(2);
3155     Value *PassThru = I.getArgOperand(3);
3156 
3157     Type *ShadowTy = getShadowTy(&I);
3158     Value *ShadowPtr, *OriginPtr;
3159     if (PropagateShadow) {
3160       std::tie(ShadowPtr, OriginPtr) =
3161           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
3162       setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
3163                                          getShadow(PassThru), "_msmaskedld"));
3164     } else {
3165       setShadow(&I, getCleanShadow(&I));
3166     }
3167 
3168     if (ClCheckAccessAddress) {
3169       insertShadowCheck(Addr, &I);
3170       insertShadowCheck(Mask, &I);
3171     }
3172 
3173     if (MS.TrackOrigins) {
3174       if (PropagateShadow) {
3175         // Choose between PassThru's and the loaded value's origins.
3176         Value *MaskedPassThruShadow = IRB.CreateAnd(
3177             getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
3178 
3179         Value *Acc = IRB.CreateExtractElement(
3180             MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
3181         for (int i = 1, N = cast<FixedVectorType>(PassThru->getType())
3182                                 ->getNumElements();
3183              i < N; ++i) {
3184           Value *More = IRB.CreateExtractElement(
3185               MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), i));
3186           Acc = IRB.CreateOr(Acc, More);
3187         }
3188 
3189         Value *Origin = IRB.CreateSelect(
3190             IRB.CreateICmpNE(Acc, Constant::getNullValue(Acc->getType())),
3191             getOrigin(PassThru), IRB.CreateLoad(MS.OriginTy, OriginPtr));
3192 
3193         setOrigin(&I, Origin);
3194       } else {
3195         setOrigin(&I, getCleanOrigin());
3196       }
3197     }
3198     return true;
3199   }
3200 
3201   // Instrument BMI / BMI2 intrinsics.
3202   // All of these intrinsics are Z = I(X, Y)
3203   // where the types of all operands and the result match, and are either i32 or i64.
3204   // The following instrumentation happens to work for all of them:
3205   //   Sz = I(Sx, Y) | (sext (Sy != 0))
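  // E.g. (illustrative) for %z = call i32 @llvm.x86.bmi.bzhi.32(i32 %x, i32 %y):
  //   Sz = bzhi.32(Sx, %y) | (Sy != 0 ? 0xffffffff : 0)
  // so a poisoned index operand poisons the whole result, while poison in %x
  // propagates through the same bit manipulation.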
3206   void handleBmiIntrinsic(IntrinsicInst &I) {
3207     IRBuilder<> IRB(&I);
3208     Type *ShadowTy = getShadowTy(&I);
3209 
3210     // If any bit of the mask operand is poisoned, then the whole thing is.
3211     Value *SMask = getShadow(&I, 1);
3212     SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
3213                            ShadowTy);
3214     // Apply the same intrinsic to the shadow of the first operand.
3215     Value *S = IRB.CreateCall(I.getCalledFunction(),
3216                               {getShadow(&I, 0), I.getOperand(1)});
3217     S = IRB.CreateOr(SMask, S);
3218     setShadow(&I, S);
3219     setOriginForNaryOp(I);
3220   }
3221 
3222   SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
3223     SmallVector<int, 8> Mask;
3224     for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
3225       Mask.append(2, X);
3226     }
3227     return Mask;
3228   }
3229 
3230   // Instrument pclmul intrinsics.
3231   // These intrinsics operate either on odd or on even elements of the input
3232   // vectors, depending on the constant in the 3rd argument, ignoring the rest.
3233   // Replace the unused elements with copies of the used ones, ex:
3234   //   (0, 1, 2, 3) -> (0, 0, 2, 2) (even case)
3235   // or
3236   //   (0, 1, 2, 3) -> (1, 1, 3, 3) (odd case)
3237   // and then apply the usual shadow combining logic.
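  //
  // For illustration, getPclmulMask(4, /*OddElements=*/false) produces the
  // shuffle mask {0, 0, 2, 2} and getPclmulMask(4, /*OddElements=*/true)
  // produces {1, 1, 3, 3}, matching the (even)/(odd) examples above.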
3238   void handlePclmulIntrinsic(IntrinsicInst &I) {
3239     IRBuilder<> IRB(&I);
3240     unsigned Width =
3241         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
3242     assert(isa<ConstantInt>(I.getArgOperand(2)) &&
3243            "pclmul 3rd operand must be a constant");
3244     unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3245     Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
3246                                            getPclmulMask(Width, Imm & 0x01));
3247     Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
3248                                            getPclmulMask(Width, Imm & 0x10));
3249     ShadowAndOriginCombiner SOC(this, IRB);
3250     SOC.Add(Shuf0, getOrigin(&I, 0));
3251     SOC.Add(Shuf1, getOrigin(&I, 1));
3252     SOC.Done(&I);
3253   }
3254 
3255   // Instrument _mm_*_sd intrinsics
3256   void handleUnarySdIntrinsic(IntrinsicInst &I) {
3257     IRBuilder<> IRB(&I);
3258     Value *First = getShadow(&I, 0);
3259     Value *Second = getShadow(&I, 1);
3260     // High word of first operand, low word of second
3261     Value *Shadow =
3262         IRB.CreateShuffleVector(First, Second, llvm::makeArrayRef<int>({2, 1}));
3263 
3264     setShadow(&I, Shadow);
3265     setOriginForNaryOp(I);
3266   }
3267 
3268   void handleBinarySdIntrinsic(IntrinsicInst &I) {
3269     IRBuilder<> IRB(&I);
3270     Value *First = getShadow(&I, 0);
3271     Value *Second = getShadow(&I, 1);
3272     Value *OrShadow = IRB.CreateOr(First, Second);
3273     // High word of first operand, low word of both OR'd together
3274     Value *Shadow = IRB.CreateShuffleVector(First, OrShadow,
3275                                             llvm::makeArrayRef<int>({2, 1}));
3276 
3277     setShadow(&I, Shadow);
3278     setOriginForNaryOp(I);
3279   }
3280 
3281   // Instrument abs intrinsic.
3282   // handleUnknownIntrinsic can't handle it because of the last
3283   // is_int_min_poison argument which does not match the result type.
3284   void handleAbsIntrinsic(IntrinsicInst &I) {
3285     assert(I.getType()->isIntOrIntVectorTy());
3286     assert(I.getArgOperand(0)->getType() == I.getType());
3287 
3288     // FIXME: Handle is_int_min_poison.
3289     IRBuilder<> IRB(&I);
3290     setShadow(&I, getShadow(&I, 0));
3291     setOrigin(&I, getOrigin(&I, 0));
3292   }
3293 
3294   void visitIntrinsicInst(IntrinsicInst &I) {
3295     switch (I.getIntrinsicID()) {
3296     case Intrinsic::abs:
3297       handleAbsIntrinsic(I);
3298       break;
3299     case Intrinsic::lifetime_start:
3300       handleLifetimeStart(I);
3301       break;
3302     case Intrinsic::launder_invariant_group:
3303     case Intrinsic::strip_invariant_group:
3304       handleInvariantGroup(I);
3305       break;
3306     case Intrinsic::bswap:
3307       handleBswap(I);
3308       break;
3309     case Intrinsic::masked_store:
3310       handleMaskedStore(I);
3311       break;
3312     case Intrinsic::masked_load:
3313       handleMaskedLoad(I);
3314       break;
3315     case Intrinsic::vector_reduce_and:
3316       handleVectorReduceAndIntrinsic(I);
3317       break;
3318     case Intrinsic::vector_reduce_or:
3319       handleVectorReduceOrIntrinsic(I);
3320       break;
3321     case Intrinsic::vector_reduce_add:
3322     case Intrinsic::vector_reduce_xor:
3323     case Intrinsic::vector_reduce_mul:
3324       handleVectorReduceIntrinsic(I);
3325       break;
3326     case Intrinsic::x86_sse_stmxcsr:
3327       handleStmxcsr(I);
3328       break;
3329     case Intrinsic::x86_sse_ldmxcsr:
3330       handleLdmxcsr(I);
3331       break;
3332     case Intrinsic::x86_avx512_vcvtsd2usi64:
3333     case Intrinsic::x86_avx512_vcvtsd2usi32:
3334     case Intrinsic::x86_avx512_vcvtss2usi64:
3335     case Intrinsic::x86_avx512_vcvtss2usi32:
3336     case Intrinsic::x86_avx512_cvttss2usi64:
3337     case Intrinsic::x86_avx512_cvttss2usi:
3338     case Intrinsic::x86_avx512_cvttsd2usi64:
3339     case Intrinsic::x86_avx512_cvttsd2usi:
3340     case Intrinsic::x86_avx512_cvtusi2ss:
3341     case Intrinsic::x86_avx512_cvtusi642sd:
3342     case Intrinsic::x86_avx512_cvtusi642ss:
3343       handleVectorConvertIntrinsic(I, 1, true);
3344       break;
3345     case Intrinsic::x86_sse2_cvtsd2si64:
3346     case Intrinsic::x86_sse2_cvtsd2si:
3347     case Intrinsic::x86_sse2_cvtsd2ss:
3348     case Intrinsic::x86_sse2_cvttsd2si64:
3349     case Intrinsic::x86_sse2_cvttsd2si:
3350     case Intrinsic::x86_sse_cvtss2si64:
3351     case Intrinsic::x86_sse_cvtss2si:
3352     case Intrinsic::x86_sse_cvttss2si64:
3353     case Intrinsic::x86_sse_cvttss2si:
3354       handleVectorConvertIntrinsic(I, 1);
3355       break;
3356     case Intrinsic::x86_sse_cvtps2pi:
3357     case Intrinsic::x86_sse_cvttps2pi:
3358       handleVectorConvertIntrinsic(I, 2);
3359       break;
3360 
3361     case Intrinsic::x86_avx512_psll_w_512:
3362     case Intrinsic::x86_avx512_psll_d_512:
3363     case Intrinsic::x86_avx512_psll_q_512:
3364     case Intrinsic::x86_avx512_pslli_w_512:
3365     case Intrinsic::x86_avx512_pslli_d_512:
3366     case Intrinsic::x86_avx512_pslli_q_512:
3367     case Intrinsic::x86_avx512_psrl_w_512:
3368     case Intrinsic::x86_avx512_psrl_d_512:
3369     case Intrinsic::x86_avx512_psrl_q_512:
3370     case Intrinsic::x86_avx512_psra_w_512:
3371     case Intrinsic::x86_avx512_psra_d_512:
3372     case Intrinsic::x86_avx512_psra_q_512:
3373     case Intrinsic::x86_avx512_psrli_w_512:
3374     case Intrinsic::x86_avx512_psrli_d_512:
3375     case Intrinsic::x86_avx512_psrli_q_512:
3376     case Intrinsic::x86_avx512_psrai_w_512:
3377     case Intrinsic::x86_avx512_psrai_d_512:
3378     case Intrinsic::x86_avx512_psrai_q_512:
3379     case Intrinsic::x86_avx512_psra_q_256:
3380     case Intrinsic::x86_avx512_psra_q_128:
3381     case Intrinsic::x86_avx512_psrai_q_256:
3382     case Intrinsic::x86_avx512_psrai_q_128:
3383     case Intrinsic::x86_avx2_psll_w:
3384     case Intrinsic::x86_avx2_psll_d:
3385     case Intrinsic::x86_avx2_psll_q:
3386     case Intrinsic::x86_avx2_pslli_w:
3387     case Intrinsic::x86_avx2_pslli_d:
3388     case Intrinsic::x86_avx2_pslli_q:
3389     case Intrinsic::x86_avx2_psrl_w:
3390     case Intrinsic::x86_avx2_psrl_d:
3391     case Intrinsic::x86_avx2_psrl_q:
3392     case Intrinsic::x86_avx2_psra_w:
3393     case Intrinsic::x86_avx2_psra_d:
3394     case Intrinsic::x86_avx2_psrli_w:
3395     case Intrinsic::x86_avx2_psrli_d:
3396     case Intrinsic::x86_avx2_psrli_q:
3397     case Intrinsic::x86_avx2_psrai_w:
3398     case Intrinsic::x86_avx2_psrai_d:
3399     case Intrinsic::x86_sse2_psll_w:
3400     case Intrinsic::x86_sse2_psll_d:
3401     case Intrinsic::x86_sse2_psll_q:
3402     case Intrinsic::x86_sse2_pslli_w:
3403     case Intrinsic::x86_sse2_pslli_d:
3404     case Intrinsic::x86_sse2_pslli_q:
3405     case Intrinsic::x86_sse2_psrl_w:
3406     case Intrinsic::x86_sse2_psrl_d:
3407     case Intrinsic::x86_sse2_psrl_q:
3408     case Intrinsic::x86_sse2_psra_w:
3409     case Intrinsic::x86_sse2_psra_d:
3410     case Intrinsic::x86_sse2_psrli_w:
3411     case Intrinsic::x86_sse2_psrli_d:
3412     case Intrinsic::x86_sse2_psrli_q:
3413     case Intrinsic::x86_sse2_psrai_w:
3414     case Intrinsic::x86_sse2_psrai_d:
3415     case Intrinsic::x86_mmx_psll_w:
3416     case Intrinsic::x86_mmx_psll_d:
3417     case Intrinsic::x86_mmx_psll_q:
3418     case Intrinsic::x86_mmx_pslli_w:
3419     case Intrinsic::x86_mmx_pslli_d:
3420     case Intrinsic::x86_mmx_pslli_q:
3421     case Intrinsic::x86_mmx_psrl_w:
3422     case Intrinsic::x86_mmx_psrl_d:
3423     case Intrinsic::x86_mmx_psrl_q:
3424     case Intrinsic::x86_mmx_psra_w:
3425     case Intrinsic::x86_mmx_psra_d:
3426     case Intrinsic::x86_mmx_psrli_w:
3427     case Intrinsic::x86_mmx_psrli_d:
3428     case Intrinsic::x86_mmx_psrli_q:
3429     case Intrinsic::x86_mmx_psrai_w:
3430     case Intrinsic::x86_mmx_psrai_d:
3431       handleVectorShiftIntrinsic(I, /* Variable */ false);
3432       break;
3433     case Intrinsic::x86_avx2_psllv_d:
3434     case Intrinsic::x86_avx2_psllv_d_256:
3435     case Intrinsic::x86_avx512_psllv_d_512:
3436     case Intrinsic::x86_avx2_psllv_q:
3437     case Intrinsic::x86_avx2_psllv_q_256:
3438     case Intrinsic::x86_avx512_psllv_q_512:
3439     case Intrinsic::x86_avx2_psrlv_d:
3440     case Intrinsic::x86_avx2_psrlv_d_256:
3441     case Intrinsic::x86_avx512_psrlv_d_512:
3442     case Intrinsic::x86_avx2_psrlv_q:
3443     case Intrinsic::x86_avx2_psrlv_q_256:
3444     case Intrinsic::x86_avx512_psrlv_q_512:
3445     case Intrinsic::x86_avx2_psrav_d:
3446     case Intrinsic::x86_avx2_psrav_d_256:
3447     case Intrinsic::x86_avx512_psrav_d_512:
3448     case Intrinsic::x86_avx512_psrav_q_128:
3449     case Intrinsic::x86_avx512_psrav_q_256:
3450     case Intrinsic::x86_avx512_psrav_q_512:
3451       handleVectorShiftIntrinsic(I, /* Variable */ true);
3452       break;
3453 
3454     case Intrinsic::x86_sse2_packsswb_128:
3455     case Intrinsic::x86_sse2_packssdw_128:
3456     case Intrinsic::x86_sse2_packuswb_128:
3457     case Intrinsic::x86_sse41_packusdw:
3458     case Intrinsic::x86_avx2_packsswb:
3459     case Intrinsic::x86_avx2_packssdw:
3460     case Intrinsic::x86_avx2_packuswb:
3461     case Intrinsic::x86_avx2_packusdw:
3462       handleVectorPackIntrinsic(I);
3463       break;
3464 
3465     case Intrinsic::x86_mmx_packsswb:
3466     case Intrinsic::x86_mmx_packuswb:
3467       handleVectorPackIntrinsic(I, 16);
3468       break;
3469 
3470     case Intrinsic::x86_mmx_packssdw:
3471       handleVectorPackIntrinsic(I, 32);
3472       break;
3473 
3474     case Intrinsic::x86_mmx_psad_bw:
3475     case Intrinsic::x86_sse2_psad_bw:
3476     case Intrinsic::x86_avx2_psad_bw:
3477       handleVectorSadIntrinsic(I);
3478       break;
3479 
3480     case Intrinsic::x86_sse2_pmadd_wd:
3481     case Intrinsic::x86_avx2_pmadd_wd:
3482     case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
3483     case Intrinsic::x86_avx2_pmadd_ub_sw:
3484       handleVectorPmaddIntrinsic(I);
3485       break;
3486 
3487     case Intrinsic::x86_ssse3_pmadd_ub_sw:
3488       handleVectorPmaddIntrinsic(I, 8);
3489       break;
3490 
3491     case Intrinsic::x86_mmx_pmadd_wd:
3492       handleVectorPmaddIntrinsic(I, 16);
3493       break;
3494 
3495     case Intrinsic::x86_sse_cmp_ss:
3496     case Intrinsic::x86_sse2_cmp_sd:
3497     case Intrinsic::x86_sse_comieq_ss:
3498     case Intrinsic::x86_sse_comilt_ss:
3499     case Intrinsic::x86_sse_comile_ss:
3500     case Intrinsic::x86_sse_comigt_ss:
3501     case Intrinsic::x86_sse_comige_ss:
3502     case Intrinsic::x86_sse_comineq_ss:
3503     case Intrinsic::x86_sse_ucomieq_ss:
3504     case Intrinsic::x86_sse_ucomilt_ss:
3505     case Intrinsic::x86_sse_ucomile_ss:
3506     case Intrinsic::x86_sse_ucomigt_ss:
3507     case Intrinsic::x86_sse_ucomige_ss:
3508     case Intrinsic::x86_sse_ucomineq_ss:
3509     case Intrinsic::x86_sse2_comieq_sd:
3510     case Intrinsic::x86_sse2_comilt_sd:
3511     case Intrinsic::x86_sse2_comile_sd:
3512     case Intrinsic::x86_sse2_comigt_sd:
3513     case Intrinsic::x86_sse2_comige_sd:
3514     case Intrinsic::x86_sse2_comineq_sd:
3515     case Intrinsic::x86_sse2_ucomieq_sd:
3516     case Intrinsic::x86_sse2_ucomilt_sd:
3517     case Intrinsic::x86_sse2_ucomile_sd:
3518     case Intrinsic::x86_sse2_ucomigt_sd:
3519     case Intrinsic::x86_sse2_ucomige_sd:
3520     case Intrinsic::x86_sse2_ucomineq_sd:
3521       handleVectorCompareScalarIntrinsic(I);
3522       break;
3523 
3524     case Intrinsic::x86_sse_cmp_ps:
3525     case Intrinsic::x86_sse2_cmp_pd:
3526       // FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
3527       // generates reasonable-looking IR that fails in the backend with "Do not
3528       // know how to split the result of this operator!".
3529       handleVectorComparePackedIntrinsic(I);
3530       break;
3531 
3532     case Intrinsic::x86_bmi_bextr_32:
3533     case Intrinsic::x86_bmi_bextr_64:
3534     case Intrinsic::x86_bmi_bzhi_32:
3535     case Intrinsic::x86_bmi_bzhi_64:
3536     case Intrinsic::x86_bmi_pdep_32:
3537     case Intrinsic::x86_bmi_pdep_64:
3538     case Intrinsic::x86_bmi_pext_32:
3539     case Intrinsic::x86_bmi_pext_64:
3540       handleBmiIntrinsic(I);
3541       break;
3542 
3543     case Intrinsic::x86_pclmulqdq:
3544     case Intrinsic::x86_pclmulqdq_256:
3545     case Intrinsic::x86_pclmulqdq_512:
3546       handlePclmulIntrinsic(I);
3547       break;
3548 
3549     case Intrinsic::x86_sse41_round_sd:
3550       handleUnarySdIntrinsic(I);
3551       break;
3552     case Intrinsic::x86_sse2_max_sd:
3553     case Intrinsic::x86_sse2_min_sd:
3554       handleBinarySdIntrinsic(I);
3555       break;
3556 
3557     case Intrinsic::fshl:
3558     case Intrinsic::fshr:
3559       handleFunnelShift(I);
3560       break;
3561 
3562     case Intrinsic::is_constant:
3563       // The result of llvm.is.constant() is always defined.
3564       setShadow(&I, getCleanShadow(&I));
3565       setOrigin(&I, getCleanOrigin());
3566       break;
3567 
3568     default:
3569       if (!handleUnknownIntrinsic(I))
3570         visitInstruction(I);
3571       break;
3572     }
3573   }
3574 
3575   void visitLibAtomicLoad(CallBase &CB) {
3576     // Since we use getNextNode here, we can't have CB terminate the BB.
3577     assert(isa<CallInst>(CB));
3578 
3579     IRBuilder<> IRB(&CB);
3580     Value *Size = CB.getArgOperand(0);
3581     Value *SrcPtr = CB.getArgOperand(1);
3582     Value *DstPtr = CB.getArgOperand(2);
3583     Value *Ordering = CB.getArgOperand(3);
3584     // Convert the call to have at least Acquire ordering to make sure
3585     // the shadow operations aren't reordered before it.
3586     Value *NewOrdering =
3587         IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
3588     CB.setArgOperand(3, NewOrdering);
3589 
3590     IRBuilder<> NextIRB(CB.getNextNode());
3591     NextIRB.SetCurrentDebugLocation(CB.getDebugLoc());
3592 
3593     Value *SrcShadowPtr, *SrcOriginPtr;
3594     std::tie(SrcShadowPtr, SrcOriginPtr) =
3595         getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
3596                            /*isStore*/ false);
3597     Value *DstShadowPtr =
3598         getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
3599                            /*isStore*/ true)
3600             .first;
3601 
3602     NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
3603     if (MS.TrackOrigins) {
3604       Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
3605                                                    kMinOriginAlignment);
3606       Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
3607       NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
3608     }
3609   }
3610 
3611   void visitLibAtomicStore(CallBase &CB) {
3612     IRBuilder<> IRB(&CB);
3613     Value *Size = CB.getArgOperand(0);
3614     Value *DstPtr = CB.getArgOperand(2);
3615     Value *Ordering = CB.getArgOperand(3);
3616     // Convert the call to have at least Release ordering to make sure
3617     // the shadow operations aren't reordered after it.
3618     Value *NewOrdering =
3619         IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
3620     CB.setArgOperand(3, NewOrdering);
3621 
3622     Value *DstShadowPtr =
3623         getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
3624                            /*isStore*/ true)
3625             .first;
3626 
3627     // Atomic store always paints clean shadow/origin. See file header.
3628     IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
3629                      Align(1));
3630   }
3631 
3632   void visitCallBase(CallBase &CB) {
3633     assert(!CB.getMetadata("nosanitize"));
3634     if (CB.isInlineAsm()) {
3635       // For inline asm (either a call to asm function, or callbr instruction),
3636       // do the usual thing: check argument shadow and mark all outputs as
3637       // clean. Note that any side effects of the inline asm that are not
3638       // immediately visible in its constraints are not handled.
3639       if (ClHandleAsmConservative && MS.CompileKernel)
3640         visitAsmInstruction(CB);
3641       else
3642         visitInstruction(CB);
3643       return;
3644     }
3645     LibFunc LF;
3646     if (TLI->getLibFunc(CB, LF)) {
3647       // libatomic.a functions need to have special handling because there isn't
3648       // a good way to intercept them or compile the library with
3649       // instrumentation.
3650       switch (LF) {
3651       case LibFunc_atomic_load:
3652         if (!isa<CallInst>(CB)) {
3653           llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
3654                           "Ignoring!\n";
3655           break;
3656         }
3657         visitLibAtomicLoad(CB);
3658         return;
3659       case LibFunc_atomic_store:
3660         visitLibAtomicStore(CB);
3661         return;
3662       default:
3663         break;
3664       }
3665     }
3666 
3667     if (auto *Call = dyn_cast<CallInst>(&CB)) {
3668       assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
3669 
3670       // We are going to insert code that relies on the fact that the callee
3671       // will become a non-readonly function after it is instrumented by us. To
3672       // prevent this code from being optimized out, mark that function
3673       // non-readonly in advance.
3674       AttributeMask B;
3675       B.addAttribute(Attribute::ReadOnly)
3676           .addAttribute(Attribute::ReadNone)
3677           .addAttribute(Attribute::WriteOnly)
3678           .addAttribute(Attribute::ArgMemOnly)
3679           .addAttribute(Attribute::Speculatable);
3680 
3681       Call->removeFnAttrs(B);
3682       if (Function *Func = Call->getCalledFunction()) {
3683         Func->removeFnAttrs(B);
3684       }
3685 
3686       maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
3687     }
3688     IRBuilder<> IRB(&CB);
3689     bool MayCheckCall = MS.EagerChecks;
3690     if (Function *Func = CB.getCalledFunction()) {
3691       // __sanitizer_unaligned_{load,store} functions may be called by users
3692       // and always expect shadows in the TLS. So don't check them.
3693       MayCheckCall &= !Func->getName().startswith("__sanitizer_unaligned_");
3694     }
3695 
3696     unsigned ArgOffset = 0;
3697     LLVM_DEBUG(dbgs() << "  CallSite: " << CB << "\n");
3698     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
3699          ++ArgIt) {
3700       Value *A = *ArgIt;
3701       unsigned i = ArgIt - CB.arg_begin();
3702       if (!A->getType()->isSized()) {
3703         LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
3704         continue;
3705       }
3706       unsigned Size = 0;
3707       const DataLayout &DL = F.getParent()->getDataLayout();
3708 
3709       bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
3710       bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
3711       bool EagerCheck = MayCheckCall && !ByVal && NoUndef;
3712 
3713       if (EagerCheck) {
3714         insertShadowCheck(A, &CB);
3715         Size = DL.getTypeAllocSize(A->getType());
3716       } else {
3717         Value *Store = nullptr;
3718         // Compute the Shadow for arg even if it is ByVal, because
3719         // in that case getShadow() will copy the actual arg shadow to
3720         // __msan_param_tls.
3721         Value *ArgShadow = getShadow(A);
3722         Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
3723         LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
3724                           << " Shadow: " << *ArgShadow << "\n");
3725         if (ByVal) {
3726           // ByVal requires some special handling as it's too big for a single
3727           // load
3728           assert(A->getType()->isPointerTy() &&
3729                  "ByVal argument is not a pointer!");
3730           Size = DL.getTypeAllocSize(CB.getParamByValType(i));
3731           if (ArgOffset + Size > kParamTLSSize)
3732             break;
3733           const MaybeAlign ParamAlignment(CB.getParamAlign(i));
3734           MaybeAlign Alignment = llvm::None;
3735           if (ParamAlignment)
3736             Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
3737           Value *AShadowPtr, *AOriginPtr;
3738           std::tie(AShadowPtr, AOriginPtr) =
3739               getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
3740                                  /*isStore*/ false);
3741           if (!PropagateShadow) {
3742             Store = IRB.CreateMemSet(ArgShadowBase,
3743                                      Constant::getNullValue(IRB.getInt8Ty()),
3744                                      Size, Alignment);
3745           } else {
3746             Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
3747                                      Alignment, Size);
3748             if (MS.TrackOrigins) {
3749               Value *ArgOriginBase = getOriginPtrForArgument(A, IRB, ArgOffset);
3750               // FIXME: OriginSize should be:
3751               // alignTo(A % kMinOriginAlignment + Size, kMinOriginAlignment)
3752               unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
3753               IRB.CreateMemCpy(
3754                   ArgOriginBase,
3755                   /* by origin_tls[ArgOffset] */ kMinOriginAlignment,
3756                   AOriginPtr,
3757                   /* by getShadowOriginPtr */ kMinOriginAlignment, OriginSize);
3758             }
3759           }
3760         } else {
3761           // Any other parameters mean we need bit-grained tracking of uninit
3762           // data
3763           Size = DL.getTypeAllocSize(A->getType());
3764           if (ArgOffset + Size > kParamTLSSize)
3765             break;
3766           Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
3767                                          kShadowTLSAlignment);
3768           Constant *Cst = dyn_cast<Constant>(ArgShadow);
3769           if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
3770             IRB.CreateStore(getOrigin(A),
3771                             getOriginPtrForArgument(A, IRB, ArgOffset));
3772           }
3773         }
3774         (void)Store;
3775         assert(Store != nullptr);
3776         LLVM_DEBUG(dbgs() << "  Param:" << *Store << "\n");
3777       }
3778       assert(Size != 0);
3779       ArgOffset += alignTo(Size, kShadowTLSAlignment);
3780     }
3781     LLVM_DEBUG(dbgs() << "  done with call args\n");
3782 
3783     FunctionType *FT = CB.getFunctionType();
3784     if (FT->isVarArg()) {
3785       VAHelper->visitCallBase(CB, IRB);
3786     }
3787 
3788     // Now, get the shadow for the RetVal.
3789     if (!CB.getType()->isSized())
3790       return;
3791     // Don't emit the epilogue for musttail call returns.
3792     if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
3793       return;
3794 
3795     if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
3796       setShadow(&CB, getCleanShadow(&CB));
3797       setOrigin(&CB, getCleanOrigin());
3798       return;
3799     }
3800 
3801     IRBuilder<> IRBBefore(&CB);
3802     // Until we have full dynamic coverage, make sure the retval shadow is 0.
3803     Value *Base = getShadowPtrForRetval(&CB, IRBBefore);
3804     IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
3805                                  kShadowTLSAlignment);
3806     BasicBlock::iterator NextInsn;
3807     if (isa<CallInst>(CB)) {
3808       NextInsn = ++CB.getIterator();
3809       assert(NextInsn != CB.getParent()->end());
3810     } else {
3811       BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
3812       if (!NormalDest->getSinglePredecessor()) {
3813         // FIXME: this case is tricky, so we are just conservative here.
3814         // Perhaps we need to split the edge between this BB and NormalDest,
3815         // but a naive attempt to use SplitEdge leads to a crash.
3816         setShadow(&CB, getCleanShadow(&CB));
3817         setOrigin(&CB, getCleanOrigin());
3818         return;
3819       }
3820       // FIXME: NextInsn is likely in a basic block that has not been visited yet.
3821       // Anything inserted there will be instrumented by MSan later!
3822       NextInsn = NormalDest->getFirstInsertionPt();
3823       assert(NextInsn != NormalDest->end() &&
3824              "Could not find insertion point for retval shadow load");
3825     }
3826     IRBuilder<> IRBAfter(&*NextInsn);
3827     Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
3828         getShadowTy(&CB), getShadowPtrForRetval(&CB, IRBAfter),
3829         kShadowTLSAlignment, "_msret");
3830     setShadow(&CB, RetvalShadow);
3831     if (MS.TrackOrigins)
3832       setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy,
3833                                          getOriginPtrForRetval(IRBAfter)));
3834   }
3835 
3836   bool isAMustTailRetVal(Value *RetVal) {
3837     if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
3838       RetVal = I->getOperand(0);
3839     }
3840     if (auto *I = dyn_cast<CallInst>(RetVal)) {
3841       return I->isMustTailCall();
3842     }
3843     return false;
3844   }
3845 
3846   void visitReturnInst(ReturnInst &I) {
3847     IRBuilder<> IRB(&I);
3848     Value *RetVal = I.getReturnValue();
3849     if (!RetVal) return;
3850     // Don't emit the epilogue for musttail call returns.
3851     if (isAMustTailRetVal(RetVal)) return;
3852     Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
3853     bool HasNoUndef =
3854         F.hasRetAttribute(Attribute::NoUndef);
3855     bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
3856     // FIXME: Consider using SpecialCaseList to specify a list of functions that
3857     // must always return fully initialized values. For now, we hardcode "main".
3858     bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");
3859 
3860     Value *Shadow = getShadow(RetVal);
3861     bool StoreOrigin = true;
3862     if (EagerCheck) {
3863       insertShadowCheck(RetVal, &I);
3864       Shadow = getCleanShadow(RetVal);
3865       StoreOrigin = false;
3866     }
3867 
3868     // The caller may still expect information passed over TLS if we pass our
3869     // check
3870     if (StoreShadow) {
3871       IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
3872       if (MS.TrackOrigins && StoreOrigin)
3873         IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
3874     }
3875   }
3876 
3877   void visitPHINode(PHINode &I) {
3878     IRBuilder<> IRB(&I);
3879     if (!PropagateShadow) {
3880       setShadow(&I, getCleanShadow(&I));
3881       setOrigin(&I, getCleanOrigin());
3882       return;
3883     }
3884 
3885     ShadowPHINodes.push_back(&I);
3886     setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
3887                                 "_msphi_s"));
3888     if (MS.TrackOrigins)
3889       setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
3890                                   "_msphi_o"));
3891   }
3892 
3893   Value *getLocalVarDescription(AllocaInst &I) {
3894     SmallString<2048> StackDescriptionStorage;
3895     raw_svector_ostream StackDescription(StackDescriptionStorage);
3896     // We create a string with a description of the stack allocation and
3897     // pass it into __msan_set_alloca_origin.
3898     // It will be printed by the run-time if stack-originated UMR is found.
3899     // The first 4 bytes of the string are set to '----' and will be replaced
3900     // by __msan_va_arg_overflow_size_tls at the first call.
3901     StackDescription << "----" << I.getName() << "@" << F.getName();
3902     return createPrivateNonConstGlobalForString(*F.getParent(),
3903                                                 StackDescription.str());
3904   }
3905 
3906   void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
3907     if (PoisonStack && ClPoisonStackWithCall) {
3908       IRB.CreateCall(MS.MsanPoisonStackFn,
3909                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
3910     } else {
3911       Value *ShadowBase, *OriginBase;
3912       std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
3913           &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);
3914 
3915       Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
3916       IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
3917     }
3918 
3919     if (PoisonStack && MS.TrackOrigins) {
3920       Value *Descr = getLocalVarDescription(I);
3921       IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
3922                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
3923                       IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
3924                       IRB.CreatePointerCast(&F, MS.IntptrTy)});
3925     }
3926   }
3927 
3928   void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
3929     Value *Descr = getLocalVarDescription(I);
3930     if (PoisonStack) {
3931       IRB.CreateCall(MS.MsanPoisonAllocaFn,
3932                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
3933                       IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
3934     } else {
3935       IRB.CreateCall(MS.MsanUnpoisonAllocaFn,
3936                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
3937     }
3938   }
3939 
3940   void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
3941     if (!InsPoint)
3942       InsPoint = &I;
3943     IRBuilder<> IRB(InsPoint->getNextNode());
3944     const DataLayout &DL = F.getParent()->getDataLayout();
3945     uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType());
3946     Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize);
3947     if (I.isArrayAllocation())
3948       Len = IRB.CreateMul(Len, I.getArraySize());
3949 
3950     if (MS.CompileKernel)
3951       poisonAllocaKmsan(I, IRB, Len);
3952     else
3953       poisonAllocaUserspace(I, IRB, Len);
3954   }
3955 
3956   void visitAllocaInst(AllocaInst &I) {
3957     setShadow(&I, getCleanShadow(&I));
3958     setOrigin(&I, getCleanOrigin());
3959     // We'll get to this alloca later unless it's poisoned at the corresponding
3960     // llvm.lifetime.start.
3961     AllocaSet.insert(&I);
3962   }
3963 
3964   void visitSelectInst(SelectInst& I) {
3965     IRBuilder<> IRB(&I);
3966     // a = select b, c, d
3967     Value *B = I.getCondition();
3968     Value *C = I.getTrueValue();
3969     Value *D = I.getFalseValue();
3970     Value *Sb = getShadow(B);
3971     Value *Sc = getShadow(C);
3972     Value *Sd = getShadow(D);
3973 
3974     // Result shadow if condition shadow is 0.
3975     Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
3976     Value *Sa1;
3977     if (I.getType()->isAggregateType()) {
3978       // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
3979       // an extra "select". This results in much more compact IR.
3980       // Sa = select Sb, poisoned, (select b, Sc, Sd)
3981       Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
3982     } else {
3983       // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
3984       // If Sb (condition is poisoned), look for bits in c and d that are equal
3985       // and both unpoisoned.
3986       // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
3987 
3988       // Cast arguments to shadow-compatible type.
3989       C = CreateAppToShadowCast(IRB, C);
3990       D = CreateAppToShadowCast(IRB, D);
3991 
3992       // Result shadow if condition shadow is 1.
3993       Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
3994     }
3995     Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
3996     setShadow(&I, Sa);
3997     if (MS.TrackOrigins) {
3998       // Origins are always i32, so any vector conditions must be flattened.
3999       // FIXME: consider tracking vector origins for app vectors?
4000       if (B->getType()->isVectorTy()) {
4001         Type *FlatTy = getShadowTyNoVec(B->getType());
4002         B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
4003                                 ConstantInt::getNullValue(FlatTy));
4004         Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
4005                                       ConstantInt::getNullValue(FlatTy));
4006       }
4007       // a = select b, c, d
4008       // Oa = Sb ? Ob : (b ? Oc : Od)
4009       setOrigin(
4010           &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
4011                                IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
4012                                                 getOrigin(I.getFalseValue()))));
4013     }
4014   }
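  // Worked example for the scalar (non-aggregate) case above, as a sketch with
  // assumed i8 operands: for
  //   %a = select i1 %b, i8 %c, i8 %d
  // the emitted shadow computation is roughly
  //   %sa0 = select i1 %b, i8 %sc, i8 %sd        ; shadow if %b is defined
  //   %sa1 = or i8 (or i8 (xor i8 %c, %d), %sc), %sd
  //   %sa  = select i1 %sb, i8 %sa1, i8 %sa0
  // so a bit stays clean under a poisoned condition only if it is equal and
  // initialized in both %c and %d.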
4015 
4016   void visitLandingPadInst(LandingPadInst &I) {
4017     // Do nothing.
4018     // See https://github.com/google/sanitizers/issues/504
4019     setShadow(&I, getCleanShadow(&I));
4020     setOrigin(&I, getCleanOrigin());
4021   }
4022 
4023   void visitCatchSwitchInst(CatchSwitchInst &I) {
4024     setShadow(&I, getCleanShadow(&I));
4025     setOrigin(&I, getCleanOrigin());
4026   }
4027 
4028   void visitFuncletPadInst(FuncletPadInst &I) {
4029     setShadow(&I, getCleanShadow(&I));
4030     setOrigin(&I, getCleanOrigin());
4031   }
4032 
4033   void visitGetElementPtrInst(GetElementPtrInst &I) {
4034     handleShadowOr(I);
4035   }
4036 
4037   void visitExtractValueInst(ExtractValueInst &I) {
4038     IRBuilder<> IRB(&I);
4039     Value *Agg = I.getAggregateOperand();
4040     LLVM_DEBUG(dbgs() << "ExtractValue:  " << I << "\n");
4041     Value *AggShadow = getShadow(Agg);
4042     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
4043     Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
4044     LLVM_DEBUG(dbgs() << "   ResShadow:  " << *ResShadow << "\n");
4045     setShadow(&I, ResShadow);
4046     setOriginForNaryOp(I);
4047   }
4048 
4049   void visitInsertValueInst(InsertValueInst &I) {
4050     IRBuilder<> IRB(&I);
4051     LLVM_DEBUG(dbgs() << "InsertValue:  " << I << "\n");
4052     Value *AggShadow = getShadow(I.getAggregateOperand());
4053     Value *InsShadow = getShadow(I.getInsertedValueOperand());
4054     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
4055     LLVM_DEBUG(dbgs() << "   InsShadow:  " << *InsShadow << "\n");
4056     Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
4057     LLVM_DEBUG(dbgs() << "   Res:        " << *Res << "\n");
4058     setShadow(&I, Res);
4059     setOriginForNaryOp(I);
4060   }
4061 
4062   void dumpInst(Instruction &I) {
4063     if (CallInst *CI = dyn_cast<CallInst>(&I)) {
4064       errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
4065     } else {
4066       errs() << "ZZZ " << I.getOpcodeName() << "\n";
4067     }
4068     errs() << "QQQ " << I << "\n";
4069   }
4070 
4071   void visitResumeInst(ResumeInst &I) {
4072     LLVM_DEBUG(dbgs() << "Resume: " << I << "\n");
4073     // Nothing to do here.
4074   }
4075 
4076   void visitCleanupReturnInst(CleanupReturnInst &CRI) {
4077     LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
4078     // Nothing to do here.
4079   }
4080 
4081   void visitCatchReturnInst(CatchReturnInst &CRI) {
4082     LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
4083     // Nothing to do here.
4084   }
4085 
4086   void instrumentAsmArgument(Value *Operand, Instruction &I, IRBuilder<> &IRB,
4087                              const DataLayout &DL, bool isOutput) {
4088     // For each assembly argument, we check that its value is initialized.
4089     // If the argument is a pointer, we assume it points to a single element
4090     // of the corresponding type (or to an 8-byte word, if the type is unsized).
4091     // Each such pointer is instrumented with a call to the runtime library.
4092     Type *OpType = Operand->getType();
4093     // Check the operand value itself.
4094     insertShadowCheck(Operand, &I);
4095     if (!OpType->isPointerTy() || !isOutput) {
4096       assert(!isOutput);
4097       return;
4098     }
4099     Type *ElType = OpType->getPointerElementType();
4100     if (!ElType->isSized())
4101       return;
4102     int Size = DL.getTypeStoreSize(ElType);
4103     Value *Ptr = IRB.CreatePointerCast(Operand, IRB.getInt8PtrTy());
4104     Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
4105     IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Ptr, SizeVal});
4106   }
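  // Sketch of the effect (assumed operand, for illustration): for an output
  // operand 'int *p' with an indirect ("=*m") constraint, the code above checks
  // the shadow of p itself and then emits
  //   IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {p cast to i8*, i64 4})
  // so the pointed-to bytes are unpoisoned before the asm statement writes them.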
4107 
4108   /// Get the number of output arguments returned by pointers.
4109   int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
4110     int NumRetOutputs = 0;
4111     int NumOutputs = 0;
4112     Type *RetTy = cast<Value>(CB)->getType();
4113     if (!RetTy->isVoidTy()) {
4114       // Register outputs are returned via the CallInst return value.
4115       auto *ST = dyn_cast<StructType>(RetTy);
4116       if (ST)
4117         NumRetOutputs = ST->getNumElements();
4118       else
4119         NumRetOutputs = 1;
4120     }
4121     InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
4122     for (const InlineAsm::ConstraintInfo &Info : Constraints) {
4123       switch (Info.Type) {
4124       case InlineAsm::isOutput:
4125         NumOutputs++;
4126         break;
4127       default:
4128         break;
4129       }
4130     }
4131     return NumOutputs - NumRetOutputs;
4132   }
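  // Example (assumed constraint string): for inline asm with constraints
  // "=r,=*m,r", ParseConstraints() reports two isOutput entries; the non-void
  // return type accounts for one register output, so this returns 2 - 1 = 1,
  // i.e. one output that is passed back through a pointer operand.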
4133 
4134   void visitAsmInstruction(Instruction &I) {
4135     // Conservative inline assembly handling: check for poisoned shadow of
4136     // asm() arguments, then unpoison the result and all the memory locations
4137     // pointed to by those arguments.
4138     // An inline asm() statement in C++ contains lists of input and output
4139     // arguments used by the assembly code. These are mapped to operands of the
4140     // CallInst as follows:
4141     //  - nR register outputs ("=r") are returned by value in a single structure
4142     //  (SSA value of the CallInst);
4143     //  - nO other outputs ("=m" and others) are returned by pointer as the first
4144     //  nO operands of the CallInst;
4145     //  - nI inputs ("r", "m" and others) are passed to the CallInst as the
4146     //  remaining nI operands.
4147     // The total number of asm() arguments in the source is nR+nO+nI, and the
4148     // corresponding CallInst has nO+nI+1 operands (the last operand is the
4149     // function to be called).
4150     const DataLayout &DL = F.getParent()->getDataLayout();
4151     CallBase *CB = cast<CallBase>(&I);
4152     IRBuilder<> IRB(&I);
4153     InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
4154     int OutputArgs = getNumOutputArgs(IA, CB);
4155     // The last operand of a CallInst is the function itself.
4156     int NumOperands = CB->getNumOperands() - 1;
4157 
4158     // Check input arguments. We do this before unpoisoning the output arguments,
4159     // so that we don't overwrite uninitialized values before checking them.
4160     for (int i = OutputArgs; i < NumOperands; i++) {
4161       Value *Operand = CB->getOperand(i);
4162       instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ false);
4163     }
4164     // Unpoison output arguments. This must happen before the actual InlineAsm
4165     // call, so that the shadow for memory published in the asm() statement
4166     // remains valid.
4167     for (int i = 0; i < OutputArgs; i++) {
4168       Value *Operand = CB->getOperand(i);
4169       instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ true);
4170     }
4171 
4172     setShadow(&I, getCleanShadow(&I));
4173     setOrigin(&I, getCleanOrigin());
4174   }
4175 
4176   void visitFreezeInst(FreezeInst &I) {
4177     // Freeze always returns a fully defined value.
4178     setShadow(&I, getCleanShadow(&I));
4179     setOrigin(&I, getCleanOrigin());
4180   }
4181 
4182   void visitInstruction(Instruction &I) {
4183     // Everything else: stop propagating and check for poisoned shadow.
4184     if (ClDumpStrictInstructions)
4185       dumpInst(I);
4186     LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
4187     for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
4188       Value *Operand = I.getOperand(i);
4189       if (Operand->getType()->isSized())
4190         insertShadowCheck(Operand, &I);
4191     }
4192     setShadow(&I, getCleanShadow(&I));
4193     setOrigin(&I, getCleanOrigin());
4194   }
4195 };
4196 
4197 /// AMD64-specific implementation of VarArgHelper.
4198 struct VarArgAMD64Helper : public VarArgHelper {
4199   // An unfortunate workaround for asymmetric lowering of va_arg stuff.
4200   // See a comment in visitCallBase for more details.
4201   static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
4202   static const unsigned AMD64FpEndOffsetSSE = 176;
4203   // If SSE is disabled, fp_offset in va_list is zero.
4204   static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
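  // Resulting __msan_va_arg_tls layout, summarizing the constants above
  // (assuming SSE is enabled): bytes [0, 48) hold the shadow of general-purpose
  // register arguments, [48, 176) the shadow of floating-point register
  // arguments, and the shadow of overflow (stack) arguments starts at 176.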
4205 
4206   unsigned AMD64FpEndOffset;
4207   Function &F;
4208   MemorySanitizer &MS;
4209   MemorySanitizerVisitor &MSV;
4210   Value *VAArgTLSCopy = nullptr;
4211   Value *VAArgTLSOriginCopy = nullptr;
4212   Value *VAArgOverflowSize = nullptr;
4213 
4214   SmallVector<CallInst*, 16> VAStartInstrumentationList;
4215 
4216   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
4217 
4218   VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
4219                     MemorySanitizerVisitor &MSV)
4220       : F(F), MS(MS), MSV(MSV) {
4221     AMD64FpEndOffset = AMD64FpEndOffsetSSE;
4222     for (const auto &Attr : F.getAttributes().getFnAttrs()) {
4223       if (Attr.isStringAttribute() &&
4224           (Attr.getKindAsString() == "target-features")) {
4225         if (Attr.getValueAsString().contains("-sse"))
4226           AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
4227         break;
4228       }
4229     }
4230   }
4231 
4232   ArgKind classifyArgument(Value* arg) {
4233     // A very rough approximation of X86_64 argument classification rules.
4234     Type *T = arg->getType();
4235     if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
4236       return AK_FloatingPoint;
4237     if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
4238       return AK_GeneralPurpose;
4239     if (T->isPointerTy())
4240       return AK_GeneralPurpose;
4241     return AK_Memory;
4242   }
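  // For illustration (assumed argument types): 'double' and '<4 x float>'
  // classify as AK_FloatingPoint, 'i32' and pointers as AK_GeneralPurpose,
  // while 'i128' or '[2 x i64]' fall through to AK_Memory.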
4243 
4244   // For VarArg functions, store the argument shadow in an ABI-specific format
4245   // that corresponds to va_list layout.
4246   // We do this because Clang lowers va_arg in the frontend, and this pass
4247   // only sees the low level code that deals with va_list internals.
4248   // A much easier alternative (provided that Clang emits va_arg instructions)
4249   // would have been to associate each live instance of va_list with a copy of
4250   // MSanParamTLS, and extract shadow on va_arg() call in the argument list
4251   // order.
4252   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4253     unsigned GpOffset = 0;
4254     unsigned FpOffset = AMD64GpEndOffset;
4255     unsigned OverflowOffset = AMD64FpEndOffset;
4256     const DataLayout &DL = F.getParent()->getDataLayout();
4257     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
4258          ++ArgIt) {
4259       Value *A = *ArgIt;
4260       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
4261       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
4262       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
4263       if (IsByVal) {
4264         // ByVal arguments always go to the overflow area.
4265         // Fixed arguments passed through the overflow area will be stepped
4266         // over by va_start, so don't count them towards the offset.
4267         if (IsFixed)
4268           continue;
4269         assert(A->getType()->isPointerTy());
4270         Type *RealTy = CB.getParamByValType(ArgNo);
4271         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
4272         Value *ShadowBase = getShadowPtrForVAArgument(
4273             RealTy, IRB, OverflowOffset, alignTo(ArgSize, 8));
4274         Value *OriginBase = nullptr;
4275         if (MS.TrackOrigins)
4276           OriginBase = getOriginPtrForVAArgument(RealTy, IRB, OverflowOffset);
4277         OverflowOffset += alignTo(ArgSize, 8);
4278         if (!ShadowBase)
4279           continue;
4280         Value *ShadowPtr, *OriginPtr;
4281         std::tie(ShadowPtr, OriginPtr) =
4282             MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
4283                                    /*isStore*/ false);
4284 
4285         IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
4286                          kShadowTLSAlignment, ArgSize);
4287         if (MS.TrackOrigins)
4288           IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
4289                            kShadowTLSAlignment, ArgSize);
4290       } else {
4291         ArgKind AK = classifyArgument(A);
4292         if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
4293           AK = AK_Memory;
4294         if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
4295           AK = AK_Memory;
4296         Value *ShadowBase, *OriginBase = nullptr;
4297         switch (AK) {
4298           case AK_GeneralPurpose:
4299             ShadowBase =
4300                 getShadowPtrForVAArgument(A->getType(), IRB, GpOffset, 8);
4301             if (MS.TrackOrigins)
4302               OriginBase =
4303                   getOriginPtrForVAArgument(A->getType(), IRB, GpOffset);
4304             GpOffset += 8;
4305             break;
4306           case AK_FloatingPoint:
4307             ShadowBase =
4308                 getShadowPtrForVAArgument(A->getType(), IRB, FpOffset, 16);
4309             if (MS.TrackOrigins)
4310               OriginBase =
4311                   getOriginPtrForVAArgument(A->getType(), IRB, FpOffset);
4312             FpOffset += 16;
4313             break;
4314           case AK_Memory:
4315             if (IsFixed)
4316               continue;
4317             uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4318             ShadowBase =
4319                 getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset, 8);
4320             if (MS.TrackOrigins)
4321               OriginBase =
4322                   getOriginPtrForVAArgument(A->getType(), IRB, OverflowOffset);
4323             OverflowOffset += alignTo(ArgSize, 8);
4324         }
4325         // Take fixed arguments into account for GpOffset and FpOffset,
4326         // but don't actually store shadows for them.
4327         // TODO(glider): don't call get*PtrForVAArgument() for them.
4328         if (IsFixed)
4329           continue;
4330         if (!ShadowBase)
4331           continue;
4332         Value *Shadow = MSV.getShadow(A);
4333         IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
4334         if (MS.TrackOrigins) {
4335           Value *Origin = MSV.getOrigin(A);
4336           unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
4337           MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
4338                           std::max(kShadowTLSAlignment, kMinOriginAlignment));
4339         }
4340       }
4341     }
4342     Constant *OverflowSize =
4343       ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
4344     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
4345   }
4346 
4347   /// Compute the shadow address for a given va_arg.
4348   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4349                                    unsigned ArgOffset, unsigned ArgSize) {
4350     // Make sure we don't overflow __msan_va_arg_tls.
4351     if (ArgOffset + ArgSize > kParamTLSSize)
4352       return nullptr;
4353     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4354     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4355     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4356                               "_msarg_va_s");
4357   }
4358 
4359   /// Compute the origin address for a given va_arg.
4360   Value *getOriginPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
4361     Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
4362     // getOriginPtrForVAArgument() is always called after
4363     // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
4364     // overflow.
4365     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4366     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
4367                               "_msarg_va_o");
4368   }
4369 
4370   void unpoisonVAListTagForInst(IntrinsicInst &I) {
4371     IRBuilder<> IRB(&I);
4372     Value *VAListTag = I.getArgOperand(0);
4373     Value *ShadowPtr, *OriginPtr;
4374     const Align Alignment = Align(8);
4375     std::tie(ShadowPtr, OriginPtr) =
4376         MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
4377                                /*isStore*/ true);
4378 
4379     // Unpoison the whole __va_list_tag.
4380     // FIXME: magic ABI constants.
4381     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4382                      /* size */ 24, Alignment, false);
4383     // We shouldn't need to zero out the origins, as they're only checked for
4384     // nonzero shadow.
4385   }
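  // The 24 bytes above correspond to the x86_64 __va_list_tag fields:
  // gp_offset (4), fp_offset (4), overflow_arg_area (8) and reg_save_area (8).
  // This is background on the ABI constant, not something the pass checks.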
4386 
4387   void visitVAStartInst(VAStartInst &I) override {
4388     if (F.getCallingConv() == CallingConv::Win64)
4389       return;
4390     VAStartInstrumentationList.push_back(&I);
4391     unpoisonVAListTagForInst(I);
4392   }
4393 
4394   void visitVACopyInst(VACopyInst &I) override {
4395     if (F.getCallingConv() == CallingConv::Win64) return;
4396     unpoisonVAListTagForInst(I);
4397   }
4398 
4399   void finalizeInstrumentation() override {
4400     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
4401            "finalizeInstrumentation called twice");
4402     if (!VAStartInstrumentationList.empty()) {
4403       // If there is a va_start in this function, make a backup copy of
4404       // va_arg_tls somewhere in the function entry block.
4405       IRBuilder<> IRB(MSV.FnPrologueEnd);
4406       VAArgOverflowSize =
4407           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4408       Value *CopySize =
4409         IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
4410                       VAArgOverflowSize);
4411       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4412       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4413       if (MS.TrackOrigins) {
4414         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4415         IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
4416                          Align(8), CopySize);
4417       }
4418     }
4419 
4420     // Instrument va_start.
4421     // Copy va_list shadow from the backup copy of the TLS contents.
4422     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4423       CallInst *OrigInst = VAStartInstrumentationList[i];
4424       IRBuilder<> IRB(OrigInst->getNextNode());
4425       Value *VAListTag = OrigInst->getArgOperand(0);
4426 
4427       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4428       Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
4429           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4430                         ConstantInt::get(MS.IntptrTy, 16)),
4431           PointerType::get(RegSaveAreaPtrTy, 0));
4432       Value *RegSaveAreaPtr =
4433           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4434       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4435       const Align Alignment = Align(16);
4436       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4437           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4438                                  Alignment, /*isStore*/ true);
4439       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4440                        AMD64FpEndOffset);
4441       if (MS.TrackOrigins)
4442         IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
4443                          Alignment, AMD64FpEndOffset);
4444       Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4445       Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
4446           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4447                         ConstantInt::get(MS.IntptrTy, 8)),
4448           PointerType::get(OverflowArgAreaPtrTy, 0));
4449       Value *OverflowArgAreaPtr =
4450           IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
4451       Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
4452       std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
4453           MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
4454                                  Alignment, /*isStore*/ true);
4455       Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
4456                                              AMD64FpEndOffset);
4457       IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
4458                        VAArgOverflowSize);
4459       if (MS.TrackOrigins) {
4460         SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
4461                                         AMD64FpEndOffset);
4462         IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
4463                          VAArgOverflowSize);
4464       }
4465     }
4466   }
4467 };
4468 
4469 /// MIPS64-specific implementation of VarArgHelper.
4470 struct VarArgMIPS64Helper : public VarArgHelper {
4471   Function &F;
4472   MemorySanitizer &MS;
4473   MemorySanitizerVisitor &MSV;
4474   Value *VAArgTLSCopy = nullptr;
4475   Value *VAArgSize = nullptr;
4476 
4477   SmallVector<CallInst*, 16> VAStartInstrumentationList;
4478 
4479   VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
4480                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
4481 
4482   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4483     unsigned VAArgOffset = 0;
4484     const DataLayout &DL = F.getParent()->getDataLayout();
4485     for (auto ArgIt = CB.arg_begin() + CB.getFunctionType()->getNumParams(),
4486               End = CB.arg_end();
4487          ArgIt != End; ++ArgIt) {
4488       Triple TargetTriple(F.getParent()->getTargetTriple());
4489       Value *A = *ArgIt;
4490       Value *Base;
4491       uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4492       if (TargetTriple.getArch() == Triple::mips64) {
4493         // Adjust the shadow for arguments with size < 8 to match the placement
4494         // of bits in a big-endian system.
4495         if (ArgSize < 8)
4496           VAArgOffset += (8 - ArgSize);
4497       }
4498       Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset, ArgSize);
4499       VAArgOffset += ArgSize;
4500       VAArgOffset = alignTo(VAArgOffset, 8);
4501       if (!Base)
4502         continue;
4503       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
4504     }
4505 
4506     Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
4507     // Here we reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
4508     // class member, i.e. it holds the total size of all VarArgs.
4509     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
4510   }
4511 
4512   /// Compute the shadow address for a given va_arg.
4513   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4514                                    unsigned ArgOffset, unsigned ArgSize) {
4515     // Make sure we don't overflow __msan_va_arg_tls.
4516     if (ArgOffset + ArgSize > kParamTLSSize)
4517       return nullptr;
4518     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4519     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4520     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4521                               "_msarg");
4522   }
4523 
4524   void visitVAStartInst(VAStartInst &I) override {
4525     IRBuilder<> IRB(&I);
4526     VAStartInstrumentationList.push_back(&I);
4527     Value *VAListTag = I.getArgOperand(0);
4528     Value *ShadowPtr, *OriginPtr;
4529     const Align Alignment = Align(8);
4530     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4531         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4532     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4533                      /* size */ 8, Alignment, false);
4534   }
4535 
4536   void visitVACopyInst(VACopyInst &I) override {
4537     IRBuilder<> IRB(&I);
4538     VAStartInstrumentationList.push_back(&I);
4539     Value *VAListTag = I.getArgOperand(0);
4540     Value *ShadowPtr, *OriginPtr;
4541     const Align Alignment = Align(8);
4542     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4543         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4544     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4545                      /* size */ 8, Alignment, false);
4546   }
4547 
4548   void finalizeInstrumentation() override {
4549     assert(!VAArgSize && !VAArgTLSCopy &&
4550            "finalizeInstrumentation called twice");
4551     IRBuilder<> IRB(MSV.FnPrologueEnd);
4552     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4553     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
4554                                     VAArgSize);
4555 
4556     if (!VAStartInstrumentationList.empty()) {
4557       // If there is a va_start in this function, make a backup copy of
4558       // va_arg_tls somewhere in the function entry block.
4559       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4560       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4561     }
4562 
4563     // Instrument va_start.
4564     // Copy va_list shadow from the backup copy of the TLS contents.
4565     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4566       CallInst *OrigInst = VAStartInstrumentationList[i];
4567       IRBuilder<> IRB(OrigInst->getNextNode());
4568       Value *VAListTag = OrigInst->getArgOperand(0);
4569       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4570       Value *RegSaveAreaPtrPtr =
4571           IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4572                              PointerType::get(RegSaveAreaPtrTy, 0));
4573       Value *RegSaveAreaPtr =
4574           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4575       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4576       const Align Alignment = Align(8);
4577       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4578           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4579                                  Alignment, /*isStore*/ true);
4580       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4581                        CopySize);
4582     }
4583   }
4584 };
4585 
4586 /// AArch64-specific implementation of VarArgHelper.
4587 struct VarArgAArch64Helper : public VarArgHelper {
4588   static const unsigned kAArch64GrArgSize = 64;
4589   static const unsigned kAArch64VrArgSize = 128;
4590 
4591   static const unsigned AArch64GrBegOffset = 0;
4592   static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
4593   // Make VR space aligned to 16 bytes.
4594   static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
4595   static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
4596                                              + kAArch64VrArgSize;
4597   static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
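  // With the constants above, the va_arg TLS area is laid out as: [0, 64) for
  // general-purpose register arguments, [64, 192) for FP/SIMD register
  // arguments, and overflow (stack) arguments from offset 192 onwards.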
4598 
4599   Function &F;
4600   MemorySanitizer &MS;
4601   MemorySanitizerVisitor &MSV;
4602   Value *VAArgTLSCopy = nullptr;
4603   Value *VAArgOverflowSize = nullptr;
4604 
4605   SmallVector<CallInst*, 16> VAStartInstrumentationList;
4606 
4607   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
4608 
4609   VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
4610                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
4611 
4612   ArgKind classifyArgument(Value* arg) {
4613     Type *T = arg->getType();
4614     if (T->isFPOrFPVectorTy())
4615       return AK_FloatingPoint;
4616     if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
4617         || (T->isPointerTy()))
4618       return AK_GeneralPurpose;
4619     return AK_Memory;
4620   }
4621 
4622   // The instrumentation stores the argument shadow in a non ABI-specific
4623   // format because it does not know which arguments are named (Clang, as in
4624   // the x86_64 case, lowers va_arg in the frontend, so this pass only sees
4625   // the low-level code that deals with va_list internals).
4626   // The first eight GR registers are saved in the first 64 bytes of the
4627   // va_arg TLS array, followed by the first eight FP/SIMD registers, and then
4628   // the remaining arguments.
4629   // Using a constant offset within the va_arg TLS array allows a fast copy
4630   // in finalizeInstrumentation().
4631   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4632     unsigned GrOffset = AArch64GrBegOffset;
4633     unsigned VrOffset = AArch64VrBegOffset;
4634     unsigned OverflowOffset = AArch64VAEndOffset;
4635 
4636     const DataLayout &DL = F.getParent()->getDataLayout();
4637     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
4638          ++ArgIt) {
4639       Value *A = *ArgIt;
4640       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
4641       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
4642       ArgKind AK = classifyArgument(A);
4643       if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
4644         AK = AK_Memory;
4645       if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
4646         AK = AK_Memory;
4647       Value *Base;
4648       switch (AK) {
4649         case AK_GeneralPurpose:
4650           Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset, 8);
4651           GrOffset += 8;
4652           break;
4653         case AK_FloatingPoint:
4654           Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset, 8);
4655           VrOffset += 16;
4656           break;
4657         case AK_Memory:
4658           // Don't count fixed arguments in the overflow area - va_start will
4659           // skip right over them.
4660           if (IsFixed)
4661             continue;
4662           uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4663           Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset,
4664                                            alignTo(ArgSize, 8));
4665           OverflowOffset += alignTo(ArgSize, 8);
4666           break;
4667       }
4668       // Count Gp/Vr fixed arguments to their respective offsets, but don't
4669       // bother to actually store a shadow.
4670       if (IsFixed)
4671         continue;
4672       if (!Base)
4673         continue;
4674       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
4675     }
4676     Constant *OverflowSize =
4677       ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
4678     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
4679   }
4680 
4681   /// Compute the shadow address for a given va_arg.
4682   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4683                                    unsigned ArgOffset, unsigned ArgSize) {
4684     // Make sure we don't overflow __msan_va_arg_tls.
4685     if (ArgOffset + ArgSize > kParamTLSSize)
4686       return nullptr;
4687     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4688     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4689     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4690                               "_msarg");
4691   }
4692 
4693   void visitVAStartInst(VAStartInst &I) override {
4694     IRBuilder<> IRB(&I);
4695     VAStartInstrumentationList.push_back(&I);
4696     Value *VAListTag = I.getArgOperand(0);
4697     Value *ShadowPtr, *OriginPtr;
4698     const Align Alignment = Align(8);
4699     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4700         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4701     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4702                      /* size */ 32, Alignment, false);
4703   }
4704 
4705   void visitVACopyInst(VACopyInst &I) override {
4706     IRBuilder<> IRB(&I);
4707     VAStartInstrumentationList.push_back(&I);
4708     Value *VAListTag = I.getArgOperand(0);
4709     Value *ShadowPtr, *OriginPtr;
4710     const Align Alignment = Align(8);
4711     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4712         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4713     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4714                      /* size */ 32, Alignment, false);
4715   }
4716 
4717   // Retrieve a va_list field of 'void*' size.
4718   Value* getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
4719     Value *SaveAreaPtrPtr =
4720       IRB.CreateIntToPtr(
4721         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4722                       ConstantInt::get(MS.IntptrTy, offset)),
4723         Type::getInt64PtrTy(*MS.C));
4724     return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
4725   }
4726 
4727   // Retrieve a va_list field of 'int' size.
4728   Value* getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
4729     Value *SaveAreaPtr =
4730       IRB.CreateIntToPtr(
4731         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4732                       ConstantInt::get(MS.IntptrTy, offset)),
4733         Type::getInt32PtrTy(*MS.C));
4734     Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
4735     return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
4736   }
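  // The fixed offsets used below follow the AAPCS64 va_list layout (stated here
  // as background): __stack at 0, __gr_top at 8, __vr_top at 16, __gr_offs at 24
  // and __vr_offs at 28, all relative to the va_list tag pointer.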
4737 
4738   void finalizeInstrumentation() override {
4739     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
4740            "finalizeInstrumentation called twice");
4741     if (!VAStartInstrumentationList.empty()) {
4742       // If there is a va_start in this function, make a backup copy of
4743       // va_arg_tls somewhere in the function entry block.
4744       IRBuilder<> IRB(MSV.FnPrologueEnd);
4745       VAArgOverflowSize =
4746           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4747       Value *CopySize =
4748         IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
4749                       VAArgOverflowSize);
4750       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4751       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4752     }
4753 
4754     Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
4755     Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
4756 
4757     // Instrument va_start, copy va_list shadow from the backup copy of
4758     // the TLS contents.
4759     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4760       CallInst *OrigInst = VAStartInstrumentationList[i];
4761       IRBuilder<> IRB(OrigInst->getNextNode());
4762 
4763       Value *VAListTag = OrigInst->getArgOperand(0);
4764 
4765       // The variadic ABI for AArch64 creates two areas to save the incoming
4766       // argument registers (one for the 64-bit general-purpose registers x0-x7
4767       // and another for the 128-bit FP/SIMD registers v0-v7).
4768       // We then need to propagate the argument shadow to both regions,
4769       // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
4770       // The remaining arguments get their shadow from 'va::stack'.
4771       // One caveat is that only the unnamed (variadic) arguments need to be
4772       // propagated; however, the call-site instrumentation saves the shadow of
4773       // 'all' the arguments. So to copy the shadow values from the va_arg TLS
4774       // array we need to adjust the offsets of both the GR and VR regions by
4775       // the __{gr,vr}_offs values (since those are set based on the incoming
4776       // named arguments).
4777 
4778       // Read the stack pointer from the va_list.
4779       Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);
4780 
4781       // Read both the __gr_top and __gr_off and add them up.
4782       Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
4783       Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
4784 
4785       Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);
4786 
4787       // Read both the __vr_top and __vr_off and add them up.
4788       Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
4789       Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
4790 
4791       Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);
4792 
4793       // The instrumentation does not know how many named arguments are being
4794       // used and, at the call site, the shadow of all arguments was saved. Since
4795       // __gr_offs is defined as '0 - ((8 - named_gr) * 8)', the idea is to propagate
4796       // only the variadic arguments by ignoring the shadow bytes of the named ones.
4797       Value *GrRegSaveAreaShadowPtrOff =
4798         IRB.CreateAdd(GrArgSize, GrOffSaveArea);
4799 
4800       Value *GrRegSaveAreaShadowPtr =
4801           MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4802                                  Align(8), /*isStore*/ true)
4803               .first;
4804 
4805       Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
4806                                               GrRegSaveAreaShadowPtrOff);
4807       Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
4808 
4809       IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
4810                        GrCopySize);
4811 
4812       // Again, but for FP/SIMD values.
4813       Value *VrRegSaveAreaShadowPtrOff =
4814           IRB.CreateAdd(VrArgSize, VrOffSaveArea);
4815 
4816       Value *VrRegSaveAreaShadowPtr =
4817           MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4818                                  Align(8), /*isStore*/ true)
4819               .first;
4820 
4821       Value *VrSrcPtr = IRB.CreateInBoundsGEP(
4822         IRB.getInt8Ty(),
4823         IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
4824                               IRB.getInt32(AArch64VrBegOffset)),
4825         VrRegSaveAreaShadowPtrOff);
4826       Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
4827 
4828       IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
4829                        VrCopySize);
4830 
4831       // And finally for remaining arguments.
4832       Value *StackSaveAreaShadowPtr =
4833           MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
4834                                  Align(16), /*isStore*/ true)
4835               .first;
4836 
4837       Value *StackSrcPtr =
4838         IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
4839                               IRB.getInt32(AArch64VAEndOffset));
4840 
4841       IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
4842                        Align(16), VAArgOverflowSize);
4843     }
4844   }
4845 };
4846 
4847 /// PowerPC64-specific implementation of VarArgHelper.
4848 struct VarArgPowerPC64Helper : public VarArgHelper {
4849   Function &F;
4850   MemorySanitizer &MS;
4851   MemorySanitizerVisitor &MSV;
4852   Value *VAArgTLSCopy = nullptr;
4853   Value *VAArgSize = nullptr;
4854 
4855   SmallVector<CallInst*, 16> VAStartInstrumentationList;
4856 
4857   VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
4858                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
4859 
4860   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4861     // For PowerPC, we need to deal with alignment of stack arguments -
4862     // they are mostly aligned to 8 bytes, but vectors and i128 arrays
4863     // are aligned to 16 bytes, byvals can be aligned to 8 or 16 bytes,
4864     // are aligned to 16 bytes, and byvals can be aligned to 8 or 16 bytes.
4865     // For that reason, we compute the current offset from the stack pointer (which
4866     // is always properly aligned) and the offset of the first vararg, then subtract
4867     unsigned VAArgBase;
4868     Triple TargetTriple(F.getParent()->getTargetTriple());
4869     // Parameter save area starts at 48 bytes from frame pointer for ABIv1,
4870     // and 32 bytes for ABIv2.  This is usually determined by target
4871     // endianness, but in theory could be overridden by function attribute.
4872     if (TargetTriple.getArch() == Triple::ppc64)
4873       VAArgBase = 48;
4874     else
4875       VAArgBase = 32;
4876     unsigned VAArgOffset = VAArgBase;
4877     const DataLayout &DL = F.getParent()->getDataLayout();
4878     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
4879          ++ArgIt) {
4880       Value *A = *ArgIt;
4881       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
4882       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
4883       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
4884       if (IsByVal) {
4885         assert(A->getType()->isPointerTy());
4886         Type *RealTy = CB.getParamByValType(ArgNo);
4887         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
4888         MaybeAlign ArgAlign = CB.getParamAlign(ArgNo);
4889         if (!ArgAlign || *ArgAlign < Align(8))
4890           ArgAlign = Align(8);
4891         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
4892         if (!IsFixed) {
4893           Value *Base = getShadowPtrForVAArgument(
4894               RealTy, IRB, VAArgOffset - VAArgBase, ArgSize);
4895           if (Base) {
4896             Value *AShadowPtr, *AOriginPtr;
4897             std::tie(AShadowPtr, AOriginPtr) =
4898                 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
4899                                        kShadowTLSAlignment, /*isStore*/ false);
4900 
4901             IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
4902                              kShadowTLSAlignment, ArgSize);
4903           }
4904         }
4905         VAArgOffset += alignTo(ArgSize, 8);
4906       } else {
4907         Value *Base;
4908         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4909         uint64_t ArgAlign = 8;
4910         if (A->getType()->isArrayTy()) {
4911           // Arrays are aligned to element size, except for long double
4912           // arrays, which are aligned to 8 bytes.
4913           Type *ElementTy = A->getType()->getArrayElementType();
4914           if (!ElementTy->isPPC_FP128Ty())
4915             ArgAlign = DL.getTypeAllocSize(ElementTy);
4916         } else if (A->getType()->isVectorTy()) {
4917           // Vectors are naturally aligned.
4918           ArgAlign = DL.getTypeAllocSize(A->getType());
4919         }
4920         if (ArgAlign < 8)
4921           ArgAlign = 8;
4922         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
4923         if (DL.isBigEndian()) {
4924           // Adjusting the shadow for argument with size < 8 to match the placement
4925           // Adjust the shadow for arguments with size < 8 to match the placement
4926           // of bits in a big-endian system.
4927             VAArgOffset += (8 - ArgSize);
4928         }
4929         if (!IsFixed) {
4930           Base = getShadowPtrForVAArgument(A->getType(), IRB,
4931                                            VAArgOffset - VAArgBase, ArgSize);
4932           if (Base)
4933             IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
4934         }
4935         VAArgOffset += ArgSize;
4936         VAArgOffset = alignTo(VAArgOffset, 8);
4937       }
4938       if (IsFixed)
4939         VAArgBase = VAArgOffset;
4940     }
4941 
4942     Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(),
4943                                                 VAArgOffset - VAArgBase);
4944     // Here we reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
4945     // class member, i.e. it holds the total size of all VarArgs.
4946     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
4947   }
4948 
4949   /// Compute the shadow address for a given va_arg.
4950   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4951                                    unsigned ArgOffset, unsigned ArgSize) {
4952     // Make sure we don't overflow __msan_va_arg_tls.
4953     if (ArgOffset + ArgSize > kParamTLSSize)
4954       return nullptr;
4955     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4956     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4957     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4958                               "_msarg");
4959   }
4960 
4961   void visitVAStartInst(VAStartInst &I) override {
4962     IRBuilder<> IRB(&I);
4963     VAStartInstrumentationList.push_back(&I);
4964     Value *VAListTag = I.getArgOperand(0);
4965     Value *ShadowPtr, *OriginPtr;
4966     const Align Alignment = Align(8);
4967     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4968         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4969     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4970                      /* size */ 8, Alignment, false);
4971   }
4972 
4973   void visitVACopyInst(VACopyInst &I) override {
4974     IRBuilder<> IRB(&I);
4975     Value *VAListTag = I.getArgOperand(0);
4976     Value *ShadowPtr, *OriginPtr;
4977     const Align Alignment = Align(8);
4978     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4979         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4980     // Unpoison the whole __va_list_tag.
4981     // FIXME: magic ABI constants.
4982     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4983                      /* size */ 8, Alignment, false);
4984   }
4985 
4986   void finalizeInstrumentation() override {
4987     assert(!VAArgSize && !VAArgTLSCopy &&
4988            "finalizeInstrumentation called twice");
4989     IRBuilder<> IRB(MSV.FnPrologueEnd);
4990     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4991     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
4992                                     VAArgSize);
4993 
4994     if (!VAStartInstrumentationList.empty()) {
4995       // If there is a va_start in this function, make a backup copy of
4996       // va_arg_tls somewhere in the function entry block.
4997       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4998       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4999     }
5000 
5001     // Instrument va_start.
5002     // Copy va_list shadow from the backup copy of the TLS contents.
5003     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
5004       CallInst *OrigInst = VAStartInstrumentationList[i];
5005       IRBuilder<> IRB(OrigInst->getNextNode());
5006       Value *VAListTag = OrigInst->getArgOperand(0);
5007       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
5008       Value *RegSaveAreaPtrPtr =
5009           IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5010                              PointerType::get(RegSaveAreaPtrTy, 0));
5011       Value *RegSaveAreaPtr =
5012           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
5013       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5014       const Align Alignment = Align(8);
5015       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5016           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
5017                                  Alignment, /*isStore*/ true);
5018       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5019                        CopySize);
5020     }
5021   }
5022 };
5023 
5024 /// SystemZ-specific implementation of VarArgHelper.
5025 struct VarArgSystemZHelper : public VarArgHelper {
5026   static const unsigned SystemZGpOffset = 16;
5027   static const unsigned SystemZGpEndOffset = 56;
5028   static const unsigned SystemZFpOffset = 128;
5029   static const unsigned SystemZFpEndOffset = 160;
5030   static const unsigned SystemZMaxVrArgs = 8;
5031   static const unsigned SystemZRegSaveAreaSize = 160;
5032   static const unsigned SystemZOverflowOffset = 160;
5033   static const unsigned SystemZVAListTagSize = 32;
5034   static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
5035   static const unsigned SystemZRegSaveAreaPtrOffset = 24;
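  // Background sketch of what the constants above describe: within the 160-byte
  // SystemZ register save area, vararg GPR shadow is mirrored at offsets
  // [16, 56) and vararg FPR shadow at [128, 160); overflow arguments start at
  // offset 160. In the 32-byte va_list tag, the overflow_arg_area pointer is at
  // offset 16 and the reg_save_area pointer at offset 24.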
5036 
5037   Function &F;
5038   MemorySanitizer &MS;
5039   MemorySanitizerVisitor &MSV;
5040   Value *VAArgTLSCopy = nullptr;
5041   Value *VAArgTLSOriginCopy = nullptr;
5042   Value *VAArgOverflowSize = nullptr;
5043 
5044   SmallVector<CallInst *, 16> VAStartInstrumentationList;
5045 
5046   enum class ArgKind {
5047     GeneralPurpose,
5048     FloatingPoint,
5049     Vector,
5050     Memory,
5051     Indirect,
5052   };
5053 
5054   enum class ShadowExtension { None, Zero, Sign };
5055 
5056   VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
5057                       MemorySanitizerVisitor &MSV)
5058       : F(F), MS(MS), MSV(MSV) {}
5059 
5060   ArgKind classifyArgument(Type *T, bool IsSoftFloatABI) {
5061     // T is a SystemZABIInfo::classifyArgumentType() output, and there are
5062     // only a few possibilities of what it can be. In particular, enums, single
5063     // element structs and large types have already been taken care of.
5064 
5065     // Some i128 and fp128 arguments are converted to pointers only in the
5066     // back end.
5067     if (T->isIntegerTy(128) || T->isFP128Ty())
5068       return ArgKind::Indirect;
5069     if (T->isFloatingPointTy())
5070       return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
5071     if (T->isIntegerTy() || T->isPointerTy())
5072       return ArgKind::GeneralPurpose;
5073     if (T->isVectorTy())
5074       return ArgKind::Vector;
5075     return ArgKind::Memory;
5076   }
5077 
5078   ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
5079     // ABI says: "One of the simple integer types no more than 64 bits wide.
5080     // ... If such an argument is shorter than 64 bits, replace it by a full
5081     // 64-bit integer representing the same number, using sign or zero
5082     // extension". Shadow for an integer argument has the same type as the
5083     // argument itself, so it can be sign or zero extended as well.
5084     bool ZExt = CB.paramHasAttr(ArgNo, Attribute::ZExt);
5085     bool SExt = CB.paramHasAttr(ArgNo, Attribute::SExt);
5086     if (ZExt) {
5087       assert(!SExt);
5088       return ShadowExtension::Zero;
5089     }
5090     if (SExt) {
5091       assert(!ZExt);
5092       return ShadowExtension::Sign;
5093     }
5094     return ShadowExtension::None;
5095   }
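  // Example (assumed attributes): an i32 vararg marked zeroext yields
  // ShadowExtension::Zero, so its shadow is widened to 64 bits with zero
  // extension before being stored; with no extension attribute the shadow is
  // stored as-is at the proper offset within its 8-byte slot.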
5096 
5097   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
5098     bool IsSoftFloatABI = CB.getCalledFunction()
5099                               ->getFnAttribute("use-soft-float")
5100                               .getValueAsBool();
5101     unsigned GpOffset = SystemZGpOffset;
5102     unsigned FpOffset = SystemZFpOffset;
5103     unsigned VrIndex = 0;
5104     unsigned OverflowOffset = SystemZOverflowOffset;
5105     const DataLayout &DL = F.getParent()->getDataLayout();
5106     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
5107          ++ArgIt) {
5108       Value *A = *ArgIt;
5109       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
5110       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
5111       // SystemZABIInfo does not produce ByVal parameters.
5112       assert(!CB.paramHasAttr(ArgNo, Attribute::ByVal));
5113       Type *T = A->getType();
5114       ArgKind AK = classifyArgument(T, IsSoftFloatABI);
5115       if (AK == ArgKind::Indirect) {
5116         T = PointerType::get(T, 0);
5117         AK = ArgKind::GeneralPurpose;
5118       }
5119       if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
5120         AK = ArgKind::Memory;
5121       if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
5122         AK = ArgKind::Memory;
5123       if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
5124         AK = ArgKind::Memory;
5125       Value *ShadowBase = nullptr;
5126       Value *OriginBase = nullptr;
5127       ShadowExtension SE = ShadowExtension::None;
5128       switch (AK) {
5129       case ArgKind::GeneralPurpose: {
5130         // Always keep track of GpOffset, but store shadow only for varargs.
5131         uint64_t ArgSize = 8;
5132         if (GpOffset + ArgSize <= kParamTLSSize) {
5133           if (!IsFixed) {
5134             SE = getShadowExtension(CB, ArgNo);
5135             uint64_t GapSize = 0;
5136             if (SE == ShadowExtension::None) {
5137               uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
5138               assert(ArgAllocSize <= ArgSize);
5139               GapSize = ArgSize - ArgAllocSize;
5140             }
5141             ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
5142             if (MS.TrackOrigins)
5143               OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
5144           }
5145           GpOffset += ArgSize;
5146         } else {
5147           GpOffset = kParamTLSSize;
5148         }
5149         break;
5150       }
5151       case ArgKind::FloatingPoint: {
5152         // Always keep track of FpOffset, but store shadow only for varargs.
5153         uint64_t ArgSize = 8;
5154         if (FpOffset + ArgSize <= kParamTLSSize) {
5155           if (!IsFixed) {
5156             // PoP says: "A short floating-point datum requires only the
5157             // left-most 32 bit positions of a floating-point register".
5158             // Therefore, in contrast to AK_GeneralPurpose and AK_Memory,
5159             // don't extend shadow and don't mind the gap.
5160             ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
5161             if (MS.TrackOrigins)
5162               OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
5163           }
5164           FpOffset += ArgSize;
5165         } else {
5166           FpOffset = kParamTLSSize;
5167         }
5168         break;
5169       }
5170       case ArgKind::Vector: {
5171         // Keep track of VrIndex. No need to store shadow, since vector varargs
5172         // go through AK_Memory.
5173         assert(IsFixed);
5174         VrIndex++;
5175         break;
5176       }
5177       case ArgKind::Memory: {
5178         // Keep track of OverflowOffset and store shadow only for varargs.
5179         // Ignore fixed args, since we need to copy only the vararg portion of
5180         // the overflow area shadow.
5181         if (!IsFixed) {
5182           uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
5183           uint64_t ArgSize = alignTo(ArgAllocSize, 8);
5184           if (OverflowOffset + ArgSize <= kParamTLSSize) {
5185             SE = getShadowExtension(CB, ArgNo);
5186             uint64_t GapSize =
5187                 SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
5188             ShadowBase =
5189                 getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
5190             if (MS.TrackOrigins)
5191               OriginBase =
5192                   getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
5193             OverflowOffset += ArgSize;
5194           } else {
5195             OverflowOffset = kParamTLSSize;
5196           }
5197         }
5198         break;
5199       }
5200       case ArgKind::Indirect:
5201         llvm_unreachable("Indirect must be converted to GeneralPurpose");
5202       }
5203       if (ShadowBase == nullptr)
5204         continue;
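           // Store this argument's (possibly extended) shadow into the slot
           // computed above; finalizeInstrumentation() later copies it over
           // the shadow of the register save / overflow areas.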
5205       Value *Shadow = MSV.getShadow(A);
5206       if (SE != ShadowExtension::None)
5207         Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
5208                                       /*Signed*/ SE == ShadowExtension::Sign);
5209       ShadowBase = IRB.CreateIntToPtr(
5210           ShadowBase, PointerType::get(Shadow->getType(), 0), "_msarg_va_s");
5211       IRB.CreateStore(Shadow, ShadowBase);
5212       if (MS.TrackOrigins) {
5213         Value *Origin = MSV.getOrigin(A);
5214         unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
5215         MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
5216                         kMinOriginAlignment);
5217       }
5218     }
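         // Record how many bytes of overflow-area shadow were written, so
         // finalizeInstrumentation() knows how much to copy at va_start.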
5219     Constant *OverflowSize = ConstantInt::get(
5220         IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
5221     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
5222   }
5223 
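       /// Return, as an integer, the address of the byte at ArgOffset within
       /// the vararg shadow TLS area (MS.VAArgTLS).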
5224   Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
5225     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
5226     return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
5227   }
5228 
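       /// Return a pointer to the origin slot at ArgOffset within the vararg
       /// origin TLS area (MS.VAArgOriginTLS).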
5229   Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
5230     Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
5231     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
5232     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
5233                               "_msarg_va_o");
5234   }
5235 
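       /// Mark the va_list tag (SystemZVAListTagSize bytes) pointed to by the
       /// intrinsic's operand as fully initialized, so that later reads of the
       /// va_list structure itself are not reported.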
5236   void unpoisonVAListTagForInst(IntrinsicInst &I) {
5237     IRBuilder<> IRB(&I);
5238     Value *VAListTag = I.getArgOperand(0);
5239     Value *ShadowPtr, *OriginPtr;
5240     const Align Alignment = Align(8);
5241     std::tie(ShadowPtr, OriginPtr) =
5242         MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
5243                                /*isStore*/ true);
5244     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
5245                      SystemZVAListTagSize, Alignment, false);
5246   }
5247 
5248   void visitVAStartInst(VAStartInst &I) override {
5249     VAStartInstrumentationList.push_back(&I);
5250     unpoisonVAListTagForInst(I);
5251   }
5252 
5253   void visitVACopyInst(VACopyInst &I) override { unpoisonVAListTagForInst(I); }
5254 
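       /// Copy the backed-up vararg shadow (and origins) over the shadow of
       /// the register save area, whose address is loaded from the va_list at
       /// SystemZRegSaveAreaPtrOffset.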
5255   void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
5256     Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
5257     Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
5258         IRB.CreateAdd(
5259             IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5260             ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
5261         PointerType::get(RegSaveAreaPtrTy, 0));
5262     Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
5263     Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5264     const Align Alignment = Align(8);
5265     std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5266         MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
5267                                /*isStore*/ true);
5268     // TODO(iii): copy only fragments filled by visitCallBase()
5269     IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5270                      SystemZRegSaveAreaSize);
5271     if (MS.TrackOrigins)
5272       IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
5273                        Alignment, SystemZRegSaveAreaSize);
5274   }
5275 
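       /// Copy the backed-up shadow (and origins) of stack-passed varargs over
       /// the shadow of the overflow argument area, whose address is loaded
       /// from the va_list at SystemZOverflowArgAreaPtrOffset.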
5276   void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
5277     Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
5278     Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
5279         IRB.CreateAdd(
5280             IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5281             ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
5282         PointerType::get(OverflowArgAreaPtrTy, 0));
5283     Value *OverflowArgAreaPtr =
5284         IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
5285     Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
5286     const Align Alignment = Align(8);
5287     std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
5288         MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
5289                                Alignment, /*isStore*/ true);
5290     Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
5291                                            SystemZOverflowOffset);
5292     IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
5293                      VAArgOverflowSize);
5294     if (MS.TrackOrigins) {
5295       SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
5296                                       SystemZOverflowOffset);
5297       IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
5298                        VAArgOverflowSize);
5299     }
5300   }
5301 
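       /// Runs once per function after all instructions have been visited:
       /// back up the vararg TLS areas in the prologue (later calls would
       /// overwrite them) and paste the backup into the va_list's shadow at
       /// every va_start site.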
5302   void finalizeInstrumentation() override {
5303     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
5304            "finalizeInstrumentation called twice");
5305     if (!VAStartInstrumentationList.empty()) {
5306       // If there is a va_start in this function, make a backup copy of
5307       // va_arg_tls somewhere in the function entry block.
5308       IRBuilder<> IRB(MSV.FnPrologueEnd);
5309       VAArgOverflowSize =
5310           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
5311       Value *CopySize =
5312           IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
5313                         VAArgOverflowSize);
5314       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
5315       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
5316       if (MS.TrackOrigins) {
5317         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
5318         IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
5319                          Align(8), CopySize);
5320       }
5321     }
5322 
5323     // Instrument va_start.
5324     // Copy va_list shadow from the backup copy of the TLS contents.
5325     for (size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.size();
5326          VaStartNo < VaStartNum; VaStartNo++) {
5327       CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
5328       IRBuilder<> IRB(OrigInst->getNextNode());
5329       Value *VAListTag = OrigInst->getArgOperand(0);
5330       copyRegSaveArea(IRB, VAListTag);
5331       copyOverflowArea(IRB, VAListTag);
5332     }
5333   }
5334 };
5335 
5336 /// A no-op implementation of VarArgHelper.
5337 struct VarArgNoOpHelper : public VarArgHelper {
5338   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
5339                    MemorySanitizerVisitor &MSV) {}
5340 
5341   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}
5342 
5343   void visitVAStartInst(VAStartInst &I) override {}
5344 
5345   void visitVACopyInst(VACopyInst &I) override {}
5346 
5347   void finalizeInstrumentation() override {}
5348 };
5349 
5350 } // end anonymous namespace
5351 
5352 static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
5353                                         MemorySanitizerVisitor &Visitor) {
5354   // VarArg handling is only implemented on AMD64. False positives are possible
5355   // on other platforms.
5356   Triple TargetTriple(Func.getParent()->getTargetTriple());
5357   if (TargetTriple.getArch() == Triple::x86_64)
5358     return new VarArgAMD64Helper(Func, Msan, Visitor);
5359   else if (TargetTriple.isMIPS64())
5360     return new VarArgMIPS64Helper(Func, Msan, Visitor);
5361   else if (TargetTriple.getArch() == Triple::aarch64)
5362     return new VarArgAArch64Helper(Func, Msan, Visitor);
5363   else if (TargetTriple.getArch() == Triple::ppc64 ||
5364            TargetTriple.getArch() == Triple::ppc64le)
5365     return new VarArgPowerPC64Helper(Func, Msan, Visitor);
5366   else if (TargetTriple.getArch() == Triple::systemz)
5367     return new VarArgSystemZHelper(Func, Msan, Visitor);
5368   else
5369     return new VarArgNoOpHelper(Func, Msan, Visitor);
5370 }
5371 
5372 bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
5373   if (!CompileKernel && F.getName() == kMsanModuleCtorName)
5374     return false;
5375 
5376   if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
5377     return false;
5378 
5379   MemorySanitizerVisitor Visitor(F, *this, TLI);
5380 
5381   // Clear out readonly/readnone attributes.
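       // (The mask below also drops WriteOnly, ArgMemOnly and Speculatable.)
       // Instrumented code reads and writes shadow memory, so these
       // guarantees no longer hold.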
5382   AttributeMask B;
5383   B.addAttribute(Attribute::ReadOnly)
5384       .addAttribute(Attribute::ReadNone)
5385       .addAttribute(Attribute::WriteOnly)
5386       .addAttribute(Attribute::ArgMemOnly)
5387       .addAttribute(Attribute::Speculatable);
5388   F.removeFnAttrs(B);
5389 
5390   return Visitor.runOnFunction();
5391 }
5392