xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 //===- MemorySanitizer.cpp - detector of uninitialized reads --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of MemorySanitizer, a detector of uninitialized
11 /// reads.
12 ///
13 /// The algorithm of the tool is similar to Memcheck
14 /// (https://static.usenix.org/event/usenix05/tech/general/full_papers/seward/seward_html/usenix2005.html)
15 /// We associate a few shadow bits with every byte of the application memory,
16 /// poison the shadow of the malloc-ed or alloca-ed memory, load the shadow,
17 /// bits on every memory read, propagate the shadow bits through some of the
18 /// arithmetic instruction (including MOV), store the shadow bits on every
19 /// memory write, report a bug on some other instructions (e.g. JMP) if the
20 /// associated shadow is poisoned.
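///
/// For example (an illustrative sketch, not the exact IR the pass emits):
///   %c = add i32 %a, %b
/// is instrumented with an approximate propagation such as
///   %c_shadow = or i32 %a_shadow, %b_shadow
/// so the result is treated as (partially) uninitialized whenever either
/// operand is.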
21 ///
22 /// But there are differences too. The first and major one is that we use
23 /// compiler instrumentation instead of binary instrumentation. This
24 /// gives us much better register allocation, possible compiler
25 /// optimizations and a fast start-up. But this brings the major issue
26 /// as well: msan needs to see all program events, including system
27 /// calls and reads/writes in system libraries, so we either need to
28 /// compile *everything* with msan or use a binary translation
29 /// component (e.g. DynamoRIO) to instrument pre-built libraries.
30 /// Another difference from Memcheck is that we use 8 shadow bits per
31 /// byte of application memory and use a direct shadow mapping. This
32 /// greatly simplifies the instrumentation code and avoids races on
33 /// shadow updates (Memcheck is single-threaded so races are not a
34 /// concern there. Memcheck uses 2 shadow bits per byte with a slow
35 /// path storage that uses 8 bits per byte).
36 ///
37 /// The default value of shadow is 0, which means "clean" (not poisoned).
38 ///
39 /// Every module initializer should call __msan_init to ensure that the
40 /// shadow memory is ready. On error, __msan_warning is called. Since
41 /// parameters and return values may be passed via registers, we have a
42 /// specialized thread-local shadow for return values
43 /// (__msan_retval_tls) and parameters (__msan_param_tls).
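///
/// For example (an illustrative sketch of the call protocol): for a call
///   %r = call i32 @f(i32 %x)
/// the caller stores the shadow of %x into __msan_param_tls before the call;
/// @f loads its parameter shadow from the same slot on entry, and stores the
/// shadow of its return value into __msan_retval_tls, from which the caller
/// reads the shadow of %r.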
44 ///
45 ///                           Origin tracking.
46 ///
47 /// MemorySanitizer can track origins (allocation points) of all uninitialized
48 /// values. This behavior is controlled with a flag (msan-track-origins) and is
49 /// disabled by default.
50 ///
51 /// Origins are 4-byte values created and interpreted by the runtime library.
52 /// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
53 /// of application memory. Propagation of origins is basically a bunch of
54 /// "select" instructions that pick the origin of a dirty argument, if an
55 /// instruction has one.
56 ///
57 /// Every aligned group of 4 consecutive bytes of application memory has one
58 /// origin value associated with it. If these bytes contain uninitialized data
59 /// coming from 2 different allocations, the last store wins. Because of this,
60 /// MemorySanitizer reports can show unrelated origins, but this is unlikely in
61 /// practice.
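///
/// For example, bytes [p, p+4) share one origin slot: if byte p is left
/// uninitialized with origin A and byte p+1 is later stored with
/// uninitialized data of origin B, the slot holds B, and a report about
/// byte p will show origin B.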
62 ///
63 /// Origins are meaningless for fully initialized values, so MemorySanitizer
64 /// avoids storing origin to memory when a fully initialized value is stored.
65 /// This way it avoids needlessly overwriting the origin of a 4-byte region
66 /// on a short (e.g. 1-byte) clean store, and it is also good for performance.
67 ///
68 ///                            Atomic handling.
69 ///
70 /// Ideally, every atomic store of application value should update the
71 /// corresponding shadow location in an atomic way. Unfortunately, atomic store
72 /// of two disjoint locations cannot be done without severe slowdown.
73 ///
74 /// Therefore, we implement an approximation that may err on the safe side.
75 /// In this implementation, every atomically accessed location in the program
76 /// may only change from (partially) uninitialized to fully initialized, but
77 /// not the other way around. We load the shadow _after_ the application load,
78 /// and we store the shadow _before_ the app store. Also, we always store clean
79 /// shadow (if the application store is atomic). This way, if the store-load
80 /// pair constitutes a happens-before arc, shadow store and load are correctly
81 /// ordered such that the load will get either the value that was stored, or
82 /// some later value (which is always clean).
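///
/// For example (an illustrative interleaving under this scheme):
///   Thread 1: store shadow(v) to shadow(p);  store-release v to p
///   Thread 2: load-acquire v from p;         load shadow from shadow(p)
/// If Thread 2 observes the value stored by Thread 1, the happens-before
/// edge guarantees that it also observes the (clean) shadow stored before it.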
83 ///
84 /// This does not work very well with Compare-And-Swap (CAS) and
85 /// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
86 /// must store the new shadow before the app operation, and load the shadow
87 /// after the app operation. Computers don't work this way. The current
88 /// implementation ignores the load aspect of CAS/RMW, always returning a clean
89 /// value. It implements the store part as a simple atomic store by storing a
90 /// clean shadow.
91 ///
92 ///                      Instrumenting inline assembly.
93 ///
94 /// For inline assembly code LLVM has little idea about which memory locations
95 /// become initialized depending on the arguments. It may be possible to figure
96 /// out which arguments are meant to point to inputs and outputs, but the
97 /// actual semantics can be only visible at runtime. In the Linux kernel it's
98 /// also possible that the arguments only indicate the offset for a base taken
99 /// from a segment register, so it's dangerous to treat any asm() arguments as
100 /// pointers. We take a conservative approach and generate calls to
101 ///   __msan_instrument_asm_store(ptr, size),
102 /// which defer the memory unpoisoning to the runtime library.
103 /// The latter can perform more complex address checks to figure out whether
104 /// it's safe to touch the shadow memory.
105 /// Like with atomic operations, we call __msan_instrument_asm_store() before
106 /// the assembly call, so that changes to the shadow memory will be seen by
107 /// other threads together with main memory initialization.
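///
/// For example (illustrative; the size depends on the pointee type), for an
/// asm() statement with an int* memory operand %p, the pass emits
///   call void @__msan_instrument_asm_store(ptr %p, i64 4)
/// before the asm, so that the runtime can unpoison the first sizeof(int)
/// bytes at %p if the address is safe to touch.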
108 ///
109 ///                  KernelMemorySanitizer (KMSAN) implementation.
110 ///
111 /// The major differences between KMSAN and MSan instrumentation are:
112 ///  - KMSAN always tracks the origins and implies msan-keep-going=true;
113 ///  - KMSAN allocates shadow and origin memory for each page separately, so
114 ///    there are no explicit accesses to shadow and origin in the
115 ///    instrumentation.
116 ///    Shadow and origin values for a particular X-byte memory location
117 ///    (X=1,2,4,8) are accessed through pointers obtained via the
118 ///      __msan_metadata_ptr_for_load_X(ptr)
119 ///      __msan_metadata_ptr_for_store_X(ptr)
120 ///    functions. The corresponding functions check that the X-byte accesses
121 ///    are possible and return the pointers to shadow and origin memory.
122 ///    Arbitrary sized accesses are handled with:
123 ///      __msan_metadata_ptr_for_load_n(ptr, size)
124 ///      __msan_metadata_ptr_for_store_n(ptr, size);
125 ///    Note that the sanitizer code has to deal with how shadow/origin pairs
126 ///    returned by these functions are represented in different ABIs. In
127 ///    the X86_64 ABI they are returned in RDX:RAX, in PowerPC64 they are
128 ///    returned in r3 and r4, and in the SystemZ ABI they are written to memory
129 ///    pointed to by a hidden parameter.
130 ///  - TLS variables are stored in a single per-task struct. A call to a
131 ///    function __msan_get_context_state() returning a pointer to that struct
132 ///    is inserted into every instrumented function before the entry block;
133 ///  - __msan_warning() takes a 32-bit origin parameter;
134 ///  - local variables are poisoned with __msan_poison_alloca() upon function
135 ///    entry and unpoisoned with __msan_unpoison_alloca() before leaving the
136 ///    function;
137 ///  - the pass doesn't declare any global variables or add global constructors
138 ///    to the translation unit.
139 ///
140 /// Also, KMSAN currently ignores uninitialized memory passed into inline asm
141 /// calls, making sure we're on the safe side wrt. possible false positives.
142 ///
143 ///  KernelMemorySanitizer only supports X86_64, SystemZ and PowerPC64 at the
144 ///  moment.
145 ///
146 //
147 // FIXME: This sanitizer does not yet handle scalable vectors
148 //
149 //===----------------------------------------------------------------------===//
150 
151 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
152 #include "llvm/ADT/APInt.h"
153 #include "llvm/ADT/ArrayRef.h"
154 #include "llvm/ADT/DenseMap.h"
155 #include "llvm/ADT/DepthFirstIterator.h"
156 #include "llvm/ADT/SetVector.h"
157 #include "llvm/ADT/SmallPtrSet.h"
158 #include "llvm/ADT/SmallVector.h"
159 #include "llvm/ADT/StringExtras.h"
160 #include "llvm/ADT/StringRef.h"
161 #include "llvm/ADT/bit.h"
162 #include "llvm/Analysis/GlobalsModRef.h"
163 #include "llvm/Analysis/TargetLibraryInfo.h"
164 #include "llvm/Analysis/ValueTracking.h"
165 #include "llvm/IR/Argument.h"
166 #include "llvm/IR/AttributeMask.h"
167 #include "llvm/IR/Attributes.h"
168 #include "llvm/IR/BasicBlock.h"
169 #include "llvm/IR/CallingConv.h"
170 #include "llvm/IR/Constant.h"
171 #include "llvm/IR/Constants.h"
172 #include "llvm/IR/DataLayout.h"
173 #include "llvm/IR/DerivedTypes.h"
174 #include "llvm/IR/Function.h"
175 #include "llvm/IR/GlobalValue.h"
176 #include "llvm/IR/GlobalVariable.h"
177 #include "llvm/IR/IRBuilder.h"
178 #include "llvm/IR/InlineAsm.h"
179 #include "llvm/IR/InstVisitor.h"
180 #include "llvm/IR/InstrTypes.h"
181 #include "llvm/IR/Instruction.h"
182 #include "llvm/IR/Instructions.h"
183 #include "llvm/IR/IntrinsicInst.h"
184 #include "llvm/IR/Intrinsics.h"
185 #include "llvm/IR/IntrinsicsAArch64.h"
186 #include "llvm/IR/IntrinsicsX86.h"
187 #include "llvm/IR/MDBuilder.h"
188 #include "llvm/IR/Module.h"
189 #include "llvm/IR/Type.h"
190 #include "llvm/IR/Value.h"
191 #include "llvm/IR/ValueMap.h"
192 #include "llvm/Support/Alignment.h"
193 #include "llvm/Support/AtomicOrdering.h"
194 #include "llvm/Support/Casting.h"
195 #include "llvm/Support/CommandLine.h"
196 #include "llvm/Support/Debug.h"
197 #include "llvm/Support/DebugCounter.h"
198 #include "llvm/Support/ErrorHandling.h"
199 #include "llvm/Support/MathExtras.h"
200 #include "llvm/Support/raw_ostream.h"
201 #include "llvm/TargetParser/Triple.h"
202 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
203 #include "llvm/Transforms/Utils/Instrumentation.h"
204 #include "llvm/Transforms/Utils/Local.h"
205 #include "llvm/Transforms/Utils/ModuleUtils.h"
206 #include <algorithm>
207 #include <cassert>
208 #include <cstddef>
209 #include <cstdint>
210 #include <memory>
211 #include <numeric>
212 #include <string>
213 #include <tuple>
214 
215 using namespace llvm;
216 
217 #define DEBUG_TYPE "msan"
218 
219 DEBUG_COUNTER(DebugInsertCheck, "msan-insert-check",
220               "Controls which checks to insert");
221 
222 DEBUG_COUNTER(DebugInstrumentInstruction, "msan-instrument-instruction",
223               "Controls which instruction to instrument");
224 
225 static const unsigned kOriginSize = 4;
226 static const Align kMinOriginAlignment = Align(4);
227 static const Align kShadowTLSAlignment = Align(8);
228 
229 // These constants must be kept in sync with the ones in msan.h.
230 static const unsigned kParamTLSSize = 800;
231 static const unsigned kRetvalTLSSize = 800;
232 
233 // Access sizes are powers of two: 1, 2, 4, 8.
234 static const size_t kNumberOfAccessSizes = 4;
235 
236 /// Track origins of uninitialized values.
237 ///
238 /// Adds a section to MemorySanitizer report that points to the allocation
239 /// (stack or heap) the uninitialized bits came from originally.
240 static cl::opt<int> ClTrackOrigins(
241     "msan-track-origins",
242     cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden,
243     cl::init(0));
244 
245 static cl::opt<bool> ClKeepGoing("msan-keep-going",
246                                  cl::desc("keep going after reporting a UMR"),
247                                  cl::Hidden, cl::init(false));
248 
249 static cl::opt<bool>
250     ClPoisonStack("msan-poison-stack",
251                   cl::desc("poison uninitialized stack variables"), cl::Hidden,
252                   cl::init(true));
253 
254 static cl::opt<bool> ClPoisonStackWithCall(
255     "msan-poison-stack-with-call",
256     cl::desc("poison uninitialized stack variables with a call"), cl::Hidden,
257     cl::init(false));
258 
259 static cl::opt<int> ClPoisonStackPattern(
260     "msan-poison-stack-pattern",
261     cl::desc("poison uninitialized stack variables with the given pattern"),
262     cl::Hidden, cl::init(0xff));
263 
264 static cl::opt<bool>
265     ClPrintStackNames("msan-print-stack-names",
266                       cl::desc("Print name of local stack variable"),
267                       cl::Hidden, cl::init(true));
268 
269 static cl::opt<bool>
270     ClPoisonUndef("msan-poison-undef",
271                   cl::desc("Poison fully undef temporary values. "
272                            "Partially undefined constant vectors "
273                            "are unaffected by this flag (see "
274                            "-msan-poison-undef-vectors)."),
275                   cl::Hidden, cl::init(true));
276 
277 static cl::opt<bool> ClPoisonUndefVectors(
278     "msan-poison-undef-vectors",
279     cl::desc("Precisely poison partially undefined constant vectors. "
280              "If false (legacy behavior), the entire vector is "
281              "considered fully initialized, which may lead to false "
282              "negatives. Fully undefined constant vectors are "
283              "unaffected by this flag (see -msan-poison-undef)."),
284     cl::Hidden, cl::init(false));
285 
286 static cl::opt<bool> ClPreciseDisjointOr(
287     "msan-precise-disjoint-or",
288     cl::desc("Precisely poison disjoint OR. If false (legacy behavior), "
289              "disjointedness is ignored (i.e., 1|1 is initialized)."),
290     cl::Hidden, cl::init(false));
291 
292 static cl::opt<bool>
293     ClHandleICmp("msan-handle-icmp",
294                  cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
295                  cl::Hidden, cl::init(true));
296 
297 static cl::opt<bool>
298     ClHandleICmpExact("msan-handle-icmp-exact",
299                       cl::desc("exact handling of relational integer ICmp"),
300                       cl::Hidden, cl::init(true));
301 
302 static cl::opt<bool> ClHandleLifetimeIntrinsics(
303     "msan-handle-lifetime-intrinsics",
304     cl::desc(
305         "when possible, poison scoped variables at the beginning of the scope "
306         "(slower, but more precise)"),
307     cl::Hidden, cl::init(true));
308 
309 // When compiling the Linux kernel, we sometimes see false positives related to
310 // MSan being unable to understand that inline assembly calls may initialize
311 // local variables.
312 // This flag makes the compiler conservatively unpoison every memory location
313 // passed into an assembly call. Note that this may cause false negatives.
314 // Because it's impossible to figure out the array sizes, we can only unpoison
315 // the first sizeof(type) bytes for each type* pointer.
316 static cl::opt<bool> ClHandleAsmConservative(
317     "msan-handle-asm-conservative",
318     cl::desc("conservative handling of inline assembly"), cl::Hidden,
319     cl::init(true));
320 
321 // This flag controls whether we check the shadow of the address
322 // operand of load or store. Such bugs are very rare, since a load from
323 // a garbage address typically results in SEGV, but they still happen
324 // (e.g. only lower bits of address are garbage, or the access happens
325 // early at program startup where malloc-ed memory is more likely to
326 // be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
327 static cl::opt<bool> ClCheckAccessAddress(
328     "msan-check-access-address",
329     cl::desc("report accesses through a pointer which has poisoned shadow"),
330     cl::Hidden, cl::init(true));
331 
332 static cl::opt<bool> ClEagerChecks(
333     "msan-eager-checks",
334     cl::desc("check arguments and return values at function call boundaries"),
335     cl::Hidden, cl::init(false));
336 
337 static cl::opt<bool> ClDumpStrictInstructions(
338     "msan-dump-strict-instructions",
339     cl::desc("print out instructions with default strict semantics, i.e., "
340              "check that all the inputs are fully initialized, and mark "
341              "the output as fully initialized. These semantics are applied "
342              "to instructions that could not be handled explicitly nor "
343              "heuristically."),
344     cl::Hidden, cl::init(false));
345 
346 // Currently, all the heuristically handled instructions are specifically
347 // IntrinsicInst. However, we use the broader "HeuristicInstructions" name
348 // to parallel 'msan-dump-strict-instructions', and to keep the door open to
349 // handling non-intrinsic instructions heuristically.
350 static cl::opt<bool> ClDumpHeuristicInstructions(
351     "msan-dump-heuristic-instructions",
352     cl::desc("Prints 'unknown' instructions that were handled heuristically. "
353              "Use -msan-dump-strict-instructions to print instructions that "
354              "could not be handled explicitly nor heuristically."),
355     cl::Hidden, cl::init(false));
356 
357 static cl::opt<int> ClInstrumentationWithCallThreshold(
358     "msan-instrumentation-with-call-threshold",
359     cl::desc(
360         "If the function being instrumented requires more than "
361         "this number of checks and origin stores, use callbacks instead of "
362         "inline checks (-1 means never use callbacks)."),
363     cl::Hidden, cl::init(3500));
364 
365 static cl::opt<bool>
366     ClEnableKmsan("msan-kernel",
367                   cl::desc("Enable KernelMemorySanitizer instrumentation"),
368                   cl::Hidden, cl::init(false));
369 
370 static cl::opt<bool>
371     ClDisableChecks("msan-disable-checks",
372                     cl::desc("Apply no_sanitize to the whole file"), cl::Hidden,
373                     cl::init(false));
374 
375 static cl::opt<bool>
376     ClCheckConstantShadow("msan-check-constant-shadow",
377                           cl::desc("Insert checks for constant shadow values"),
378                           cl::Hidden, cl::init(true));
379 
380 // This is off by default because of a bug in gold:
381 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002
382 static cl::opt<bool>
383     ClWithComdat("msan-with-comdat",
384                  cl::desc("Place MSan constructors in comdat sections"),
385                  cl::Hidden, cl::init(false));
386 
387 // These options allow specifying custom memory map parameters.
388 // See MemoryMapParams for details.
389 static cl::opt<uint64_t> ClAndMask("msan-and-mask",
390                                    cl::desc("Define custom MSan AndMask"),
391                                    cl::Hidden, cl::init(0));
392 
393 static cl::opt<uint64_t> ClXorMask("msan-xor-mask",
394                                    cl::desc("Define custom MSan XorMask"),
395                                    cl::Hidden, cl::init(0));
396 
397 static cl::opt<uint64_t> ClShadowBase("msan-shadow-base",
398                                       cl::desc("Define custom MSan ShadowBase"),
399                                       cl::Hidden, cl::init(0));
400 
401 static cl::opt<uint64_t> ClOriginBase("msan-origin-base",
402                                       cl::desc("Define custom MSan OriginBase"),
403                                       cl::Hidden, cl::init(0));
404 
405 static cl::opt<int>
406     ClDisambiguateWarning("msan-disambiguate-warning-threshold",
407                           cl::desc("Define threshold for number of checks per "
408                                    "debug location to force origin update."),
409                           cl::Hidden, cl::init(3));
410 
411 const char kMsanModuleCtorName[] = "msan.module_ctor";
412 const char kMsanInitName[] = "__msan_init";
413 
414 namespace {
415 
416 // Memory map parameters used in application-to-shadow address calculation.
417 // Offset = (Addr & ~AndMask) ^ XorMask
418 // Shadow = ShadowBase + Offset
419 // Origin = OriginBase + Offset
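//
// For example, with the x86_64 Linux parameters defined below (AndMask = 0,
// XorMask = 0x500000000000, ShadowBase = 0, OriginBase = 0x100000000000):
//   Addr   = 0x700000001000
//   Offset = (0x700000001000 & ~0) ^ 0x500000000000 = 0x200000001000
//   Shadow = 0 + 0x200000001000 = 0x200000001000
//   Origin = 0x100000000000 + 0x200000001000 = 0x300000001000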
420 struct MemoryMapParams {
421   uint64_t AndMask;
422   uint64_t XorMask;
423   uint64_t ShadowBase;
424   uint64_t OriginBase;
425 };
426 
427 struct PlatformMemoryMapParams {
428   const MemoryMapParams *bits32;
429   const MemoryMapParams *bits64;
430 };
431 
432 } // end anonymous namespace
433 
434 // i386 Linux
435 static const MemoryMapParams Linux_I386_MemoryMapParams = {
436     0x000080000000, // AndMask
437     0,              // XorMask (not used)
438     0,              // ShadowBase (not used)
439     0x000040000000, // OriginBase
440 };
441 
442 // x86_64 Linux
443 static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
444     0,              // AndMask (not used)
445     0x500000000000, // XorMask
446     0,              // ShadowBase (not used)
447     0x100000000000, // OriginBase
448 };
449 
450 // mips32 Linux
451 // FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
452 // after picking good constants
453 
454 // mips64 Linux
455 static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
456     0,              // AndMask (not used)
457     0x008000000000, // XorMask
458     0,              // ShadowBase (not used)
459     0x002000000000, // OriginBase
460 };
461 
462 // ppc32 Linux
463 // FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
464 // after picking good constants
465 
466 // ppc64 Linux
467 static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
468     0xE00000000000, // AndMask
469     0x100000000000, // XorMask
470     0x080000000000, // ShadowBase
471     0x1C0000000000, // OriginBase
472 };
473 
474 // s390x Linux
475 static const MemoryMapParams Linux_S390X_MemoryMapParams = {
476     0xC00000000000, // AndMask
477     0,              // XorMask (not used)
478     0x080000000000, // ShadowBase
479     0x1C0000000000, // OriginBase
480 };
481 
482 // arm32 Linux
483 // FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
484 // after picking good constants
485 
486 // aarch64 Linux
487 static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
488     0,               // AndMask (not used)
489     0x0B00000000000, // XorMask
490     0,               // ShadowBase (not used)
491     0x0200000000000, // OriginBase
492 };
493 
494 // loongarch64 Linux
495 static const MemoryMapParams Linux_LoongArch64_MemoryMapParams = {
496     0,              // AndMask (not used)
497     0x500000000000, // XorMask
498     0,              // ShadowBase (not used)
499     0x100000000000, // OriginBase
500 };
501 
502 // riscv32 Linux
503 // FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
504 // after picking good constants
505 
506 // aarch64 FreeBSD
507 static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams = {
508     0x1800000000000, // AndMask
509     0x0400000000000, // XorMask
510     0x0200000000000, // ShadowBase
511     0x0700000000000, // OriginBase
512 };
513 
514 // i386 FreeBSD
515 static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
516     0x000180000000, // AndMask
517     0x000040000000, // XorMask
518     0x000020000000, // ShadowBase
519     0x000700000000, // OriginBase
520 };
521 
522 // x86_64 FreeBSD
523 static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
524     0xc00000000000, // AndMask
525     0x200000000000, // XorMask
526     0x100000000000, // ShadowBase
527     0x380000000000, // OriginBase
528 };
529 
530 // x86_64 NetBSD
531 static const MemoryMapParams NetBSD_X86_64_MemoryMapParams = {
532     0,              // AndMask
533     0x500000000000, // XorMask
534     0,              // ShadowBase
535     0x100000000000, // OriginBase
536 };
537 
538 static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
539     &Linux_I386_MemoryMapParams,
540     &Linux_X86_64_MemoryMapParams,
541 };
542 
543 static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
544     nullptr,
545     &Linux_MIPS64_MemoryMapParams,
546 };
547 
548 static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
549     nullptr,
550     &Linux_PowerPC64_MemoryMapParams,
551 };
552 
553 static const PlatformMemoryMapParams Linux_S390_MemoryMapParams = {
554     nullptr,
555     &Linux_S390X_MemoryMapParams,
556 };
557 
558 static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
559     nullptr,
560     &Linux_AArch64_MemoryMapParams,
561 };
562 
563 static const PlatformMemoryMapParams Linux_LoongArch_MemoryMapParams = {
564     nullptr,
565     &Linux_LoongArch64_MemoryMapParams,
566 };
567 
568 static const PlatformMemoryMapParams FreeBSD_ARM_MemoryMapParams = {
569     nullptr,
570     &FreeBSD_AArch64_MemoryMapParams,
571 };
572 
573 static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
574     &FreeBSD_I386_MemoryMapParams,
575     &FreeBSD_X86_64_MemoryMapParams,
576 };
577 
578 static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = {
579     nullptr,
580     &NetBSD_X86_64_MemoryMapParams,
581 };
582 
583 namespace {
584 
585 /// Instrument functions of a module to detect uninitialized reads.
586 ///
587 /// Instantiating MemorySanitizer inserts the msan runtime library API function
588 /// declarations into the module if they don't exist already. Instantiating
589 /// ensures the __msan_init function is in the list of global constructors for
590 /// the module.
591 class MemorySanitizer {
592 public:
593   MemorySanitizer(Module &M, MemorySanitizerOptions Options)
594       : CompileKernel(Options.Kernel), TrackOrigins(Options.TrackOrigins),
595         Recover(Options.Recover), EagerChecks(Options.EagerChecks) {
596     initializeModule(M);
597   }
598 
599   // MSan cannot be moved or copied because of MapParams.
600   MemorySanitizer(MemorySanitizer &&) = delete;
601   MemorySanitizer &operator=(MemorySanitizer &&) = delete;
602   MemorySanitizer(const MemorySanitizer &) = delete;
603   MemorySanitizer &operator=(const MemorySanitizer &) = delete;
604 
605   bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);
606 
607 private:
608   friend struct MemorySanitizerVisitor;
609   friend struct VarArgHelperBase;
610   friend struct VarArgAMD64Helper;
611   friend struct VarArgAArch64Helper;
612   friend struct VarArgPowerPC64Helper;
613   friend struct VarArgPowerPC32Helper;
614   friend struct VarArgSystemZHelper;
615   friend struct VarArgI386Helper;
616   friend struct VarArgGenericHelper;
617 
618   void initializeModule(Module &M);
619   void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
620   void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
621   void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);
622 
623   template <typename... ArgsTy>
624   FunctionCallee getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
625                                                  ArgsTy... Args);
626 
627   /// True if we're compiling the Linux kernel.
628   bool CompileKernel;
629   /// Track origins (allocation points) of uninitialized values.
630   int TrackOrigins;
631   bool Recover;
632   bool EagerChecks;
633 
634   Triple TargetTriple;
635   LLVMContext *C;
636   Type *IntptrTy; ///< Integer type with the size of a ptr in default AS.
637   Type *OriginTy;
638   PointerType *PtrTy; ///< Pointer type in the default address space.
639 
640   // XxxTLS variables represent the per-thread state in MSan and per-task state
641   // in KMSAN.
642   // In userspace these point to thread-local globals. In the kernel they
643   // point to the members of a per-task struct obtained via a call to
644   // __msan_get_context_state().
645 
646   /// Thread-local shadow storage for function parameters.
647   Value *ParamTLS;
648 
649   /// Thread-local origin storage for function parameters.
650   Value *ParamOriginTLS;
651 
652   /// Thread-local shadow storage for function return value.
653   Value *RetvalTLS;
654 
655   /// Thread-local origin storage for function return value.
656   Value *RetvalOriginTLS;
657 
658   /// Thread-local shadow storage for in-register va_arg function arguments.
659   Value *VAArgTLS;
660 
661   /// Thread-local origin storage for in-register va_arg function arguments.
662   Value *VAArgOriginTLS;
663 
664   /// Thread-local storage for the size of the va_arg overflow area.
665   Value *VAArgOverflowSizeTLS;
666 
667   /// Are the instrumentation callbacks set up?
668   bool CallbacksInitialized = false;
669 
670   /// The run-time callback to print a warning.
671   FunctionCallee WarningFn;
672 
673   // These arrays are indexed by log2(AccessSize).
674   FunctionCallee MaybeWarningFn[kNumberOfAccessSizes];
675   FunctionCallee MaybeWarningVarSizeFn;
676   FunctionCallee MaybeStoreOriginFn[kNumberOfAccessSizes];
677 
678   /// Run-time helper that generates a new origin value for a stack
679   /// allocation.
680   FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
681   // Same, but without a variable description.
682   FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
683 
684   /// Run-time helper that poisons stack on function entry.
685   FunctionCallee MsanPoisonStackFn;
686 
687   /// Run-time helper that records a store (or any event) of an
688   /// uninitialized value and returns an updated origin id encoding this info.
689   FunctionCallee MsanChainOriginFn;
690 
691   /// Run-time helper that paints an origin over a region.
692   FunctionCallee MsanSetOriginFn;
693 
694   /// MSan runtime replacements for memmove, memcpy and memset.
695   FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
696 
697   /// KMSAN callback for task-local function argument shadow.
698   StructType *MsanContextStateTy;
699   FunctionCallee MsanGetContextStateFn;
700 
701   /// Functions for poisoning/unpoisoning local variables
702   FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
703 
704   /// Pair of shadow/origin pointers.
705   Type *MsanMetadata;
706 
707   /// Each of the MsanMetadataPtrXxx functions returns a MsanMetadata.
708   FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
709   FunctionCallee MsanMetadataPtrForLoad_1_8[4];
710   FunctionCallee MsanMetadataPtrForStore_1_8[4];
711   FunctionCallee MsanInstrumentAsmStoreFn;
712 
713   /// Storage for return values of the MsanMetadataPtrXxx functions.
714   Value *MsanMetadataAlloca;
715 
716   /// Helper to choose between different MsanMetadataPtrXxx().
717   FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);
718 
719   /// Memory map parameters used in application-to-shadow calculation.
720   const MemoryMapParams *MapParams;
721 
722   /// Custom memory map parameters used when -msan-shadow-base or
723   /// -msan-origin-base is provided.
724   MemoryMapParams CustomMapParams;
725 
726   MDNode *ColdCallWeights;
727 
728   /// Branch weights for origin store.
729   MDNode *OriginStoreWeights;
730 };
731 
732 void insertModuleCtor(Module &M) {
733   getOrCreateSanitizerCtorAndInitFunctions(
734       M, kMsanModuleCtorName, kMsanInitName,
735       /*InitArgTypes=*/{},
736       /*InitArgs=*/{},
737       // This callback is invoked when the functions are created the first
738       // time. Hook them into the global ctors list in that case:
739       [&](Function *Ctor, FunctionCallee) {
740         if (!ClWithComdat) {
741           appendToGlobalCtors(M, Ctor, 0);
742           return;
743         }
744         Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
745         Ctor->setComdat(MsanCtorComdat);
746         appendToGlobalCtors(M, Ctor, 0, Ctor);
747       });
748 }
749 
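// Returns the command-line override if the option was passed, and the
// pass-supplied default otherwise. For example,
// getOptOrDefault(ClTrackOrigins, Kernel ? 2 : TO) below yields the value of
// -msan-track-origins if it was given on the command line.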
750 template <class T> T getOptOrDefault(const cl::opt<T> &Opt, T Default) {
751   return (Opt.getNumOccurrences() > 0) ? Opt : Default;
752 }
753 
754 } // end anonymous namespace
755 
756 MemorySanitizerOptions::MemorySanitizerOptions(int TO, bool R, bool K,
757                                                bool EagerChecks)
758     : Kernel(getOptOrDefault(ClEnableKmsan, K)),
759       TrackOrigins(getOptOrDefault(ClTrackOrigins, Kernel ? 2 : TO)),
760       Recover(getOptOrDefault(ClKeepGoing, Kernel || R)),
761       EagerChecks(getOptOrDefault(ClEagerChecks, EagerChecks)) {}
762 
763 PreservedAnalyses MemorySanitizerPass::run(Module &M,
764                                            ModuleAnalysisManager &AM) {
765   // Return early if the nosanitize_memory module flag is present.
766   if (checkIfAlreadyInstrumented(M, "nosanitize_memory"))
767     return PreservedAnalyses::all();
768   bool Modified = false;
769   if (!Options.Kernel) {
770     insertModuleCtor(M);
771     Modified = true;
772   }
773 
774   auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
775   for (Function &F : M) {
776     if (F.empty())
777       continue;
778     MemorySanitizer Msan(*F.getParent(), Options);
779     Modified |=
780         Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F));
781   }
782 
783   if (!Modified)
784     return PreservedAnalyses::all();
785 
786   PreservedAnalyses PA = PreservedAnalyses::none();
787   // GlobalsAA is considered stateless and does not get invalidated unless
788   // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
789   // make changes that require GlobalsAA to be invalidated.
790   PA.abandon<GlobalsAA>();
791   return PA;
792 }
793 
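// Prints the pass options in pipeline syntax. For example, options
// {Recover=true, Kernel=false, EagerChecks=false, TrackOrigins=2} are
// rendered (after the pass name) as: msan<recover;track-origins=2>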
794 void MemorySanitizerPass::printPipeline(
795     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
796   static_cast<PassInfoMixin<MemorySanitizerPass> *>(this)->printPipeline(
797       OS, MapClassName2PassName);
798   OS << '<';
799   if (Options.Recover)
800     OS << "recover;";
801   if (Options.Kernel)
802     OS << "kernel;";
803   if (Options.EagerChecks)
804     OS << "eager-checks;";
805   OS << "track-origins=" << Options.TrackOrigins;
806   OS << '>';
807 }
808 
809 /// Create a non-const global initialized with the given string.
810 ///
811 /// Creates a writable global for Str so that we can pass it to the
812 /// run-time lib. The runtime uses the first 4 bytes of the string to store
813 /// the frame ID, so the string needs to be mutable.
814 static GlobalVariable *createPrivateConstGlobalForString(Module &M,
815                                                          StringRef Str) {
816   Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
817   return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/true,
818                             GlobalValue::PrivateLinkage, StrConst, "");
819 }
820 
821 template <typename... ArgsTy>
822 FunctionCallee
823 MemorySanitizer::getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
824                                                  ArgsTy... Args) {
825   if (TargetTriple.getArch() == Triple::systemz) {
826     // SystemZ ABI: shadow/origin pair is returned via a hidden parameter.
827     return M.getOrInsertFunction(Name, Type::getVoidTy(*C), PtrTy,
828                                  std::forward<ArgsTy>(Args)...);
829   }
830 
831   return M.getOrInsertFunction(Name, MsanMetadata,
832                                std::forward<ArgsTy>(Args)...);
833 }
834 
835 /// Create KMSAN API callbacks.
836 void MemorySanitizer::createKernelApi(Module &M, const TargetLibraryInfo &TLI) {
837   IRBuilder<> IRB(*C);
838 
839   // These will be initialized in insertKmsanPrologue().
840   RetvalTLS = nullptr;
841   RetvalOriginTLS = nullptr;
842   ParamTLS = nullptr;
843   ParamOriginTLS = nullptr;
844   VAArgTLS = nullptr;
845   VAArgOriginTLS = nullptr;
846   VAArgOverflowSizeTLS = nullptr;
847 
848   WarningFn = M.getOrInsertFunction("__msan_warning",
849                                     TLI.getAttrList(C, {0}, /*Signed=*/false),
850                                     IRB.getVoidTy(), IRB.getInt32Ty());
851 
852   // Requests the per-task context state (kmsan_context_state*) from the
853   // runtime library.
854   MsanContextStateTy = StructType::get(
855       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
856       ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8),
857       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
858       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), /* va_arg_origin */
859       IRB.getInt64Ty(), ArrayType::get(OriginTy, kParamTLSSize / 4), OriginTy,
860       OriginTy);
861   MsanGetContextStateFn =
862       M.getOrInsertFunction("__msan_get_context_state", PtrTy);
863 
864   MsanMetadata = StructType::get(PtrTy, PtrTy);
865 
866   for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
867     std::string name_load =
868         "__msan_metadata_ptr_for_load_" + std::to_string(size);
869     std::string name_store =
870         "__msan_metadata_ptr_for_store_" + std::to_string(size);
871     MsanMetadataPtrForLoad_1_8[ind] =
872         getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
873     MsanMetadataPtrForStore_1_8[ind] =
874         getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
875   }
876 
877   MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
878       M, "__msan_metadata_ptr_for_load_n", PtrTy, IntptrTy);
879   MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
880       M, "__msan_metadata_ptr_for_store_n", PtrTy, IntptrTy);
881 
882   // Functions for poisoning and unpoisoning memory.
883   MsanPoisonAllocaFn = M.getOrInsertFunction(
884       "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
885   MsanUnpoisonAllocaFn = M.getOrInsertFunction(
886       "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
887 }
888 
889 static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
890   return M.getOrInsertGlobal(Name, Ty, [&] {
891     return new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
892                               nullptr, Name, nullptr,
893                               GlobalVariable::InitialExecTLSModel);
894   });
895 }
896 
897 /// Insert declarations for userspace-specific functions and globals.
898 void MemorySanitizer::createUserspaceApi(Module &M,
899                                          const TargetLibraryInfo &TLI) {
900   IRBuilder<> IRB(*C);
901 
902   // Create the callback.
903   // FIXME: this function should have "Cold" calling conv,
904   // which is not yet implemented.
905   if (TrackOrigins) {
906     StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
907                                       : "__msan_warning_with_origin_noreturn";
908     WarningFn = M.getOrInsertFunction(WarningFnName,
909                                       TLI.getAttrList(C, {0}, /*Signed=*/false),
910                                       IRB.getVoidTy(), IRB.getInt32Ty());
911   } else {
912     StringRef WarningFnName =
913         Recover ? "__msan_warning" : "__msan_warning_noreturn";
914     WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
915   }
916 
917   // Create the global TLS variables.
918   RetvalTLS =
919       getOrInsertGlobal(M, "__msan_retval_tls",
920                         ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8));
921 
922   RetvalOriginTLS = getOrInsertGlobal(M, "__msan_retval_origin_tls", OriginTy);
923 
924   ParamTLS =
925       getOrInsertGlobal(M, "__msan_param_tls",
926                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
927 
928   ParamOriginTLS =
929       getOrInsertGlobal(M, "__msan_param_origin_tls",
930                         ArrayType::get(OriginTy, kParamTLSSize / 4));
931 
932   VAArgTLS =
933       getOrInsertGlobal(M, "__msan_va_arg_tls",
934                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
935 
936   VAArgOriginTLS =
937       getOrInsertGlobal(M, "__msan_va_arg_origin_tls",
938                         ArrayType::get(OriginTy, kParamTLSSize / 4));
939 
940   VAArgOverflowSizeTLS = getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls",
941                                            IRB.getIntPtrTy(M.getDataLayout()));
942 
943   for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
944        AccessSizeIndex++) {
945     unsigned AccessSize = 1 << AccessSizeIndex;
946     std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
947     MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
948         FunctionName, TLI.getAttrList(C, {0, 1}, /*Signed=*/false),
949         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
950     MaybeWarningVarSizeFn = M.getOrInsertFunction(
951         "__msan_maybe_warning_N", TLI.getAttrList(C, {}, /*Signed=*/false),
952         IRB.getVoidTy(), PtrTy, IRB.getInt64Ty(), IRB.getInt32Ty());
953     FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
954     MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
955         FunctionName, TLI.getAttrList(C, {0, 2}, /*Signed=*/false),
956         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
957         IRB.getInt32Ty());
958   }
959 
960   MsanSetAllocaOriginWithDescriptionFn =
961       M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
962                             IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
963   MsanSetAllocaOriginNoDescriptionFn =
964       M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
965                             IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
966   MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
967                                             IRB.getVoidTy(), PtrTy, IntptrTy);
968 }
969 
970 /// Insert extern declaration of runtime-provided functions and globals.
971 void MemorySanitizer::initializeCallbacks(Module &M,
972                                           const TargetLibraryInfo &TLI) {
973   // Only do this once.
974   if (CallbacksInitialized)
975     return;
976 
977   IRBuilder<> IRB(*C);
978   // Initialize callbacks that are common for kernel and userspace
979   // instrumentation.
980   MsanChainOriginFn = M.getOrInsertFunction(
981       "__msan_chain_origin",
982       TLI.getAttrList(C, {0}, /*Signed=*/false, /*Ret=*/true), IRB.getInt32Ty(),
983       IRB.getInt32Ty());
984   MsanSetOriginFn = M.getOrInsertFunction(
985       "__msan_set_origin", TLI.getAttrList(C, {2}, /*Signed=*/false),
986       IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
987   MemmoveFn =
988       M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
989   MemcpyFn =
990       M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
991   MemsetFn = M.getOrInsertFunction("__msan_memset",
992                                    TLI.getAttrList(C, {1}, /*Signed=*/true),
993                                    PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
994 
995   MsanInstrumentAsmStoreFn = M.getOrInsertFunction(
996       "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);
997 
998   if (CompileKernel) {
999     createKernelApi(M, TLI);
1000   } else {
1001     createUserspaceApi(M, TLI);
1002   }
1003   CallbacksInitialized = true;
1004 }
1005 
1006 FunctionCallee MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore,
1007                                                              int size) {
1008   FunctionCallee *Fns =
1009       isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
1010   switch (size) {
1011   case 1:
1012     return Fns[0];
1013   case 2:
1014     return Fns[1];
1015   case 4:
1016     return Fns[2];
1017   case 8:
1018     return Fns[3];
1019   default:
1020     return nullptr;
1021   }
1022 }
1023 
1024 /// Module-level initialization.
1025 ///
1026 /// Inserts a call to __msan_init into the module's constructor list.
1027 void MemorySanitizer::initializeModule(Module &M) {
1028   auto &DL = M.getDataLayout();
1029 
1030   TargetTriple = M.getTargetTriple();
1031 
1032   bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
1033   bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
1034   // Check the overrides first
1035   if (ShadowPassed || OriginPassed) {
1036     CustomMapParams.AndMask = ClAndMask;
1037     CustomMapParams.XorMask = ClXorMask;
1038     CustomMapParams.ShadowBase = ClShadowBase;
1039     CustomMapParams.OriginBase = ClOriginBase;
1040     MapParams = &CustomMapParams;
1041   } else {
1042     switch (TargetTriple.getOS()) {
1043     case Triple::FreeBSD:
1044       switch (TargetTriple.getArch()) {
1045       case Triple::aarch64:
1046         MapParams = FreeBSD_ARM_MemoryMapParams.bits64;
1047         break;
1048       case Triple::x86_64:
1049         MapParams = FreeBSD_X86_MemoryMapParams.bits64;
1050         break;
1051       case Triple::x86:
1052         MapParams = FreeBSD_X86_MemoryMapParams.bits32;
1053         break;
1054       default:
1055         report_fatal_error("unsupported architecture");
1056       }
1057       break;
1058     case Triple::NetBSD:
1059       switch (TargetTriple.getArch()) {
1060       case Triple::x86_64:
1061         MapParams = NetBSD_X86_MemoryMapParams.bits64;
1062         break;
1063       default:
1064         report_fatal_error("unsupported architecture");
1065       }
1066       break;
1067     case Triple::Linux:
1068       switch (TargetTriple.getArch()) {
1069       case Triple::x86_64:
1070         MapParams = Linux_X86_MemoryMapParams.bits64;
1071         break;
1072       case Triple::x86:
1073         MapParams = Linux_X86_MemoryMapParams.bits32;
1074         break;
1075       case Triple::mips64:
1076       case Triple::mips64el:
1077         MapParams = Linux_MIPS_MemoryMapParams.bits64;
1078         break;
1079       case Triple::ppc64:
1080       case Triple::ppc64le:
1081         MapParams = Linux_PowerPC_MemoryMapParams.bits64;
1082         break;
1083       case Triple::systemz:
1084         MapParams = Linux_S390_MemoryMapParams.bits64;
1085         break;
1086       case Triple::aarch64:
1087       case Triple::aarch64_be:
1088         MapParams = Linux_ARM_MemoryMapParams.bits64;
1089         break;
1090       case Triple::loongarch64:
1091         MapParams = Linux_LoongArch_MemoryMapParams.bits64;
1092         break;
1093       default:
1094         report_fatal_error("unsupported architecture");
1095       }
1096       break;
1097     default:
1098       report_fatal_error("unsupported operating system");
1099     }
1100   }
1101 
1102   C = &(M.getContext());
1103   IRBuilder<> IRB(*C);
1104   IntptrTy = IRB.getIntPtrTy(DL);
1105   OriginTy = IRB.getInt32Ty();
1106   PtrTy = IRB.getPtrTy();
1107 
1108   ColdCallWeights = MDBuilder(*C).createUnlikelyBranchWeights();
1109   OriginStoreWeights = MDBuilder(*C).createUnlikelyBranchWeights();
1110 
1111   if (!CompileKernel) {
1112     if (TrackOrigins)
1113       M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
1114         return new GlobalVariable(
1115             M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
1116             IRB.getInt32(TrackOrigins), "__msan_track_origins");
1117       });
1118 
1119     if (Recover)
1120       M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
1121         return new GlobalVariable(M, IRB.getInt32Ty(), true,
1122                                   GlobalValue::WeakODRLinkage,
1123                                   IRB.getInt32(Recover), "__msan_keep_going");
1124       });
1125   }
1126 }
1127 
1128 namespace {
1129 
1130 /// A helper class that handles instrumentation of VarArg
1131 /// functions on a particular platform.
1132 ///
1133 /// Implementations are expected to insert the instrumentation
1134 /// necessary to propagate argument shadow through VarArg function
1135 /// calls. Visit* methods are called during an InstVisitor pass over
1136 /// the function, and should avoid creating new basic blocks. A new
1137 /// instance of this class is created for each instrumented function.
1138 struct VarArgHelper {
1139   virtual ~VarArgHelper() = default;
1140 
1141   /// Visit a CallBase.
1142   virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;
1143 
1144   /// Visit a va_start call.
1145   virtual void visitVAStartInst(VAStartInst &I) = 0;
1146 
1147   /// Visit a va_copy call.
1148   virtual void visitVACopyInst(VACopyInst &I) = 0;
1149 
1150   /// Finalize function instrumentation.
1151   ///
1152   /// This method is called after visiting all interesting (see above)
1153   /// instructions in a function.
1154   virtual void finalizeInstrumentation() = 0;
1155 };
1156 
1157 struct MemorySanitizerVisitor;
1158 
1159 } // end anonymous namespace
1160 
1161 static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
1162                                         MemorySanitizerVisitor &Visitor);
1163 
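// Returns the index into the MaybeWarningFn/MaybeStoreOriginFn arrays for a
// type of the given size. For example, TypeSizeToSizeIndex for a fixed
// 32-bit type yields Log2_32_Ceil((32 + 7) / 8) = 2, the index for 4-byte
// accesses; a 128-bit type yields 4 == kNumberOfAccessSizes, which callers
// treat as "no fitting access size" and route to a slow path.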
1164 static unsigned TypeSizeToSizeIndex(TypeSize TS) {
1165   if (TS.isScalable())
1166     // Scalable types unconditionally take slowpaths.
1167     return kNumberOfAccessSizes;
1168   unsigned TypeSizeFixed = TS.getFixedValue();
1169   if (TypeSizeFixed <= 8)
1170     return 0;
1171   return Log2_32_Ceil((TypeSizeFixed + 7) / 8);
1172 }
1173 
1174 namespace {
1175 
1176 /// Helper class to attach the debug location of the given instruction onto
1177 /// new instructions inserted after it.
1178 class NextNodeIRBuilder : public IRBuilder<> {
1179 public:
1180   explicit NextNodeIRBuilder(Instruction *IP) : IRBuilder<>(IP->getNextNode()) {
1181     SetCurrentDebugLocation(IP->getDebugLoc());
1182   }
1183 };
1184 
1185 /// This class does all the work for a given function. Store and Load
1186 /// instructions store and load corresponding shadow and origin
1187 /// values. Most instructions propagate shadow from arguments to their
1188 /// return values. Certain instructions (most importantly, BranchInst)
1189 /// test their argument shadow and print reports (with a runtime call) if it's
1190 /// non-zero.
1191 struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
1192   Function &F;
1193   MemorySanitizer &MS;
1194   SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
1195   ValueMap<Value *, Value *> ShadowMap, OriginMap;
1196   std::unique_ptr<VarArgHelper> VAHelper;
1197   const TargetLibraryInfo *TLI;
1198   Instruction *FnPrologueEnd;
1199   SmallVector<Instruction *, 16> Instructions;
1200 
1201   // The following flags disable parts of MSan instrumentation based on
1202   // exclusion list contents and command-line options.
1203   bool InsertChecks;
1204   bool PropagateShadow;
1205   bool PoisonStack;
1206   bool PoisonUndef;
1207   bool PoisonUndefVectors;
1208 
1209   struct ShadowOriginAndInsertPoint {
1210     Value *Shadow;
1211     Value *Origin;
1212     Instruction *OrigIns;
1213 
1214     ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
1215         : Shadow(S), Origin(O), OrigIns(I) {}
1216   };
1217   SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
1218   DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
1219   bool InstrumentLifetimeStart = ClHandleLifetimeIntrinsics;
1220   SmallSetVector<AllocaInst *, 16> AllocaSet;
1221   SmallVector<std::pair<IntrinsicInst *, AllocaInst *>, 16> LifetimeStartList;
1222   SmallVector<StoreInst *, 16> StoreList;
1223   int64_t SplittableBlocksCount = 0;
1224 
1225   MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
1226                          const TargetLibraryInfo &TLI)
1227       : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)), TLI(&TLI) {
1228     bool SanitizeFunction =
1229         F.hasFnAttribute(Attribute::SanitizeMemory) && !ClDisableChecks;
1230     InsertChecks = SanitizeFunction;
1231     PropagateShadow = SanitizeFunction;
1232     PoisonStack = SanitizeFunction && ClPoisonStack;
1233     PoisonUndef = SanitizeFunction && ClPoisonUndef;
1234     PoisonUndefVectors = SanitizeFunction && ClPoisonUndefVectors;
1235 
1236     // In the presence of unreachable blocks, we may see Phi nodes with
1237     // incoming nodes from such blocks. Since InstVisitor skips unreachable
1238     // blocks, such nodes will not have any shadow value associated with them.
1239     // It's easier to remove unreachable blocks than deal with missing shadow.
1240     removeUnreachableBlocks(F);
1241 
1242     MS.initializeCallbacks(*F.getParent(), TLI);
1243     FnPrologueEnd =
1244         IRBuilder<>(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHIIt())
1245             .CreateIntrinsic(Intrinsic::donothing, {});
1246 
1247     if (MS.CompileKernel) {
1248       IRBuilder<> IRB(FnPrologueEnd);
1249       insertKmsanPrologue(IRB);
1250     }
1251 
1252     LLVM_DEBUG(if (!InsertChecks) dbgs()
1253                << "MemorySanitizer is not inserting checks into '"
1254                << F.getName() << "'\n");
1255   }
1256 
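  // Decides whether a check on V should be emitted as a callback rather than
  // inlined. For example, with the default
  // -msan-instrumentation-with-call-threshold of 3500, the first 3500
  // splittable checks in a function are inlined and later ones use callbacks.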
1257   bool instrumentWithCalls(Value *V) {
1258     // Constants will likely be eliminated by follow-up passes.
1259     if (isa<Constant>(V))
1260       return false;
1261     ++SplittableBlocksCount;
1262     return ClInstrumentationWithCallThreshold >= 0 &&
1263            SplittableBlocksCount > ClInstrumentationWithCallThreshold;
1264   }
1265 
1266   bool isInPrologue(Instruction &I) {
1267     return I.getParent() == FnPrologueEnd->getParent() &&
1268            (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));
1269   }
1270 
1271   // Creates a new origin and records the stack trace. In general we can call
1272   // this function for any origin manipulation we like. However, it costs
1273   // runtime resources, so use it wisely, only where it can provide
1274   // additional information helpful to a user.
1275   Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
1276     if (MS.TrackOrigins <= 1)
1277       return V;
1278     return IRB.CreateCall(MS.MsanChainOriginFn, V);
1279   }
1280 
1281   Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
1282     const DataLayout &DL = F.getDataLayout();
1283     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1284     if (IntptrSize == kOriginSize)
1285       return Origin;
1286     assert(IntptrSize == kOriginSize * 2);
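    // E.g. with a 64-bit IntptrTy, the 32-bit origin 0xAABBCCDD is
    // zero-extended and replicated into both halves: 0xAABBCCDDAABBCCDD.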
1287     Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
1288     return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
1289   }
1290 
1291   /// Fill memory range with the given origin value.
1292   void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
1293                    TypeSize TS, Align Alignment) {
1294     const DataLayout &DL = F.getDataLayout();
1295     const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
1296     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1297     assert(IntptrAlignment >= kMinOriginAlignment);
1298     assert(IntptrSize >= kOriginSize);
1299 
1300     // Note: The loop-based form works for fixed-length vectors too; however,
1301     // we prefer to unroll and specialize alignment below.
1302     if (TS.isScalable()) {
1303       Value *Size = IRB.CreateTypeSize(MS.IntptrTy, TS);
1304       Value *RoundUp =
1305           IRB.CreateAdd(Size, ConstantInt::get(MS.IntptrTy, kOriginSize - 1));
1306       Value *End =
1307           IRB.CreateUDiv(RoundUp, ConstantInt::get(MS.IntptrTy, kOriginSize));
1308       auto [InsertPt, Index] =
1309           SplitBlockAndInsertSimpleForLoop(End, IRB.GetInsertPoint());
1310       IRB.SetInsertPoint(InsertPt);
1311 
1312       Value *GEP = IRB.CreateGEP(MS.OriginTy, OriginPtr, Index);
1313       IRB.CreateAlignedStore(Origin, GEP, kMinOriginAlignment);
1314       return;
1315     }
1316 
1317     unsigned Size = TS.getFixedValue();
1318 
1319     unsigned Ofs = 0;
1320     Align CurrentAlignment = Alignment;
1321     if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
1322       Value *IntptrOrigin = originToIntptr(IRB, Origin);
1323       Value *IntptrOriginPtr = IRB.CreatePointerCast(OriginPtr, MS.PtrTy);
1324       for (unsigned i = 0; i < Size / IntptrSize; ++i) {
1325         Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
1326                        : IntptrOriginPtr;
1327         IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
1328         Ofs += IntptrSize / kOriginSize;
1329         CurrentAlignment = IntptrAlignment;
1330       }
1331     }
1332 
1333     for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
1334       Value *GEP =
1335           i ? IRB.CreateConstGEP1_32(MS.OriginTy, OriginPtr, i) : OriginPtr;
1336       IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
1337       CurrentAlignment = kMinOriginAlignment;
1338     }
1339   }
1340 
1341   void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
1342                    Value *OriginPtr, Align Alignment) {
1343     const DataLayout &DL = F.getDataLayout();
1344     const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1345     TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
1346     // ZExt cannot convert between vector and scalar
1347     Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1348     if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1349       if (!ClCheckConstantShadow || ConstantShadow->isZeroValue()) {
1350         // Origin is not needed: value is initialized or const shadow is
1351         // ignored.
1352         return;
1353       }
1354       if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
1355         // Copy origin as the value is definitely uninitialized.
1356         paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1357                     OriginAlignment);
1358         return;
1359       }
1360       // Fallback to runtime check, which still can be optimized out later.
1361     }
1362 
1363     TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
1364     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1365     if (instrumentWithCalls(ConvertedShadow) &&
1366         SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
1367       FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
1368       Value *ConvertedShadow2 =
1369           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1370       CallBase *CB = IRB.CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
1371       CB->addParamAttr(0, Attribute::ZExt);
1372       CB->addParamAttr(2, Attribute::ZExt);
1373     } else {
1374       Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
1375       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1376           Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
1377       IRBuilder<> IRBNew(CheckTerm);
1378       paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1379                   OriginAlignment);
1380     }
1381   }
1382 
1383   void materializeStores() {
1384     for (StoreInst *SI : StoreList) {
1385       IRBuilder<> IRB(SI);
1386       Value *Val = SI->getValueOperand();
1387       Value *Addr = SI->getPointerOperand();
1388       Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1389       Value *ShadowPtr, *OriginPtr;
1390       Type *ShadowTy = Shadow->getType();
1391       const Align Alignment = SI->getAlign();
1392       const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1393       std::tie(ShadowPtr, OriginPtr) =
1394           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);
1395 
1396       [[maybe_unused]] StoreInst *NewSI =
1397           IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
1398       LLVM_DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
1399 
1400       if (SI->isAtomic())
1401         SI->setOrdering(addReleaseOrdering(SI->getOrdering()));
1402 
1403       if (MS.TrackOrigins && !SI->isAtomic())
1404         storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
1405                     OriginAlignment);
1406     }
1407   }
1408 
1409   // Returns true if Debug Location corresponds to multiple warnings.
1410   bool shouldDisambiguateWarningLocation(const DebugLoc &DebugLoc) {
1411     if (MS.TrackOrigins < 2)
1412       return false;
1413 
1414     if (LazyWarningDebugLocationCount.empty())
1415       for (const auto &I : InstrumentationList)
1416         ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];
1417 
1418     return LazyWarningDebugLocationCount[DebugLoc] >= ClDisambiguateWarning;
1419   }
1420 
1421   /// Helper function to insert a warning at IRB's current insert point.
1422   void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
1423     if (!Origin)
1424       Origin = (Value *)IRB.getInt32(0);
1425     assert(Origin->getType()->isIntegerTy());
1426 
1427     if (shouldDisambiguateWarningLocation(IRB.getCurrentDebugLocation())) {
1428       // Try to create an additional origin using the debug info of the last
1429       // origin instruction. It may provide additional information to the user.
1430       if (Instruction *OI = dyn_cast_or_null<Instruction>(Origin)) {
1431         assert(MS.TrackOrigins);
1432         auto NewDebugLoc = OI->getDebugLoc();
1433         // Origin update with missing or the same debug location provides no
1434         // additional value.
1435         if (NewDebugLoc && NewDebugLoc != IRB.getCurrentDebugLocation()) {
1436           // Insert the update just before the check, so we call into the
1437           // runtime only right before the report.
1438           IRBuilder<> IRBOrigin(&*IRB.GetInsertPoint());
1439           IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
1440           Origin = updateOrigin(Origin, IRBOrigin);
1441         }
1442       }
1443     }
1444 
1445     if (MS.CompileKernel || MS.TrackOrigins)
1446       IRB.CreateCall(MS.WarningFn, Origin)->setCannotMerge();
1447     else
1448       IRB.CreateCall(MS.WarningFn)->setCannotMerge();
1449     // FIXME: Insert UnreachableInst if !MS.Recover?
1450     // This may invalidate some of the following checks and needs to be done
1451     // at the very end.
1452   }
1453 
1454   void materializeOneCheck(IRBuilder<> &IRB, Value *ConvertedShadow,
1455                            Value *Origin) {
1456     const DataLayout &DL = F.getDataLayout();
1457     TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
1458     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1459     if (instrumentWithCalls(ConvertedShadow) && !MS.CompileKernel) {
1460       // ZExt cannot convert between vector and scalar
1461       ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
1462       Value *ConvertedShadow2 =
1463           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1464 
1465       if (SizeIndex < kNumberOfAccessSizes) {
1466         FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
1467         CallBase *CB = IRB.CreateCall(
1468             Fn,
1469             {ConvertedShadow2,
1470              MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
1471         CB->addParamAttr(0, Attribute::ZExt);
1472         CB->addParamAttr(1, Attribute::ZExt);
1473       } else {
1474         FunctionCallee Fn = MS.MaybeWarningVarSizeFn;
1475         Value *ShadowAlloca = IRB.CreateAlloca(ConvertedShadow2->getType(), 0u);
1476         IRB.CreateStore(ConvertedShadow2, ShadowAlloca);
1477         unsigned ShadowSize = DL.getTypeAllocSize(ConvertedShadow2->getType());
1478         CallBase *CB = IRB.CreateCall(
1479             Fn,
1480             {ShadowAlloca, ConstantInt::get(IRB.getInt64Ty(), ShadowSize),
1481              MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
1482         CB->addParamAttr(1, Attribute::ZExt);
1483         CB->addParamAttr(2, Attribute::ZExt);
1484       }
1485     } else {
1486       Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
1487       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1488           Cmp, &*IRB.GetInsertPoint(),
1489           /* Unreachable */ !MS.Recover, MS.ColdCallWeights);
1490 
1491       IRB.SetInsertPoint(CheckTerm);
1492       insertWarningFn(IRB, Origin);
1493       LLVM_DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
1494     }
1495   }
1496 
1497   void materializeInstructionChecks(
1498       ArrayRef<ShadowOriginAndInsertPoint> InstructionChecks) {
1499     const DataLayout &DL = F.getDataLayout();
1500     // Disable combining in some cases. TrackOrigins checks each shadow to pick
1501     // the correct origin.
1502     bool Combine = !MS.TrackOrigins;
1503     Instruction *Instruction = InstructionChecks.front().OrigIns;
1504     Value *Shadow = nullptr;
1505     for (const auto &ShadowData : InstructionChecks) {
1506       assert(ShadowData.OrigIns == Instruction);
1507       IRBuilder<> IRB(Instruction);
1508 
1509       Value *ConvertedShadow = ShadowData.Shadow;
1510 
1511       if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1512         if (!ClCheckConstantShadow || ConstantShadow->isZeroValue()) {
1513           // Skip, value is initialized or const shadow is ignored.
1514           continue;
1515         }
1516         if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
1517           // Report as the value is definitely uninitialized.
1518           insertWarningFn(IRB, ShadowData.Origin);
1519           if (!MS.Recover)
1520             return; // Always fail and stop here; no need to check the rest.
1521           // Skip the entire instruction.
1522           continue;
1523         }
1524         // Fallback to runtime check, which still can be optimized out later.
1525       }
1526 
1527       if (!Combine) {
1528         materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
1529         continue;
1530       }
1531 
1532       if (!Shadow) {
1533         Shadow = ConvertedShadow;
1534         continue;
1535       }
1536 
1537       Shadow = convertToBool(Shadow, IRB, "_mscmp");
1538       ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
1539       Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");
1540     }
1541 
1542     if (Shadow) {
1543       assert(Combine);
1544       IRBuilder<> IRB(Instruction);
1545       materializeOneCheck(IRB, Shadow, nullptr);
1546     }
1547   }
1548 
1549   void materializeChecks() {
1550 #ifndef NDEBUG
1551     // For assert below.
1552     SmallPtrSet<Instruction *, 16> Done;
1553 #endif
1554 
1555     for (auto I = InstrumentationList.begin();
1556          I != InstrumentationList.end();) {
1557       auto OrigIns = I->OrigIns;
1558       // Checks are grouped by the original instruction: all checks queued by
1559       // `insertCheckShadow` for an instruction are materialized at once.
1560       assert(Done.insert(OrigIns).second);
1561       auto J = std::find_if(I + 1, InstrumentationList.end(),
1562                             [OrigIns](const ShadowOriginAndInsertPoint &R) {
1563                               return OrigIns != R.OrigIns;
1564                             });
1565       // Process all checks of instruction at once.
1566       materializeInstructionChecks(ArrayRef<ShadowOriginAndInsertPoint>(I, J));
1567       I = J;
1568     }
1569 
1570     LLVM_DEBUG(dbgs() << "DONE:\n" << F);
1571   }
1572 
1573   // Populates the MSan TLS pointers from the per-task KMSAN context state.
1574   void insertKmsanPrologue(IRBuilder<> &IRB) {
1575     Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
1576     Constant *Zero = IRB.getInt32(0);
1577     MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1578                                 {Zero, IRB.getInt32(0)}, "param_shadow");
1579     MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1580                                  {Zero, IRB.getInt32(1)}, "retval_shadow");
1581     MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1582                                 {Zero, IRB.getInt32(2)}, "va_arg_shadow");
1583     MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1584                                       {Zero, IRB.getInt32(3)}, "va_arg_origin");
1585     MS.VAArgOverflowSizeTLS =
1586         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1587                       {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
1588     MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1589                                       {Zero, IRB.getInt32(5)}, "param_origin");
1590     MS.RetvalOriginTLS =
1591         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1592                       {Zero, IRB.getInt32(6)}, "retval_origin");
1593     if (MS.TargetTriple.getArch() == Triple::systemz)
1594       MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
1595   }
1596 
1597   /// Add MemorySanitizer instrumentation to a function.
1598   bool runOnFunction() {
1599     // Iterate all BBs in depth-first order and create shadow instructions
1600     // for all instructions (where applicable).
1601     // For PHI nodes we create dummy shadow PHIs which will be finalized later.
1602     for (BasicBlock *BB : depth_first(FnPrologueEnd->getParent()))
1603       visit(*BB);
1604 
1605     // `visit` above only collects instructions. Process them after iterating
1606     // over the CFG, because instrumentation may itself transform the CFG.
1607     for (Instruction *I : Instructions)
1608       InstVisitor<MemorySanitizerVisitor>::visit(*I);
1609 
1610     // Finalize PHI nodes.
1611     for (PHINode *PN : ShadowPHINodes) {
1612       PHINode *PNS = cast<PHINode>(getShadow(PN));
1613       PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
1614       size_t NumValues = PN->getNumIncomingValues();
1615       for (size_t v = 0; v < NumValues; v++) {
1616         PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1617         if (PNO)
1618           PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1619       }
1620     }
1621 
1622     VAHelper->finalizeInstrumentation();
1623 
1624     // Poison allocas at their llvm.lifetime.start intrinsics, unless we have
1625     // fallen back to instrumenting only the allocas themselves.
1626     if (InstrumentLifetimeStart) {
1627       for (auto Item : LifetimeStartList) {
1628         instrumentAlloca(*Item.second, Item.first);
1629         AllocaSet.remove(Item.second);
1630       }
1631     }
1632     // Poison the allocas for which we didn't instrument the corresponding
1633     // lifetime intrinsics.
1634     for (AllocaInst *AI : AllocaSet)
1635       instrumentAlloca(*AI);
1636 
1637     // Insert shadow value checks.
1638     materializeChecks();
1639 
1640     // Delayed instrumentation of StoreInst.
1641     // This must not add new address checks.
1642     materializeStores();
1643 
1644     return true;
1645   }
1646 
1647   /// Compute the shadow type that corresponds to a given Value.
1648   Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }
1649 
1650   /// Compute the shadow type that corresponds to a given Type.
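       /// A sketch of the mapping implemented below: i32 -> i32,
       /// <4 x float> -> <4 x i32>, [2 x i8] -> [2 x i8],
       /// {i8, double} -> {i8, i64}; any other sized type maps to an integer
       /// of the same bit width.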
1651   Type *getShadowTy(Type *OrigTy) {
1652     if (!OrigTy->isSized()) {
1653       return nullptr;
1654     }
1655     // For integer types, the shadow is the same as the original type.
1656     // This may return oddly-sized types like i1.
1657     if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
1658       return IT;
1659     const DataLayout &DL = F.getDataLayout();
1660     if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1661       uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
1662       return VectorType::get(IntegerType::get(*MS.C, EltSize),
1663                              VT->getElementCount());
1664     }
1665     if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1666       return ArrayType::get(getShadowTy(AT->getElementType()),
1667                             AT->getNumElements());
1668     }
1669     if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
1670       SmallVector<Type *, 4> Elements;
1671       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1672         Elements.push_back(getShadowTy(ST->getElementType(i)));
1673       StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
1674       LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
1675       return Res;
1676     }
1677     uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
1678     return IntegerType::get(*MS.C, TypeSize);
1679   }
1680 
1681   /// Extract combined shadow of struct elements as a bool
1682   Value *collapseStructShadow(StructType *Struct, Value *Shadow,
1683                               IRBuilder<> &IRB) {
1684     Value *FalseVal = IRB.getIntN(/* width */ 1, /* value */ 0);
1685     Value *Aggregator = FalseVal;
1686 
1687     for (unsigned Idx = 0; Idx < Struct->getNumElements(); Idx++) {
1688       // Combine by ORing together each element's bool shadow
1689       Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
1690       Value *ShadowBool = convertToBool(ShadowItem, IRB);
1691 
1692       if (Aggregator != FalseVal)
1693         Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
1694       else
1695         Aggregator = ShadowBool;
1696     }
1697 
1698     return Aggregator;
1699   }
1700 
1701   // Extract combined shadow of array elements
1702   Value *collapseArrayShadow(ArrayType *Array, Value *Shadow,
1703                              IRBuilder<> &IRB) {
1704     if (!Array->getNumElements())
1705       return IRB.getIntN(/* width */ 1, /* value */ 0);
1706 
1707     Value *FirstItem = IRB.CreateExtractValue(Shadow, 0);
1708     Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
1709 
1710     for (unsigned Idx = 1; Idx < Array->getNumElements(); Idx++) {
1711       Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
1712       Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1713       Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
1714     }
1715     return Aggregator;
1716   }
1717 
1718   /// Convert a shadow value to its flattened variant. The resulting
1719   /// shadow may not necessarily have the same bit width as the input
1720   /// value, but it will always be comparable to zero.
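       /// E.g. a <4 x i32> shadow is bitcast to i128, a struct shadow is
       /// collapsed to an i1 by OR-ing its fields as bools, and a scalable
       /// vector shadow is OR-reduced across its elements first.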
1721   Value *convertShadowToScalar(Value *V, IRBuilder<> &IRB) {
1722     if (StructType *Struct = dyn_cast<StructType>(V->getType()))
1723       return collapseStructShadow(Struct, V, IRB);
1724     if (ArrayType *Array = dyn_cast<ArrayType>(V->getType()))
1725       return collapseArrayShadow(Array, V, IRB);
1726     if (isa<VectorType>(V->getType())) {
1727       if (isa<ScalableVectorType>(V->getType()))
1728         return convertShadowToScalar(IRB.CreateOrReduce(V), IRB);
1729       unsigned BitWidth =
1730           V->getType()->getPrimitiveSizeInBits().getFixedValue();
1731       return IRB.CreateBitCast(V, IntegerType::get(*MS.C, BitWidth));
1732     }
1733     return V;
1734   }
1735 
1736   // Convert a scalar value to an i1 by comparing with 0
1737   Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &name = "") {
1738     Type *VTy = V->getType();
1739     if (!VTy->isIntegerTy())
1740       return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
1741     if (VTy->getIntegerBitWidth() == 1)
1742       // Just converting a bool to a bool, so do nothing.
1743       return V;
1744     return IRB.CreateICmpNE(V, ConstantInt::get(VTy, 0), name);
1745   }
1746 
1747   Type *ptrToIntPtrType(Type *PtrTy) const {
1748     if (VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
1749       return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
1750                              VectTy->getElementCount());
1751     }
1752     assert(PtrTy->isIntOrPtrTy());
1753     return MS.IntptrTy;
1754   }
1755 
1756   Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
1757     if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1758       return VectorType::get(
1759           getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
1760           VectTy->getElementCount());
1761     }
1762     assert(IntPtrTy == MS.IntptrTy);
1763     return MS.PtrTy;
1764   }
1765 
1766   Constant *constToIntPtr(Type *IntPtrTy, uint64_t C) const {
1767     if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1768       return ConstantVector::getSplat(
1769           VectTy->getElementCount(),
1770           constToIntPtr(VectTy->getElementType(), C));
1771     }
1772     assert(IntPtrTy == MS.IntptrTy);
1773     return ConstantInt::get(MS.IntptrTy, C);
1774   }
1775 
1776   /// Returns the integer shadow offset that corresponds to a given
1777   /// application address, whereby:
1778   ///
1779   ///     Offset = (Addr & ~AndMask) ^ XorMask
1780   ///     Shadow = ShadowBase + Offset
1781   ///     Origin = (OriginBase + Offset) & ~Alignment
1782   ///
1783   /// Note: for efficiency, many shadow mappings require only the XorMask
1784   ///       and OriginBase; the AndMask and ShadowBase are often zero.
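       ///
       /// For example, the default Linux/x86_64 mapping (at the time of this
       /// revision) has AndMask == 0 and XorMask == 0x500000000000, so the
       /// offset reduces to Addr ^ 0x500000000000 and no AND is emitted.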
1785   Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
1786     Type *IntptrTy = ptrToIntPtrType(Addr->getType());
1787     Value *OffsetLong = IRB.CreatePointerCast(Addr, IntptrTy);
1788 
1789     if (uint64_t AndMask = MS.MapParams->AndMask)
1790       OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
1791 
1792     if (uint64_t XorMask = MS.MapParams->XorMask)
1793       OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
1794     return OffsetLong;
1795   }
1796 
1797   /// Compute the shadow and origin addresses corresponding to a given
1798   /// application address.
1799   ///
1800   /// Shadow = ShadowBase + Offset
1801   /// Origin = (OriginBase + Offset) & ~3ULL
1802   /// Addr can be a ptr or <N x ptr>. In both cases ShadowTy is the shadow
1803   /// type of a single pointee.
1804   /// Returns <shadow_ptr, origin_ptr> or <<N x shadow_ptr>, <N x origin_ptr>>.
1805   std::pair<Value *, Value *>
1806   getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
1807                               MaybeAlign Alignment) {
1808     VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
1809     if (!VectTy) {
1810       assert(Addr->getType()->isPointerTy());
1811     } else {
1812       assert(VectTy->getElementType()->isPointerTy());
1813     }
1814     Type *IntptrTy = ptrToIntPtrType(Addr->getType());
1815     Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1816     Value *ShadowLong = ShadowOffset;
1817     if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
1818       ShadowLong =
1819           IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
1820     }
1821     Value *ShadowPtr = IRB.CreateIntToPtr(
1822         ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));
1823 
1824     Value *OriginPtr = nullptr;
1825     if (MS.TrackOrigins) {
1826       Value *OriginLong = ShadowOffset;
1827       uint64_t OriginBase = MS.MapParams->OriginBase;
1828       if (OriginBase != 0)
1829         OriginLong =
1830             IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
1831       if (!Alignment || *Alignment < kMinOriginAlignment) {
1832         uint64_t Mask = kMinOriginAlignment.value() - 1;
1833         OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
1834       }
1835       OriginPtr = IRB.CreateIntToPtr(
1836           OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
1837     }
1838     return std::make_pair(ShadowPtr, OriginPtr);
1839   }
1840 
1841   template <typename... ArgsTy>
1842   Value *createMetadataCall(IRBuilder<> &IRB, FunctionCallee Callee,
1843                             ArgsTy... Args) {
1844     if (MS.TargetTriple.getArch() == Triple::systemz) {
1845       IRB.CreateCall(Callee,
1846                      {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
1847       return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
1848     }
1849 
1850     return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
1851   }
1852 
1853   std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
1854                                                             IRBuilder<> &IRB,
1855                                                             Type *ShadowTy,
1856                                                             bool isStore) {
1857     Value *ShadowOriginPtrs;
1858     const DataLayout &DL = F.getDataLayout();
1859     TypeSize Size = DL.getTypeStoreSize(ShadowTy);
1860 
1861     FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
1862     Value *AddrCast = IRB.CreatePointerCast(Addr, MS.PtrTy);
1863     if (Getter) {
1864       ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
1865     } else {
1866       Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
1867       ShadowOriginPtrs = createMetadataCall(
1868           IRB,
1869           isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
1870           AddrCast, SizeVal);
1871     }
1872     Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
1873     ShadowPtr = IRB.CreatePointerCast(ShadowPtr, MS.PtrTy);
1874     Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
1875 
1876     return std::make_pair(ShadowPtr, OriginPtr);
1877   }
1878 
1879   /// Addr can be a ptr or <N x ptr>. In both cases ShadowTy is the shadow
1880   /// type of a single pointee.
1881   /// Returns <shadow_ptr, origin_ptr> or <<N x shadow_ptr>, <N x origin_ptr>>.
1882   std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
1883                                                        IRBuilder<> &IRB,
1884                                                        Type *ShadowTy,
1885                                                        bool isStore) {
1886     VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
1887     if (!VectTy) {
1888       assert(Addr->getType()->isPointerTy());
1889       return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);
1890     }
1891 
1892     // TODO: Support callbacks with vectors of addresses.
1893     unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
1894     Value *ShadowPtrs = ConstantInt::getNullValue(
1895         FixedVectorType::get(IRB.getPtrTy(), NumElements));
1896     Value *OriginPtrs = nullptr;
1897     if (MS.TrackOrigins)
1898       OriginPtrs = ConstantInt::getNullValue(
1899           FixedVectorType::get(IRB.getPtrTy(), NumElements));
1900     for (unsigned i = 0; i < NumElements; ++i) {
1901       Value *OneAddr =
1902           IRB.CreateExtractElement(Addr, ConstantInt::get(IRB.getInt32Ty(), i));
1903       auto [ShadowPtr, OriginPtr] =
1904           getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);
1905 
1906       ShadowPtrs = IRB.CreateInsertElement(
1907           ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
1908       if (MS.TrackOrigins)
1909         OriginPtrs = IRB.CreateInsertElement(
1910             OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
1911     }
1912     return {ShadowPtrs, OriginPtrs};
1913   }
1914 
1915   std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
1916                                                  Type *ShadowTy,
1917                                                  MaybeAlign Alignment,
1918                                                  bool isStore) {
1919     if (MS.CompileKernel)
1920       return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
1921     return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
1922   }
1923 
1924   /// Compute the shadow address for a given function argument.
1925   ///
1926   /// Shadow = ParamTLS+ArgOffset.
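       ///
       /// E.g. for f(i32 %a, i64 %b), %a's shadow is read from
       /// __msan_param_tls at offset 0 and %b's at offset 8, because offsets
       /// are rounded up to kShadowTLSAlignment.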
1927   Value *getShadowPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
1928     Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
1929     if (ArgOffset)
1930       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1931     return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg");
1932   }
1933 
1934   /// Compute the origin address for a given function argument.
1935   Value *getOriginPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
1936     if (!MS.TrackOrigins)
1937       return nullptr;
1938     Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
1939     if (ArgOffset)
1940       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1941     return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg_o");
1942   }
1943 
1944   /// Compute the shadow address for a retval.
1945   Value *getShadowPtrForRetval(IRBuilder<> &IRB) {
1946     return IRB.CreatePointerCast(MS.RetvalTLS, IRB.getPtrTy(0), "_msret");
1947   }
1948 
1949   /// Compute the origin address for a retval.
1950   Value *getOriginPtrForRetval() {
1951     // We keep a single origin for the entire retval. Might be too optimistic.
1952     return MS.RetvalOriginTLS;
1953   }
1954 
1955   /// Set SV to be the shadow value for V.
1956   void setShadow(Value *V, Value *SV) {
1957     assert(!ShadowMap.count(V) && "Values may only have one shadow");
1958     ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1959   }
1960 
1961   /// Set Origin to be the origin value for V.
1962   void setOrigin(Value *V, Value *Origin) {
1963     if (!MS.TrackOrigins)
1964       return;
1965     assert(!OriginMap.count(V) && "Values may only have one origin");
1966     LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
1967     OriginMap[V] = Origin;
1968   }
1969 
1970   Constant *getCleanShadow(Type *OrigTy) {
1971     Type *ShadowTy = getShadowTy(OrigTy);
1972     if (!ShadowTy)
1973       return nullptr;
1974     return Constant::getNullValue(ShadowTy);
1975   }
1976 
1977   /// Create a clean shadow value for a given value.
1978   ///
1979   /// Clean shadow (all zeroes) means all bits of the value are defined
1980   /// (initialized).
1981   Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }
1982 
1983   /// Create a dirty shadow of a given shadow type.
1984   Constant *getPoisonedShadow(Type *ShadowTy) {
1985     assert(ShadowTy);
1986     if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1987       return Constant::getAllOnesValue(ShadowTy);
1988     if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1989       SmallVector<Constant *, 4> Vals(AT->getNumElements(),
1990                                       getPoisonedShadow(AT->getElementType()));
1991       return ConstantArray::get(AT, Vals);
1992     }
1993     if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1994       SmallVector<Constant *, 4> Vals;
1995       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1996         Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
1997       return ConstantStruct::get(ST, Vals);
1998     }
1999     llvm_unreachable("Unexpected shadow type");
2000   }
2001 
2002   /// Create a dirty shadow for a given value.
2003   Constant *getPoisonedShadow(Value *V) {
2004     Type *ShadowTy = getShadowTy(V);
2005     if (!ShadowTy)
2006       return nullptr;
2007     return getPoisonedShadow(ShadowTy);
2008   }
2009 
2010   /// Create a clean (zero) origin.
2011   Value *getCleanOrigin() { return Constant::getNullValue(MS.OriginTy); }
2012 
2013   /// Get the shadow value for a given Value.
2014   ///
2015   /// This function either returns the value set earlier with setShadow,
2016   /// or extracts it from ParamTLS (for function arguments).
2017   Value *getShadow(Value *V) {
2018     if (Instruction *I = dyn_cast<Instruction>(V)) {
2019       if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
2020         return getCleanShadow(V);
2021       // For instructions the shadow is already stored in the map.
2022       Value *Shadow = ShadowMap[V];
2023       if (!Shadow) {
2024         LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
2025         assert(Shadow && "No shadow for a value");
2026       }
2027       return Shadow;
2028     }
2029     // Handle fully undefined values
2030     // (partially undefined constant vectors are handled later)
2031     if ([[maybe_unused]] UndefValue *U = dyn_cast<UndefValue>(V)) {
2032       Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
2033                                                         : getCleanShadow(V);
2034       LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
2035       return AllOnes;
2036     }
2037     if (Argument *A = dyn_cast<Argument>(V)) {
2038       // For arguments we compute the shadow on demand and store it in the map.
2039       Value *&ShadowPtr = ShadowMap[V];
2040       if (ShadowPtr)
2041         return ShadowPtr;
2042       Function *F = A->getParent();
2043       IRBuilder<> EntryIRB(FnPrologueEnd);
2044       unsigned ArgOffset = 0;
2045       const DataLayout &DL = F->getDataLayout();
2046       for (auto &FArg : F->args()) {
2047         if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
2048           LLVM_DEBUG(dbgs() << (FArg.getType()->isScalableTy()
2049                                     ? "vscale not fully supported\n"
2050                                     : "Arg is not sized\n"));
2051           if (A == &FArg) {
2052             ShadowPtr = getCleanShadow(V);
2053             setOrigin(A, getCleanOrigin());
2054             break;
2055           }
2056           continue;
2057         }
2058 
2059         unsigned Size = FArg.hasByValAttr()
2060                             ? DL.getTypeAllocSize(FArg.getParamByValType())
2061                             : DL.getTypeAllocSize(FArg.getType());
2062 
2063         if (A == &FArg) {
2064           bool Overflow = ArgOffset + Size > kParamTLSSize;
2065           if (FArg.hasByValAttr()) {
2066             // ByVal pointer itself has clean shadow. We copy the actual
2067             // argument shadow to the underlying memory.
2068             // Figure out maximal valid memcpy alignment.
2069             const Align ArgAlign = DL.getValueOrABITypeAlignment(
2070                 FArg.getParamAlign(), FArg.getParamByValType());
2071             Value *CpShadowPtr, *CpOriginPtr;
2072             std::tie(CpShadowPtr, CpOriginPtr) =
2073                 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
2074                                    /*isStore*/ true);
2075             if (!PropagateShadow || Overflow) {
2076               // ParamTLS overflow.
2077               EntryIRB.CreateMemSet(
2078                   CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
2079                   Size, ArgAlign);
2080             } else {
2081               Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2082               const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
2083               [[maybe_unused]] Value *Cpy = EntryIRB.CreateMemCpy(
2084                   CpShadowPtr, CopyAlign, Base, CopyAlign, Size);
2085               LLVM_DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
2086 
2087               if (MS.TrackOrigins) {
2088                 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2089                 // FIXME: OriginSize should be:
2090                 // alignTo(V % kMinOriginAlignment + Size, kMinOriginAlignment)
2091                 unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
2092                 EntryIRB.CreateMemCpy(
2093                     CpOriginPtr,
2094                     /* by getShadowOriginPtr */ kMinOriginAlignment, OriginPtr,
2095                     /* by origin_tls[ArgOffset] */ kMinOriginAlignment,
2096                     OriginSize);
2097               }
2098             }
2099           }
2100 
2101           if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
2102               (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
2103             ShadowPtr = getCleanShadow(V);
2104             setOrigin(A, getCleanOrigin());
2105           } else {
2106             // Shadow over TLS
2107             Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2108             ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
2109                                                    kShadowTLSAlignment);
2110             if (MS.TrackOrigins) {
2111               Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2112               setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
2113             }
2114           }
2115           LLVM_DEBUG(dbgs()
2116                      << "  ARG:    " << FArg << " ==> " << *ShadowPtr << "\n");
2117           break;
2118         }
2119 
2120         ArgOffset += alignTo(Size, kShadowTLSAlignment);
2121       }
2122       assert(ShadowPtr && "Could not find shadow for an argument");
2123       return ShadowPtr;
2124     }
2125 
2126     // Check for partially-undefined constant vectors
2127     // TODO: scalable vectors (this is hard because we do not have IRBuilder)
2128     if (isa<FixedVectorType>(V->getType()) && isa<Constant>(V) &&
2129         cast<Constant>(V)->containsUndefOrPoisonElement() && PropagateShadow &&
2130         PoisonUndefVectors) {
2131       unsigned NumElems = cast<FixedVectorType>(V->getType())->getNumElements();
2132       SmallVector<Constant *, 32> ShadowVector(NumElems);
2133       for (unsigned i = 0; i != NumElems; ++i) {
2134         Constant *Elem = cast<Constant>(V)->getAggregateElement(i);
2135         ShadowVector[i] = isa<UndefValue>(Elem) ? getPoisonedShadow(Elem)
2136                                                 : getCleanShadow(Elem);
2137       }
2138 
2139       Value *ShadowConstant = ConstantVector::get(ShadowVector);
2140       LLVM_DEBUG(dbgs() << "Partial undef constant vector: " << *V << " ==> "
2141                         << *ShadowConstant << "\n");
2142 
2143       return ShadowConstant;
2144     }
2145 
2146     // TODO: partially-undefined constant arrays, structures, and nested types
2147 
2148     // For everything else the shadow is zero.
2149     return getCleanShadow(V);
2150   }
2151 
2152   /// Get the shadow for i-th argument of the instruction I.
2153   Value *getShadow(Instruction *I, int i) {
2154     return getShadow(I->getOperand(i));
2155   }
2156 
2157   /// Get the origin for a value.
2158   Value *getOrigin(Value *V) {
2159     if (!MS.TrackOrigins)
2160       return nullptr;
2161     if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
2162       return getCleanOrigin();
2163     assert((isa<Instruction>(V) || isa<Argument>(V)) &&
2164            "Unexpected value type in getOrigin()");
2165     if (Instruction *I = dyn_cast<Instruction>(V)) {
2166       if (I->getMetadata(LLVMContext::MD_nosanitize))
2167         return getCleanOrigin();
2168     }
2169     Value *Origin = OriginMap[V];
2170     assert(Origin && "Missing origin");
2171     return Origin;
2172   }
2173 
2174   /// Get the origin for i-th argument of the instruction I.
2175   Value *getOrigin(Instruction *I, int i) {
2176     return getOrigin(I->getOperand(i));
2177   }
2178 
2179   /// Remember the place where a shadow check should be inserted.
2180   ///
2181   /// This location will be later instrumented with a check that will print a
2182   /// UMR warning in runtime if the shadow value is not 0.
2183   void insertCheckShadow(Value *Shadow, Value *Origin, Instruction *OrigIns) {
2184     assert(Shadow);
2185     if (!InsertChecks)
2186       return;
2187 
2188     if (!DebugCounter::shouldExecute(DebugInsertCheck)) {
2189       LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
2190                         << *OrigIns << "\n");
2191       return;
2192     }
2193 #ifndef NDEBUG
2194     Type *ShadowTy = Shadow->getType();
2195     assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
2196             isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
2197            "Can only insert checks for integer, vector, and aggregate shadow "
2198            "types");
2199 #endif
2200     InstrumentationList.push_back(
2201         ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
2202   }
2203 
2204   /// Get shadow for value, and remember the place where a shadow check should
2205   /// be inserted.
2206   ///
2207   /// This location will be later instrumented with a check that will print a
2208   /// UMR warning in runtime if the value is not fully defined.
2209   void insertCheckShadowOf(Value *Val, Instruction *OrigIns) {
2210     assert(Val);
2211     Value *Shadow, *Origin;
2212     if (ClCheckConstantShadow) {
2213       Shadow = getShadow(Val);
2214       if (!Shadow)
2215         return;
2216       Origin = getOrigin(Val);
2217     } else {
2218       Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
2219       if (!Shadow)
2220         return;
2221       Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
2222     }
2223     insertCheckShadow(Shadow, Origin, OrigIns);
2224   }
2225 
2226   AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
2227     switch (a) {
2228     case AtomicOrdering::NotAtomic:
2229       return AtomicOrdering::NotAtomic;
2230     case AtomicOrdering::Unordered:
2231     case AtomicOrdering::Monotonic:
2232     case AtomicOrdering::Release:
2233       return AtomicOrdering::Release;
2234     case AtomicOrdering::Acquire:
2235     case AtomicOrdering::AcquireRelease:
2236       return AtomicOrdering::AcquireRelease;
2237     case AtomicOrdering::SequentiallyConsistent:
2238       return AtomicOrdering::SequentiallyConsistent;
2239     }
2240     llvm_unreachable("Unknown ordering");
2241   }
2242 
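       // Builds a constant table mapping each C ABI ordering to one strong
       // enough to also order the shadow accesses; instrumentation of
       // __atomic_* library calls selects the new ordering by indexing into
       // this table (via extractelement) with the call's ordering operand.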
2243   Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
2244     constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2245     uint32_t OrderingTable[NumOrderings] = {};
2246 
2247     OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2248         OrderingTable[(int)AtomicOrderingCABI::release] =
2249             (int)AtomicOrderingCABI::release;
2250     OrderingTable[(int)AtomicOrderingCABI::consume] =
2251         OrderingTable[(int)AtomicOrderingCABI::acquire] =
2252             OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2253                 (int)AtomicOrderingCABI::acq_rel;
2254     OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2255         (int)AtomicOrderingCABI::seq_cst;
2256 
2257     return ConstantDataVector::get(IRB.getContext(), OrderingTable);
2258   }
2259 
2260   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
2261     switch (a) {
2262     case AtomicOrdering::NotAtomic:
2263       return AtomicOrdering::NotAtomic;
2264     case AtomicOrdering::Unordered:
2265     case AtomicOrdering::Monotonic:
2266     case AtomicOrdering::Acquire:
2267       return AtomicOrdering::Acquire;
2268     case AtomicOrdering::Release:
2269     case AtomicOrdering::AcquireRelease:
2270       return AtomicOrdering::AcquireRelease;
2271     case AtomicOrdering::SequentiallyConsistent:
2272       return AtomicOrdering::SequentiallyConsistent;
2273     }
2274     llvm_unreachable("Unknown ordering");
2275   }
2276 
2277   Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
2278     constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2279     uint32_t OrderingTable[NumOrderings] = {};
2280 
2281     OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2282         OrderingTable[(int)AtomicOrderingCABI::acquire] =
2283             OrderingTable[(int)AtomicOrderingCABI::consume] =
2284                 (int)AtomicOrderingCABI::acquire;
2285     OrderingTable[(int)AtomicOrderingCABI::release] =
2286         OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2287             (int)AtomicOrderingCABI::acq_rel;
2288     OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2289         (int)AtomicOrderingCABI::seq_cst;
2290 
2291     return ConstantDataVector::get(IRB.getContext(), OrderingTable);
2292   }
2293 
2294   // ------------------- Visitors.
2295   using InstVisitor<MemorySanitizerVisitor>::visit;
2296   void visit(Instruction &I) {
2297     if (I.getMetadata(LLVMContext::MD_nosanitize))
2298       return;
2299     // Don't visit instructions that belong to the function prologue.
2300     if (isInPrologue(I))
2301       return;
2302     if (!DebugCounter::shouldExecute(DebugInstrumentInstruction)) {
2303       LLVM_DEBUG(dbgs() << "Skipping instruction: " << I << "\n");
2304       // We still need to set the shadow and origin to clean values.
2305       setShadow(&I, getCleanShadow(&I));
2306       setOrigin(&I, getCleanOrigin());
2307       return;
2308     }
2309 
2310     Instructions.push_back(&I);
2311   }
2312 
2313   /// Instrument LoadInst
2314   ///
2315   /// Loads the corresponding shadow and (optionally) origin.
2316   /// Optionally, checks that the load address is fully defined.
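       ///
       /// Roughly, for %x = load i32, ptr %p this emits (a sketch, userspace
       /// mapping): compute the shadow address from %p as described in
       /// getShadowPtrOffset, load the i32 shadow "_msld" from it, and, with
       /// origins enabled, load the matching 4-byte origin.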
2317   void visitLoadInst(LoadInst &I) {
2318     assert(I.getType()->isSized() && "Load type must have size");
2319     assert(!I.getMetadata(LLVMContext::MD_nosanitize));
2320     NextNodeIRBuilder IRB(&I);
2321     Type *ShadowTy = getShadowTy(&I);
2322     Value *Addr = I.getPointerOperand();
2323     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
2324     const Align Alignment = I.getAlign();
2325     if (PropagateShadow) {
2326       std::tie(ShadowPtr, OriginPtr) =
2327           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2328       setShadow(&I,
2329                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
2330     } else {
2331       setShadow(&I, getCleanShadow(&I));
2332     }
2333 
2334     if (ClCheckAccessAddress)
2335       insertCheckShadowOf(I.getPointerOperand(), &I);
2336 
2337     if (I.isAtomic())
2338       I.setOrdering(addAcquireOrdering(I.getOrdering()));
2339 
2340     if (MS.TrackOrigins) {
2341       if (PropagateShadow) {
2342         const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
2343         setOrigin(
2344             &I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr, OriginAlignment));
2345       } else {
2346         setOrigin(&I, getCleanOrigin());
2347       }
2348     }
2349   }
2350 
2351   /// Instrument StoreInst
2352   ///
2353   /// Stores the corresponding shadow and (optionally) origin.
2354   /// Optionally, checks that the store address is fully defined.
2355   void visitStoreInst(StoreInst &I) {
2356     StoreList.push_back(&I);
2357     if (ClCheckAccessAddress)
2358       insertCheckShadowOf(I.getPointerOperand(), &I);
2359   }
2360 
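       // Atomic RMW and CAS are handled conservatively below: the shadow of
       // the target memory is cleared and the result is treated as fully
       // initialized, since modeling the read-modify-write precisely would
       // require atomic updates of the shadow as well.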
2361   void handleCASOrRMW(Instruction &I) {
2362     assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
2363 
2364     IRBuilder<> IRB(&I);
2365     Value *Addr = I.getOperand(0);
2366     Value *Val = I.getOperand(1);
2367     Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),
2368                                           /*isStore*/ true)
2369                            .first;
2370 
2371     if (ClCheckAccessAddress)
2372       insertCheckShadowOf(Addr, &I);
2373 
2374     // Only test the conditional argument of cmpxchg instruction.
2375     // The other argument can potentially be uninitialized, but we cannot
2376     // detect this situation reliably without risking false positives.
2377     if (isa<AtomicCmpXchgInst>(I))
2378       insertCheckShadowOf(Val, &I);
2379 
2380     IRB.CreateStore(getCleanShadow(Val), ShadowPtr);
2381 
2382     setShadow(&I, getCleanShadow(&I));
2383     setOrigin(&I, getCleanOrigin());
2384   }
2385 
2386   void visitAtomicRMWInst(AtomicRMWInst &I) {
2387     handleCASOrRMW(I);
2388     I.setOrdering(addReleaseOrdering(I.getOrdering()));
2389   }
2390 
2391   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2392     handleCASOrRMW(I);
2393     I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
2394   }
2395 
2396   // Vector manipulation.
2397   void visitExtractElementInst(ExtractElementInst &I) {
2398     insertCheckShadowOf(I.getOperand(1), &I);
2399     IRBuilder<> IRB(&I);
2400     setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
2401                                            "_msprop"));
2402     setOrigin(&I, getOrigin(&I, 0));
2403   }
2404 
2405   void visitInsertElementInst(InsertElementInst &I) {
2406     insertCheckShadowOf(I.getOperand(2), &I);
2407     IRBuilder<> IRB(&I);
2408     auto *Shadow0 = getShadow(&I, 0);
2409     auto *Shadow1 = getShadow(&I, 1);
2410     setShadow(&I, IRB.CreateInsertElement(Shadow0, Shadow1, I.getOperand(2),
2411                                           "_msprop"));
2412     setOriginForNaryOp(I);
2413   }
2414 
2415   void visitShuffleVectorInst(ShuffleVectorInst &I) {
2416     IRBuilder<> IRB(&I);
2417     auto *Shadow0 = getShadow(&I, 0);
2418     auto *Shadow1 = getShadow(&I, 1);
2419     setShadow(&I, IRB.CreateShuffleVector(Shadow0, Shadow1, I.getShuffleMask(),
2420                                           "_msprop"));
2421     setOriginForNaryOp(I);
2422   }
2423 
2424   // Casts.
2425   void visitSExtInst(SExtInst &I) {
2426     IRBuilder<> IRB(&I);
2427     setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
2428     setOrigin(&I, getOrigin(&I, 0));
2429   }
2430 
2431   void visitZExtInst(ZExtInst &I) {
2432     IRBuilder<> IRB(&I);
2433     setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
2434     setOrigin(&I, getOrigin(&I, 0));
2435   }
2436 
2437   void visitTruncInst(TruncInst &I) {
2438     IRBuilder<> IRB(&I);
2439     setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
2440     setOrigin(&I, getOrigin(&I, 0));
2441   }
2442 
2443   void visitBitCastInst(BitCastInst &I) {
2444     // Special case: if this is the bitcast (there is exactly 1 allowed) between
2445     // a musttail call and a ret, don't instrument. New instructions are not
2446     // allowed after a musttail call.
2447     if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
2448       if (CI->isMustTailCall())
2449         return;
2450     IRBuilder<> IRB(&I);
2451     setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
2452     setOrigin(&I, getOrigin(&I, 0));
2453   }
2454 
2455   void visitPtrToIntInst(PtrToIntInst &I) {
2456     IRBuilder<> IRB(&I);
2457     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
2458                                     "_msprop_ptrtoint"));
2459     setOrigin(&I, getOrigin(&I, 0));
2460   }
2461 
2462   void visitIntToPtrInst(IntToPtrInst &I) {
2463     IRBuilder<> IRB(&I);
2464     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
2465                                     "_msprop_inttoptr"));
2466     setOrigin(&I, getOrigin(&I, 0));
2467   }
2468 
2469   void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
2470   void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
2471   void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
2472   void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
2473   void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
2474   void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
2475 
2476   /// Propagate shadow for bitwise AND.
2477   ///
2478   /// This code is exact, i.e. if, for example, a bit in the left argument
2479   /// is defined and 0, then neither the value nor the definedness of the
2480   /// corresponding bit in the right argument affects the resulting shadow.
2481   void visitAnd(BinaryOperator &I) {
2482     IRBuilder<> IRB(&I);
2483     //  "And" of 0 and a poisoned value results in unpoisoned value.
2484     //  1&1 => 1;     0&1 => 0;     p&1 => p;
2485     //  1&0 => 0;     0&0 => 0;     p&0 => 0;
2486     //  1&p => p;     0&p => 0;     p&p => p;
2487     //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
2488     Value *S1 = getShadow(&I, 0);
2489     Value *S2 = getShadow(&I, 1);
2490     Value *V1 = I.getOperand(0);
2491     Value *V2 = I.getOperand(1);
2492     if (V1->getType() != S1->getType()) {
2493       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
2494       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
2495     }
2496     Value *S1S2 = IRB.CreateAnd(S1, S2);
2497     Value *V1S2 = IRB.CreateAnd(V1, S2);
2498     Value *S1V2 = IRB.CreateAnd(S1, V2);
2499     setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
2500     setOriginForNaryOp(I);
2501   }
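  // Illustrative trace (hypothetical i4 values): let V1 = 1100 with
  // S1 = 0011 (low two bits of V1 unknown) and V2 = 1010 fully defined
  // (S2 = 0000). Then S1S2 = 0000, V1S2 = 0000, and S1V2 = 0011 & 1010 = 0010,
  // so only bit 1 of the result is poisoned: the defined 0 in bit 0 of V2
  // masks the unknown bit 0 of V1.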
2502 
2503   void visitOr(BinaryOperator &I) {
2504     IRBuilder<> IRB(&I);
2505     //  "Or" of 1 and a poisoned value results in unpoisoned value:
2506     //    1|1 => 1;     0|1 => 1;     p|1 => 1;
2507     //    1|0 => 1;     0|0 => 0;     p|0 => p;
2508     //    1|p => 1;     0|p => p;     p|p => p;
2509     //
2510     //    S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
2511     //
2512     //  Addendum if the "Or" is "disjoint":
2513     //    1|1 => p;
2514     //    S = S | (V1 & V2)
2515     Value *S1 = getShadow(&I, 0);
2516     Value *S2 = getShadow(&I, 1);
2517     Value *V1 = I.getOperand(0);
2518     Value *V2 = I.getOperand(1);
2519     if (V1->getType() != S1->getType()) {
2520       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
2521       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
2522     }
2523 
2524     Value *NotV1 = IRB.CreateNot(V1);
2525     Value *NotV2 = IRB.CreateNot(V2);
2526 
2527     Value *S1S2 = IRB.CreateAnd(S1, S2);
2528     Value *S2NotV1 = IRB.CreateAnd(NotV1, S2);
2529     Value *S1NotV2 = IRB.CreateAnd(S1, NotV2);
2530 
2531     Value *S = IRB.CreateOr({S1S2, S2NotV1, S1NotV2});
2532 
2533     if (ClPreciseDisjointOr && cast<PossiblyDisjointInst>(&I)->isDisjoint()) {
2534       Value *V1V2 = IRB.CreateAnd(V1, V2);
2535       S = IRB.CreateOr(S, V1V2, "_ms_disjoint");
2536     }
2537 
2538     setShadow(&I, S);
2539     setOriginForNaryOp(I);
2540   }
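  // Illustrative trace of the disjoint addendum (hypothetical i4 values):
  // "or disjoint" promises that no bit position is 1 in both operands, so a
  // position where V1 and V2 are both 1 yields poison even if both bits are
  // initialized. E.g. V1 = 0101 and V2 = 0100 overlap in bit 2, so the extra
  // (V1 & V2) term sets bit 2 of the shadow regardless of S1 and S2.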
2541 
2542   /// Default propagation of shadow and/or origin.
2543   ///
2544   /// This class implements the general case of shadow propagation, used in all
2545   /// cases where we don't know and/or don't care about what the operation
2546   /// actually does. It converts all input shadow values to a common type
2547   /// (extending or truncating as necessary), and bitwise OR's them.
2548   ///
2549   /// This is much cheaper than inserting checks (i.e. requiring inputs to be
2550   /// fully initialized), and less prone to false positives.
2551   ///
2552   /// This class also implements the general case of origin propagation. For a
2553   /// Nary operation, result origin is set to the origin of an argument that is
2554   /// not entirely initialized. If there is more than one such argument, the
2555   /// rightmost of them is picked. It does not matter which one is picked if all
2556   /// arguments are initialized.
2557   template <bool CombineShadow> class Combiner {
2558     Value *Shadow = nullptr;
2559     Value *Origin = nullptr;
2560     IRBuilder<> &IRB;
2561     MemorySanitizerVisitor *MSV;
2562 
2563   public:
2564     Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
2565         : IRB(IRB), MSV(MSV) {}
2566 
2567     /// Add a pair of shadow and origin values to the mix.
2568     Combiner &Add(Value *OpShadow, Value *OpOrigin) {
2569       if (CombineShadow) {
2570         assert(OpShadow);
2571         if (!Shadow)
2572           Shadow = OpShadow;
2573         else {
2574           OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2575           Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
2576         }
2577       }
2578 
2579       if (MSV->MS.TrackOrigins) {
2580         assert(OpOrigin);
2581         if (!Origin) {
2582           Origin = OpOrigin;
2583         } else {
2584           Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
2585           // No point in adding something that might result in 0 origin value.
2586           if (!ConstOrigin || !ConstOrigin->isNullValue()) {
2587             Value *Cond = MSV->convertToBool(OpShadow, IRB);
2588             Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
2589           }
2590         }
2591       }
2592       return *this;
2593     }
2594 
2595     /// Add an application value to the mix.
2596     Combiner &Add(Value *V) {
2597       Value *OpShadow = MSV->getShadow(V);
2598       Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
2599       return Add(OpShadow, OpOrigin);
2600     }
2601 
2602     /// Set the current combined values as the given instruction's shadow
2603     /// and origin.
2604     void Done(Instruction *I) {
2605       if (CombineShadow) {
2606         assert(Shadow);
2607         Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
2608         MSV->setShadow(I, Shadow);
2609       }
2610       if (MSV->MS.TrackOrigins) {
2611         assert(Origin);
2612         MSV->setOrigin(I, Origin);
2613       }
2614     }
2615 
2616     /// Store the current combined value at the specified origin
2617     /// location.
2618     void DoneAndStoreOrigin(TypeSize TS, Value *OriginPtr) {
2619       if (MSV->MS.TrackOrigins) {
2620         assert(Origin);
2621         MSV->paintOrigin(IRB, Origin, OriginPtr, TS, kMinOriginAlignment);
2622       }
2623     }
2624   };
2625 
2626   using ShadowAndOriginCombiner = Combiner<true>;
2627   using OriginCombiner = Combiner<false>;
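  // Rough sketch of the common pattern (see handleShadowOr() below): for
  // %c = add i32 %a, %b the combiner computes, in effect,
  //   Shadow(c) = Shadow(a) | Shadow(b)
  //   Origin(c) = Shadow(b) != 0 ? Origin(b) : Origin(a)
  // casting the shadows to a common type first if they differ.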
2628 
2629   /// Propagate origin for arbitrary operation.
2630   void setOriginForNaryOp(Instruction &I) {
2631     if (!MS.TrackOrigins)
2632       return;
2633     IRBuilder<> IRB(&I);
2634     OriginCombiner OC(this, IRB);
2635     for (Use &Op : I.operands())
2636       OC.Add(Op.get());
2637     OC.Done(&I);
2638   }
2639 
2640   size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
2641     assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
2642            "Vector of pointers is not a valid shadow type");
2643     return Ty->isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *
2644                                   Ty->getScalarSizeInBits()
2645                             : Ty->getPrimitiveSizeInBits();
2646   }
2647 
2648   /// Cast between two shadow types, extending or truncating as
2649   /// necessary.
2650   Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
2651                           bool Signed = false) {
2652     Type *srcTy = V->getType();
2653     if (srcTy == dstTy)
2654       return V;
2655     size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2656     size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2657     if (srcSizeInBits > 1 && dstSizeInBits == 1)
2658       return IRB.CreateICmpNE(V, getCleanShadow(V));
2659 
2660     if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
2661       return IRB.CreateIntCast(V, dstTy, Signed);
2662     if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
2663         cast<VectorType>(dstTy)->getElementCount() ==
2664             cast<VectorType>(srcTy)->getElementCount())
2665       return IRB.CreateIntCast(V, dstTy, Signed);
2666     Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
2667     Value *V2 =
2668         IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
2669     return IRB.CreateBitCast(V2, dstTy);
2670     // TODO: handle struct types.
2671   }
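  // For example, casting a <4 x i16> shadow to <2 x i32> (element counts
  // differ, so neither IntCast path applies) takes the integer fallback:
  //   %t = bitcast <4 x i16> %s to i64      ; srcSizeInBits == 64
  //   ; the 64-bit-to-64-bit IntCast is a no-op
  //   %r = bitcast i64 %t to <2 x i32>      ; dstSizeInBits == 64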
2672 
2673   /// Cast an application value to the type of its own shadow.
2674   Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
2675     Type *ShadowTy = getShadowTy(V);
2676     if (V->getType() == ShadowTy)
2677       return V;
2678     if (V->getType()->isPtrOrPtrVectorTy())
2679       return IRB.CreatePtrToInt(V, ShadowTy);
2680     else
2681       return IRB.CreateBitCast(V, ShadowTy);
2682   }
2683 
2684   /// Propagate shadow for arbitrary operation.
2685   void handleShadowOr(Instruction &I) {
2686     IRBuilder<> IRB(&I);
2687     ShadowAndOriginCombiner SC(this, IRB);
2688     for (Use &Op : I.operands())
2689       SC.Add(Op.get());
2690     SC.Done(&I);
2691   }
2692 
2693   /// Propagate shadow for 1- or 2-vector intrinsics that combine adjacent
2694   /// fields.
2695   ///
2696   /// e.g., <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16>)
2697   ///       <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8>, <16 x i8>)
2698   void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I) {
2699     assert(I.arg_size() == 1 || I.arg_size() == 2);
2700 
2701     assert(I.getType()->isVectorTy());
2702     assert(I.getArgOperand(0)->getType()->isVectorTy());
2703 
2704     FixedVectorType *ParamType =
2705         cast<FixedVectorType>(I.getArgOperand(0)->getType());
2706     assert((I.arg_size() != 2) ||
2707            (ParamType == cast<FixedVectorType>(I.getArgOperand(1)->getType())));
2708     [[maybe_unused]] FixedVectorType *ReturnType =
2709         cast<FixedVectorType>(I.getType());
2710     assert(ParamType->getNumElements() * I.arg_size() ==
2711            2 * ReturnType->getNumElements());
2712 
2713     IRBuilder<> IRB(&I);
2714     unsigned Width = ParamType->getNumElements() * I.arg_size();
2715 
2716     // Horizontal OR of shadow
2717     SmallVector<int, 8> EvenMask;
2718     SmallVector<int, 8> OddMask;
2719     for (unsigned X = 0; X < Width; X += 2) {
2720       EvenMask.push_back(X);
2721       OddMask.push_back(X + 1);
2722     }
2723 
2724     Value *FirstArgShadow = getShadow(&I, 0);
2725     Value *EvenShadow;
2726     Value *OddShadow;
2727     if (I.arg_size() == 2) {
2728       Value *SecondArgShadow = getShadow(&I, 1);
2729       EvenShadow =
2730           IRB.CreateShuffleVector(FirstArgShadow, SecondArgShadow, EvenMask);
2731       OddShadow =
2732           IRB.CreateShuffleVector(FirstArgShadow, SecondArgShadow, OddMask);
2733     } else {
2734       EvenShadow = IRB.CreateShuffleVector(FirstArgShadow, EvenMask);
2735       OddShadow = IRB.CreateShuffleVector(FirstArgShadow, OddMask);
2736     }
2737 
2738     Value *OrShadow = IRB.CreateOr(EvenShadow, OddShadow);
2739     OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));
2740 
2741     setShadow(&I, OrShadow);
2742     setOriginForNaryOp(I);
2743   }
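  // e.g., for <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> %a, <16 x i8> %b)
  // Width == 32; EvenMask selects shadow elements {0, 2, ..., 30} and OddMask
  // {1, 3, ..., 31} from the concatenation of the two argument shadows, so the
  // final OR marks a result byte as poisoned iff either byte of its source
  // pair was poisoned.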
2744 
2745   /// Propagate shadow for 1- or 2-vector intrinsics that combine adjacent
2746   /// fields, with the parameters reinterpreted to have elements of a specified
2747   /// width. For example:
2748   ///     @llvm.x86.ssse3.phadd.w(<1 x i64> [[VAR1]], <1 x i64> [[VAR2]])
2749   /// conceptually operates on
2750   ///     (<4 x i16> [[VAR1]], <4 x i16> [[VAR2]])
2751   /// and can be handled with ReinterpretElemWidth == 16.
2752   void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I,
2753                                        int ReinterpretElemWidth) {
2754     assert(I.arg_size() == 1 || I.arg_size() == 2);
2755 
2756     assert(I.getType()->isVectorTy());
2757     assert(I.getArgOperand(0)->getType()->isVectorTy());
2758 
2759     FixedVectorType *ParamType =
2760         cast<FixedVectorType>(I.getArgOperand(0)->getType());
2761     assert((I.arg_size() != 2) ||
2762            (ParamType == cast<FixedVectorType>(I.getArgOperand(1)->getType())));
2763 
2764     [[maybe_unused]] FixedVectorType *ReturnType =
2765         cast<FixedVectorType>(I.getType());
2766     assert(ParamType->getNumElements() * I.arg_size() ==
2767            2 * ReturnType->getNumElements());
2768 
2769     IRBuilder<> IRB(&I);
2770 
2771     unsigned TotalNumElems = ParamType->getNumElements() * I.arg_size();
2772     FixedVectorType *ReinterpretShadowTy = nullptr;
2773     assert(isAligned(Align(ReinterpretElemWidth),
2774                      ParamType->getPrimitiveSizeInBits()));
2775     ReinterpretShadowTy = FixedVectorType::get(
2776         IRB.getIntNTy(ReinterpretElemWidth),
2777         ParamType->getPrimitiveSizeInBits() / ReinterpretElemWidth);
2778     TotalNumElems = ReinterpretShadowTy->getNumElements() * I.arg_size();
2779 
2780     // Horizontal OR of shadow
2781     SmallVector<int, 8> EvenMask;
2782     SmallVector<int, 8> OddMask;
2783     for (unsigned X = 0; X < TotalNumElems - 1; X += 2) {
2784       EvenMask.push_back(X);
2785       OddMask.push_back(X + 1);
2786     }
2787 
2788     Value *FirstArgShadow = getShadow(&I, 0);
2789     FirstArgShadow = IRB.CreateBitCast(FirstArgShadow, ReinterpretShadowTy);
2790 
2791     // If we had two parameters each with an odd number of elements, the total
2792     // number of elements would be even, but we have never seen this in extant
2793     // instruction sets, so we enforce that each parameter must have an even
2794     // number of elements.
2795     assert(isAligned(
2796         Align(2),
2797         cast<FixedVectorType>(FirstArgShadow->getType())->getNumElements()));
2798 
2799     Value *EvenShadow;
2800     Value *OddShadow;
2801     if (I.arg_size() == 2) {
2802       Value *SecondArgShadow = getShadow(&I, 1);
2803       SecondArgShadow = IRB.CreateBitCast(SecondArgShadow, ReinterpretShadowTy);
2804 
2805       EvenShadow =
2806           IRB.CreateShuffleVector(FirstArgShadow, SecondArgShadow, EvenMask);
2807       OddShadow =
2808           IRB.CreateShuffleVector(FirstArgShadow, SecondArgShadow, OddMask);
2809     } else {
2810       EvenShadow = IRB.CreateShuffleVector(FirstArgShadow, EvenMask);
2811       OddShadow = IRB.CreateShuffleVector(FirstArgShadow, OddMask);
2812     }
2813 
2814     Value *OrShadow = IRB.CreateOr(EvenShadow, OddShadow);
2815     OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));
2816 
2817     setShadow(&I, OrShadow);
2818     setOriginForNaryOp(I);
2819   }
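  // e.g., for <1 x i64> @llvm.x86.ssse3.phadd.w(<1 x i64> %a, <1 x i64> %b)
  // with ReinterpretElemWidth == 16, each shadow is bitcast to <4 x i16>
  // (TotalNumElems == 8) so the even/odd shuffle pairs up exactly the i16
  // lanes that the instruction adds.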
2820 
2821   void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }
2822 
2823   // Handle multiplication by constant.
2824   //
2825   // Handle a special case of multiplication by constant that may have one or
2826   // more zeros in the lower bits. This makes corresponding number of lower bits
2827   // of the result zero as well. We model it by shifting the other operand
2828   // shadow left by the required number of bits. Effectively, we transform
2829   // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
2830   // We use multiplication by 2**N instead of shift to cover the case of
2831   // multiplication by 0, which may occur in some elements of a vector operand.
2832   void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
2833                            Value *OtherArg) {
2834     Constant *ShadowMul;
2835     Type *Ty = ConstArg->getType();
2836     if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2837       unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
2838       Type *EltTy = VTy->getElementType();
2839       SmallVector<Constant *, 16> Elements;
2840       for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
2841         if (ConstantInt *Elt =
2842                 dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
2843           const APInt &V = Elt->getValue();
2844           APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
2845           Elements.push_back(ConstantInt::get(EltTy, V2));
2846         } else {
2847           Elements.push_back(ConstantInt::get(EltTy, 1));
2848         }
2849       }
2850       ShadowMul = ConstantVector::get(Elements);
2851     } else {
2852       if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
2853         const APInt &V = Elt->getValue();
2854         APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
2855         ShadowMul = ConstantInt::get(Ty, V2);
2856       } else {
2857         ShadowMul = ConstantInt::get(Ty, 1);
2858       }
2859     }
2860 
2861     IRBuilder<> IRB(&I);
2862     setShadow(&I,
2863               IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
2864     setOrigin(&I, getOrigin(OtherArg));
2865   }
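  // Worked example: for %r = mul i32 %x, 24, the constant is 24 == 3 * 2**3
  // (three trailing zero bits), so ShadowMul == 8 and the emitted shadow is
  //   %msprop_mul_cst = mul i32 Shadow(%x), 8
  // i.e. Shadow(%x) shifted left by 3: the low 3 bits of the product are
  // always 0 and therefore initialized.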
2866 
2867   void visitMul(BinaryOperator &I) {
2868     Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
2869     Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
2870     if (constOp0 && !constOp1)
2871       handleMulByConstant(I, constOp0, I.getOperand(1));
2872     else if (constOp1 && !constOp0)
2873       handleMulByConstant(I, constOp1, I.getOperand(0));
2874     else
2875       handleShadowOr(I);
2876   }
2877 
2878   void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
2879   void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
2880   void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
2881   void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
2882   void visitSub(BinaryOperator &I) { handleShadowOr(I); }
2883   void visitXor(BinaryOperator &I) { handleShadowOr(I); }
2884 
2885   void handleIntegerDiv(Instruction &I) {
2886     IRBuilder<> IRB(&I);
2887     // Strict on the second argument.
2888     insertCheckShadowOf(I.getOperand(1), &I);
2889     setShadow(&I, getShadow(&I, 0));
2890     setOrigin(&I, getOrigin(&I, 0));
2891   }
2892 
2893   void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2894   void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2895   void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
2896   void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }
2897 
2898   // Floating point division is side-effect free, so we cannot require that the
2899   // divisor be fully initialized; we must propagate shadow instead. See PR37523.
2900   void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
2901   void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
2902 
2903   /// Instrument == and != comparisons.
2904   ///
2905   /// Sometimes the comparison result is known even if some of the bits of the
2906   /// arguments are not.
2907   void handleEqualityComparison(ICmpInst &I) {
2908     IRBuilder<> IRB(&I);
2909     Value *A = I.getOperand(0);
2910     Value *B = I.getOperand(1);
2911     Value *Sa = getShadow(A);
2912     Value *Sb = getShadow(B);
2913 
2914     // Get rid of pointers and vectors of pointers.
2915     // For ints (and vectors of ints), types of A and Sa match,
2916     // and this is a no-op.
2917     A = IRB.CreatePointerCast(A, Sa->getType());
2918     B = IRB.CreatePointerCast(B, Sb->getType());
2919 
2920     // A == B  <==>  (C = A^B) == 0
2921     // A != B  <==>  (C = A^B) != 0
2922     // Sc = Sa | Sb
2923     Value *C = IRB.CreateXor(A, B);
2924     Value *Sc = IRB.CreateOr(Sa, Sb);
2925     // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
2926     // Result is defined if one of the following is true
2927     // * there is a defined 1 bit in C
2928     // * C is fully defined
2929     // Si = !(C & ~Sc) && Sc
2930     Value *Zero = Constant::getNullValue(Sc->getType());
2931     Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
2932     Value *LHS = IRB.CreateICmpNE(Sc, Zero);
2933     Value *RHS =
2934         IRB.CreateICmpEQ(IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero);
2935     Value *Si = IRB.CreateAnd(LHS, RHS);
2936     Si->setName("_msprop_icmp");
2937     setShadow(&I, Si);
2938     setOriginForNaryOp(I);
2939   }
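  // Illustrative trace (hypothetical i4 values): A = 10?? (Sa = 0011) and
  // B = 0000 (Sb = 0000). C = A ^ B has a defined 1 in bit 3 and Sc = 0011,
  // so LHS = (Sc != 0) holds but RHS does not (C & ~Sc = 1000 != 0): the
  // comparison is known unequal no matter what the unknown bits are, and the
  // result shadow Si is 0.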
2940 
2941   /// Instrument relational comparisons.
2942   ///
2943   /// This function does exact shadow propagation for all relational
2944   /// comparisons of integers, pointers and vectors of those.
2945   /// FIXME: output seems suboptimal when one of the operands is a constant
2946   void handleRelationalComparisonExact(ICmpInst &I) {
2947     IRBuilder<> IRB(&I);
2948     Value *A = I.getOperand(0);
2949     Value *B = I.getOperand(1);
2950     Value *Sa = getShadow(A);
2951     Value *Sb = getShadow(B);
2952 
2953     // Get rid of pointers and vectors of pointers.
2954     // For ints (and vectors of ints), types of A and Sa match,
2955     // and this is a no-op.
2956     A = IRB.CreatePointerCast(A, Sa->getType());
2957     B = IRB.CreatePointerCast(B, Sb->getType());
2958 
2959     // Let [a0, a1] be the interval of possible values of A, taking into account
2960     // its undefined bits. Let [b0, b1] be the interval of possible values of B.
2961     // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
2962     bool IsSigned = I.isSigned();
2963 
2964     auto GetMinMaxUnsigned = [&](Value *V, Value *S) {
2965       if (IsSigned) {
2966         // Sign-flip to map from signed range to unsigned range. Relation A vs B
2967         // should be preserved, if checked with `getUnsignedPredicate()`.
2968         // Relationship between Amin, Amax, Bmin, Bmax also will not be
2969         // affected, as they are created by effectively adding/subtracting from
2970         // A (or B) a value, derived from shadow, with no overflow, either
2971         // before or after sign flip.
2972         APInt MinVal =
2973             APInt::getSignedMinValue(V->getType()->getScalarSizeInBits());
2974         V = IRB.CreateXor(V, ConstantInt::get(V->getType(), MinVal));
2975       }
2976       // Minimize undefined bits.
2977       Value *Min = IRB.CreateAnd(V, IRB.CreateNot(S));
2978       Value *Max = IRB.CreateOr(V, S);
2979       return std::make_pair(Min, Max);
2980     };
2981 
2982     auto [Amin, Amax] = GetMinMaxUnsigned(A, Sa);
2983     auto [Bmin, Bmax] = GetMinMaxUnsigned(B, Sb);
2984     Value *S1 = IRB.CreateICmp(I.getUnsignedPredicate(), Amin, Bmax);
2985     Value *S2 = IRB.CreateICmp(I.getUnsignedPredicate(), Amax, Bmin);
2986 
2987     Value *Si = IRB.CreateXor(S1, S2);
2988     setShadow(&I, Si);
2989     setOriginForNaryOp(I);
2990   }
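  // Illustrative trace (hypothetical unsigned i4 values): A = 01?? (Sa = 0011)
  // ranges over [0100, 0111]; B = 1000 is exact, so Bmin == Bmax == 1000.
  // For "icmp ult": S1 = (Amin < Bmax) and S2 = (Amax < Bmin) are both true,
  // hence Si = S1 ^ S2 == 0: A < B holds for every possible value of the
  // unknown bits and the result is fully defined.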
2991 
2992   /// Instrument signed relational comparisons.
2993   ///
2994   /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
2995   /// bit of the shadow. Everything else is delegated to handleShadowOr().
2996   void handleSignedRelationalComparison(ICmpInst &I) {
2997     Constant *constOp;
2998     Value *op = nullptr;
2999     CmpInst::Predicate pre;
3000     if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
3001       op = I.getOperand(0);
3002       pre = I.getPredicate();
3003     } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
3004       op = I.getOperand(1);
3005       pre = I.getSwappedPredicate();
3006     } else {
3007       handleShadowOr(I);
3008       return;
3009     }
3010 
3011     if ((constOp->isNullValue() &&
3012          (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
3013         (constOp->isAllOnesValue() &&
3014          (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
3015       IRBuilder<> IRB(&I);
3016       Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
3017                                         "_msprop_icmp_s");
3018       setShadow(&I, Shadow);
3019       setOrigin(&I, getOrigin(op));
3020     } else {
3021       handleShadowOr(I);
3022     }
3023   }
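  // e.g., for %r = icmp slt i32 %x, 0 the result depends only on the sign bit
  // of %x, so the emitted shadow is simply the sign bit of Shadow(%x):
  //   %_msprop_icmp_s = icmp slt i32 Shadow(%x), 0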
3024 
3025   void visitICmpInst(ICmpInst &I) {
3026     if (!ClHandleICmp) {
3027       handleShadowOr(I);
3028       return;
3029     }
3030     if (I.isEquality()) {
3031       handleEqualityComparison(I);
3032       return;
3033     }
3034 
3035     assert(I.isRelational());
3036     if (ClHandleICmpExact) {
3037       handleRelationalComparisonExact(I);
3038       return;
3039     }
3040     if (I.isSigned()) {
3041       handleSignedRelationalComparison(I);
3042       return;
3043     }
3044 
3045     assert(I.isUnsigned());
3046     if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
3047       handleRelationalComparisonExact(I);
3048       return;
3049     }
3050 
3051     handleShadowOr(I);
3052   }
3053 
3054   void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }
3055 
3056   void handleShift(BinaryOperator &I) {
3057     IRBuilder<> IRB(&I);
3058     // If any of the S2 bits are poisoned, the whole thing is poisoned.
3059     // Otherwise perform the same shift on S1.
3060     Value *S1 = getShadow(&I, 0);
3061     Value *S2 = getShadow(&I, 1);
3062     Value *S2Conv =
3063         IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
3064     Value *V2 = I.getOperand(1);
3065     Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
3066     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
3067     setOriginForNaryOp(I);
3068   }
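  // e.g., for %r = shl i32 %x, %n this emits, in effect:
  //   %s2conv = sext (icmp ne i32 Shadow(%n), 0) to i32 ; all-ones if %n dirty
  //   %shift  = shl i32 Shadow(%x), %n
  //   Shadow(%r) = or i32 %shift, %s2conv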
3069 
3070   void visitShl(BinaryOperator &I) { handleShift(I); }
3071   void visitAShr(BinaryOperator &I) { handleShift(I); }
3072   void visitLShr(BinaryOperator &I) { handleShift(I); }
3073 
3074   void handleFunnelShift(IntrinsicInst &I) {
3075     IRBuilder<> IRB(&I);
3076     // If any of the S2 bits are poisoned, the whole thing is poisoned.
3077     // Otherwise perform the same shift on S0 and S1.
3078     Value *S0 = getShadow(&I, 0);
3079     Value *S1 = getShadow(&I, 1);
3080     Value *S2 = getShadow(&I, 2);
3081     Value *S2Conv =
3082         IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
3083     Value *V2 = I.getOperand(2);
3084     Value *Shift = IRB.CreateIntrinsic(I.getIntrinsicID(), S2Conv->getType(),
3085                                        {S0, S1, V2});
3086     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
3087     setOriginForNaryOp(I);
3088   }
3089 
3090   /// Instrument llvm.memmove
3091   ///
3092   /// At this point we don't know if llvm.memmove will be inlined or not.
3093   /// If we don't instrument it and it gets inlined,
3094   /// our interceptor will not kick in and we will lose the memmove.
3095   /// If we instrument the call here, but it does not get inlined,
3096   /// we will memmove the shadow twice, which is bad in the case
3097   /// of overlapping regions. So, we simply lower the intrinsic to a call.
3098   ///
3099   /// Similar situation exists for memcpy and memset.
3100   void visitMemMoveInst(MemMoveInst &I) {
3101     getShadow(I.getArgOperand(1)); // Ensure shadow initialized
3102     IRBuilder<> IRB(&I);
3103     IRB.CreateCall(MS.MemmoveFn,
3104                    {I.getArgOperand(0), I.getArgOperand(1),
3105                     IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3106     I.eraseFromParent();
3107   }
3108 
3109   /// Instrument memcpy
3110   ///
3111   /// Similar to memmove: avoid copying shadow twice. This is somewhat
3112   /// unfortunate as it may slow down small constant memcpys.
3113   /// FIXME: consider doing manual inline for small constant sizes and proper
3114   /// alignment.
3115   ///
3116   /// Note: This also handles memcpy.inline, which promises no calls to external
3117   /// functions as an optimization. However, with instrumentation enabled this
3118   /// is difficult to promise; additionally, we know that the MSan runtime
3119   /// exists and provides __msan_memcpy(). Therefore, we assume that with
3120   /// instrumentation it's safe to turn memcpy.inline into a call to
3121   /// __msan_memcpy(). Should this be wrong, such as when implementing memcpy()
3122   /// itself, instrumentation should be disabled with the no_sanitize attribute.
3123   void visitMemCpyInst(MemCpyInst &I) {
3124     getShadow(I.getArgOperand(1)); // Ensure shadow initialized
3125     IRBuilder<> IRB(&I);
3126     IRB.CreateCall(MS.MemcpyFn,
3127                    {I.getArgOperand(0), I.getArgOperand(1),
3128                     IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3129     I.eraseFromParent();
3130   }
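  // In effect the intrinsic is replaced with a plain call into the runtime,
  // roughly:
  //   call ptr @__msan_memcpy(ptr %dst, ptr %src, i64 %n)
  // and the runtime copies the shadow (and origin) along with the data.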
3131 
3132   // Same as memcpy.
3133   void visitMemSetInst(MemSetInst &I) {
3134     IRBuilder<> IRB(&I);
3135     IRB.CreateCall(
3136         MS.MemsetFn,
3137         {I.getArgOperand(0),
3138          IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
3139          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
3140     I.eraseFromParent();
3141   }
3142 
3143   void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }
3144 
3145   void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }
3146 
3147   /// Handle vector store-like intrinsics.
3148   ///
3149   /// Instrument intrinsics that look like a simple SIMD store: writes memory,
3150   /// has 1 pointer argument and 1 vector argument, returns void.
3151   bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
3152     assert(I.arg_size() == 2);
3153 
3154     IRBuilder<> IRB(&I);
3155     Value *Addr = I.getArgOperand(0);
3156     Value *Shadow = getShadow(&I, 1);
3157     Value *ShadowPtr, *OriginPtr;
3158 
3159     // We don't know the pointer alignment (could be unaligned SSE store!).
3160     // Have to assume the worst case.
3161     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3162         Addr, IRB, Shadow->getType(), Align(1), /*isStore*/ true);
3163     IRB.CreateAlignedStore(Shadow, ShadowPtr, Align(1));
3164 
3165     if (ClCheckAccessAddress)
3166       insertCheckShadowOf(Addr, &I);
3167 
3168     // FIXME: factor out common code from materializeStores
3169     if (MS.TrackOrigins)
3170       IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
3171     return true;
3172   }
3173 
3174   /// Handle vector load-like intrinsics.
3175   ///
3176   /// Instrument intrinsics that look like a simple SIMD load: reads memory,
3177   /// has 1 pointer argument, returns a vector.
3178   bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
3179     assert(I.arg_size() == 1);
3180 
3181     IRBuilder<> IRB(&I);
3182     Value *Addr = I.getArgOperand(0);
3183 
3184     Type *ShadowTy = getShadowTy(&I);
3185     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
3186     if (PropagateShadow) {
3187       // We don't know the pointer alignment (could be unaligned SSE load!).
3188       // Have to assume the worst case.
3189       const Align Alignment = Align(1);
3190       std::tie(ShadowPtr, OriginPtr) =
3191           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
3192       setShadow(&I,
3193                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
3194     } else {
3195       setShadow(&I, getCleanShadow(&I));
3196     }
3197 
3198     if (ClCheckAccessAddress)
3199       insertCheckShadowOf(Addr, &I);
3200 
3201     if (MS.TrackOrigins) {
3202       if (PropagateShadow)
3203         setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
3204       else
3205         setOrigin(&I, getCleanOrigin());
3206     }
3207     return true;
3208   }
3209 
3210   /// Handle (SIMD arithmetic)-like intrinsics.
3211   ///
3212   /// Instrument intrinsics with any number of arguments of the same type [*],
3213   /// equal to the return type, plus a specified number of trailing flags of
3214   /// any type.
3215   ///
3216   /// [*] The type should be simple (no aggregates or pointers; vectors are
3217   /// fine).
3218   ///
3219   /// Caller guarantees that this intrinsic does not access memory.
3220   ///
3221   /// TODO: "horizontal"/"pairwise" intrinsics are often incorrectly matched
3222   ///       by this handler.
3223   [[maybe_unused]] bool
3224   maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I,
3225                                   unsigned int trailingFlags) {
3226     Type *RetTy = I.getType();
3227     if (!(RetTy->isIntOrIntVectorTy() || RetTy->isFPOrFPVectorTy()))
3228       return false;
3229 
3230     unsigned NumArgOperands = I.arg_size();
3231     assert(NumArgOperands >= trailingFlags);
3232     for (unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
3233       Type *Ty = I.getArgOperand(i)->getType();
3234       if (Ty != RetTy)
3235         return false;
3236     }
3237 
3238     IRBuilder<> IRB(&I);
3239     ShadowAndOriginCombiner SC(this, IRB);
3240     for (unsigned i = 0; i < NumArgOperands; ++i)
3241       SC.Add(I.getArgOperand(i));
3242     SC.Done(&I);
3243 
3244     return true;
3245   }
3246 
3247   /// Heuristically instrument unknown intrinsics.
3248   ///
3249   /// The main purpose of this code is to do something reasonable with all
3250   /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
3251   /// We recognize several classes of intrinsics by their argument types and
3252   /// ModRefBehaviour and apply special instrumentation when we are reasonably
3253   /// sure that we know what the intrinsic does.
3254   ///
3255   /// We special-case intrinsics where this approach fails. See llvm.bswap
3256   /// handling as an example of that.
3257   bool handleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
3258     unsigned NumArgOperands = I.arg_size();
3259     if (NumArgOperands == 0)
3260       return false;
3261 
3262     if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
3263         I.getArgOperand(1)->getType()->isVectorTy() &&
3264         I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
3265       // This looks like a vector store.
3266       return handleVectorStoreIntrinsic(I);
3267     }
3268 
3269     if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
3270         I.getType()->isVectorTy() && I.onlyReadsMemory()) {
3271       // This looks like a vector load.
3272       return handleVectorLoadIntrinsic(I);
3273     }
3274 
3275     if (I.doesNotAccessMemory())
3276       if (maybeHandleSimpleNomemIntrinsic(I, /*trailingFlags=*/0))
3277         return true;
3278 
3279     // FIXME: detect and handle SSE maskstore/maskload?
3280     // Some cases are now handled in handleAVXMasked{Load,Store}.
3281     return false;
3282   }
3283 
3284   bool handleUnknownIntrinsic(IntrinsicInst &I) {
3285     if (handleUnknownIntrinsicUnlogged(I)) {
3286       if (ClDumpHeuristicInstructions)
3287         dumpInst(I);
3288 
3289       LLVM_DEBUG(dbgs() << "UNKNOWN INSTRUCTION HANDLED HEURISTICALLY: " << I
3290                         << "\n");
3291       return true;
3292     } else
3293       return false;
3294   }
3295 
3296   void handleInvariantGroup(IntrinsicInst &I) {
3297     setShadow(&I, getShadow(&I, 0));
3298     setOrigin(&I, getOrigin(&I, 0));
3299   }
3300 
3301   void handleLifetimeStart(IntrinsicInst &I) {
3302     if (!PoisonStack)
3303       return;
3304     AllocaInst *AI = llvm::findAllocaForValue(I.getArgOperand(1));
3305     if (!AI)
3306       InstrumentLifetimeStart = false;
3307     LifetimeStartList.push_back(std::make_pair(&I, AI));
3308   }
3309 
3310   void handleBswap(IntrinsicInst &I) {
3311     IRBuilder<> IRB(&I);
3312     Value *Op = I.getArgOperand(0);
3313     Type *OpType = Op->getType();
3314     setShadow(&I, IRB.CreateIntrinsic(Intrinsic::bswap, ArrayRef(&OpType, 1),
3315                                       getShadow(Op)));
3316     setOrigin(&I, getOrigin(Op));
3317   }
3318 
3319   // Uninitialized bits are ok if they appear after the leading/trailing 0's
3320   // and a 1. If the input is all zero, the result is fully initialized iff
3321   // !is_zero_poison.
3322   //
3323   // e.g., for ctlz, with little-endian, if 0/1 are initialized bits with
3324   // concrete value 0/1, and ? is an uninitialized bit:
3325   //       - 0001 0??? is fully initialized
3326   //       - 000? ???? is fully uninitialized (*)
3327   //       - ???? ???? is fully uninitialized
3328   //       - 0000 0000 is fully uninitialized if is_zero_poison,
3329   //                      fully initialized   otherwise
3330   //
3331   // (*) TODO: arguably, since the number of zeros is in the range [3, 8], we
3332   //     only need to poison 4 bits.
3333   //
3334   // OutputShadow =
3335   //      ((ConcreteZerosCount >= ShadowZerosCount) && !AllZeroShadow)
3336   //   || (is_zero_poison && AllZeroSrc)
3337   void handleCountLeadingTrailingZeros(IntrinsicInst &I) {
3338     IRBuilder<> IRB(&I);
3339     Value *Src = I.getArgOperand(0);
3340     Value *SrcShadow = getShadow(Src);
3341 
3342     Value *False = IRB.getInt1(false);
3343     Value *ConcreteZerosCount = IRB.CreateIntrinsic(
3344         I.getType(), I.getIntrinsicID(), {Src, /*is_zero_poison=*/False});
3345     Value *ShadowZerosCount = IRB.CreateIntrinsic(
3346         I.getType(), I.getIntrinsicID(), {SrcShadow, /*is_zero_poison=*/False});
3347 
3348     Value *CompareConcreteZeros = IRB.CreateICmpUGE(
3349         ConcreteZerosCount, ShadowZerosCount, "_mscz_cmp_zeros");
3350 
3351     Value *NotAllZeroShadow =
3352         IRB.CreateIsNotNull(SrcShadow, "_mscz_shadow_not_null");
3353     Value *OutputShadow =
3354         IRB.CreateAnd(CompareConcreteZeros, NotAllZeroShadow, "_mscz_main");
3355 
3356     // If zero poison is requested, mix in with the shadow
3357     Constant *IsZeroPoison = cast<Constant>(I.getOperand(1));
3358     if (!IsZeroPoison->isZeroValue()) {
3359       Value *BoolZeroPoison = IRB.CreateIsNull(Src, "_mscz_bzp");
3360       OutputShadow = IRB.CreateOr(OutputShadow, BoolZeroPoison, "_mscz_bs");
3361     }
3362 
3363     OutputShadow = IRB.CreateSExt(OutputShadow, getShadowTy(Src), "_mscz_os");
3364 
3365     setShadow(&I, OutputShadow);
3366     setOriginForNaryOp(I);
3367   }
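  // Worked example for ctlz, continuing the notation above: Src = 0001 0???
  // has SrcShadow = 0000 0111, so ShadowZerosCount == 5 while
  // ConcreteZerosCount == 3. Since 3 >= 5 is false, OutputShadow is 0: the
  // three unknown low bits sit after the first 1 and cannot change the count.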
3368 
3369   /// Handle Arm NEON vector convert intrinsics.
3370   ///
3371   /// e.g., <4 x i32> @llvm.aarch64.neon.fcvtpu.v4i32.v4f32(<4 x float>)
3372   ///      i32 @llvm.aarch64.neon.fcvtms.i32.f64(double)
3373   ///
3374   /// For x86 SSE vector convert intrinsics, see
3375   /// handleSSEVectorConvertIntrinsic().
3376   void handleNEONVectorConvertIntrinsic(IntrinsicInst &I) {
3377     assert(I.arg_size() == 1);
3378 
3379     IRBuilder<> IRB(&I);
3380     Value *S0 = getShadow(&I, 0);
3381 
3382     /// For scalars:
3383     /// Since they are converting from floating-point to integer, the output is
3384     /// - fully uninitialized if *any* bit of the input is uninitialized
3385     /// - fully initialized if all bits of the input are initialized
3386     /// We apply the same principle on a per-field basis for vectors.
3387     Value *OutShadow = IRB.CreateSExt(IRB.CreateICmpNE(S0, getCleanShadow(S0)),
3388                                       getShadowTy(&I));
3389     setShadow(&I, OutShadow);
3390     setOriginForNaryOp(I);
3391   }
3392 
3393   /// Some instructions have additional zero-elements in the return type
3394   /// e.g., <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64>, ...)
3395   ///
3396   /// This function will return a vector type with the same number of elements
3397   /// as the input, but the same per-element width as the return value, e.g.,
3398   /// <8 x i8>.
3399   FixedVectorType *maybeShrinkVectorShadowType(Value *Src, IntrinsicInst &I) {
3400     assert(isa<FixedVectorType>(getShadowTy(&I)));
3401     FixedVectorType *ShadowType = cast<FixedVectorType>(getShadowTy(&I));
3402 
3403     // TODO: generalize beyond 2x?
3404     if (ShadowType->getElementCount() ==
3405         cast<VectorType>(Src->getType())->getElementCount() * 2)
3406       ShadowType = FixedVectorType::getHalfElementsVectorType(ShadowType);
3407 
3408     assert(ShadowType->getElementCount() ==
3409            cast<VectorType>(Src->getType())->getElementCount());
3410 
3411     return ShadowType;
3412   }
3413 
3414   /// Doubles the length of a vector shadow (filled with zeros) if necessary to
3415   /// match the length of the shadow for the instruction.
3416   /// This is more type-safe than CreateShadowCast().
3417   Value *maybeExtendVectorShadowWithZeros(Value *Shadow, IntrinsicInst &I) {
3418     IRBuilder<> IRB(&I);
3419     assert(isa<FixedVectorType>(Shadow->getType()));
3420     assert(isa<FixedVectorType>(I.getType()));
3421 
3422     Value *FullShadow = getCleanShadow(&I);
3423     assert(cast<FixedVectorType>(Shadow->getType())->getNumElements() <=
3424            cast<FixedVectorType>(FullShadow->getType())->getNumElements());
3425     assert(cast<FixedVectorType>(Shadow->getType())->getScalarType() ==
3426            cast<FixedVectorType>(FullShadow->getType())->getScalarType());
3427 
3428     if (Shadow->getType() == FullShadow->getType()) {
3429       FullShadow = Shadow;
3430     } else {
3431       // TODO: generalize beyond 2x?
3432       SmallVector<int, 32> ShadowMask(
3433           cast<FixedVectorType>(FullShadow->getType())->getNumElements());
3434       std::iota(ShadowMask.begin(), ShadowMask.end(), 0);
3435 
3436       // Append zeros
3437       FullShadow =
3438           IRB.CreateShuffleVector(Shadow, getCleanShadow(Shadow), ShadowMask);
3439     }
3440 
3441     return FullShadow;
3442   }
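  // e.g., for <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> %src, ...)
  // the propagated shadow is first computed as <8 x i8> (one lane per input
  // element) and then widened back to <16 x i8> here, with the extra lanes
  // taken from the clean shadow because those output elements are
  // zero-initialized.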
3443 
3444   /// Handle x86 SSE vector conversion.
3445   ///
3446   /// e.g., single-precision to half-precision conversion:
3447   ///      <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0)
3448   ///      <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0)
3449   ///
3450   ///      floating-point to integer:
3451   ///      <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>)
3452   ///      <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>)
3453   ///
3454   /// Note: if the output has more elements, they are zero-initialized (and
3455   /// therefore the shadow will also be initialized).
3456   ///
3457   /// This differs from handleSSEVectorConvertIntrinsic() because it
3458   /// propagates uninitialized shadow (instead of checking the shadow).
3459   void handleSSEVectorConvertIntrinsicByProp(IntrinsicInst &I,
3460                                              bool HasRoundingMode) {
3461     if (HasRoundingMode) {
3462       assert(I.arg_size() == 2);
3463       [[maybe_unused]] Value *RoundingMode = I.getArgOperand(1);
3464       assert(RoundingMode->getType()->isIntegerTy());
3465     } else {
3466       assert(I.arg_size() == 1);
3467     }
3468 
3469     Value *Src = I.getArgOperand(0);
3470     assert(Src->getType()->isVectorTy());
3471 
3472     // The return type might have more elements than the input.
3473     // Temporarily shrink the return type's number of elements.
3474     VectorType *ShadowType = maybeShrinkVectorShadowType(Src, I);
3475 
3476     IRBuilder<> IRB(&I);
3477     Value *S0 = getShadow(&I, 0);
3478 
3479     /// For scalars:
3480     /// Since they are converting to and/or from floating-point, the output is:
3481     /// - fully uninitialized if *any* bit of the input is uninitialized
3482     /// - fully initialized if all bits of the input are initialized
3483     /// We apply the same principle on a per-field basis for vectors.
3484     Value *Shadow =
3485         IRB.CreateSExt(IRB.CreateICmpNE(S0, getCleanShadow(S0)), ShadowType);
3486 
3487     // The return type might have more elements than the input.
3488     // Extend the return type back to its original width if necessary.
3489     Value *FullShadow = maybeExtendVectorShadowWithZeros(Shadow, I);
3490 
3491     setShadow(&I, FullShadow);
3492     setOriginForNaryOp(I);
3493   }
3494 
3495   // Instrument x86 SSE vector convert intrinsic.
3496   //
3497   // This function instruments intrinsics like cvtsi2ss:
3498   // %Out = int_xxx_cvtyyy(%ConvertOp)
3499   // or
3500   // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
3501   // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
3502   // number of \p Out elements, and (if it has 2 arguments) copies the rest of the
3503   // elements from \p CopyOp.
3504   // In most cases the conversion involves a floating-point value which may trigger a
3505   // hardware exception when not fully initialized. For this reason we require
3506   // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
3507   // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
3508   // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
3509   // return a fully initialized value.
3510   //
3511   // For Arm NEON vector convert intrinsics, see
3512   // handleNEONVectorConvertIntrinsic().
3513   void handleSSEVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
3514                                        bool HasRoundingMode = false) {
3515     IRBuilder<> IRB(&I);
3516     Value *CopyOp, *ConvertOp;
3517 
3518     assert((!HasRoundingMode ||
3519             isa<ConstantInt>(I.getArgOperand(I.arg_size() - 1))) &&
3520            "Invalid rounding mode");
3521 
3522     switch (I.arg_size() - HasRoundingMode) {
3523     case 2:
3524       CopyOp = I.getArgOperand(0);
3525       ConvertOp = I.getArgOperand(1);
3526       break;
3527     case 1:
3528       ConvertOp = I.getArgOperand(0);
3529       CopyOp = nullptr;
3530       break;
3531     default:
3532       llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
3533     }
3534 
3535     // The first *NumUsedElements* elements of ConvertOp are converted to the
3536     // same number of output elements. The rest of the output is copied from
3537     // CopyOp, or (if not available) filled with zeroes.
3538     // Combine shadow for elements of ConvertOp that are used in this operation,
3539     // and insert a check.
3540     // FIXME: consider propagating shadow of ConvertOp, at least in the case of
3541     // int->any conversion.
3542     Value *ConvertShadow = getShadow(ConvertOp);
3543     Value *AggShadow = nullptr;
3544     if (ConvertOp->getType()->isVectorTy()) {
3545       AggShadow = IRB.CreateExtractElement(
3546           ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
3547       for (int i = 1; i < NumUsedElements; ++i) {
3548         Value *MoreShadow = IRB.CreateExtractElement(
3549             ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
3550         AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
3551       }
3552     } else {
3553       AggShadow = ConvertShadow;
3554     }
3555     assert(AggShadow->getType()->isIntegerTy());
3556     insertCheckShadow(AggShadow, getOrigin(ConvertOp), &I);
3557 
3558     // Build result shadow by zero-filling parts of CopyOp shadow that come from
3559     // ConvertOp.
3560     if (CopyOp) {
3561       assert(CopyOp->getType() == I.getType());
3562       assert(CopyOp->getType()->isVectorTy());
3563       Value *ResultShadow = getShadow(CopyOp);
3564       Type *EltTy = cast<VectorType>(ResultShadow->getType())->getElementType();
3565       for (int i = 0; i < NumUsedElements; ++i) {
3566         ResultShadow = IRB.CreateInsertElement(
3567             ResultShadow, ConstantInt::getNullValue(EltTy),
3568             ConstantInt::get(IRB.getInt32Ty(), i));
3569       }
3570       setShadow(&I, ResultShadow);
3571       setOrigin(&I, getOrigin(CopyOp));
3572     } else {
3573       setShadow(&I, getCleanShadow(&I));
3574       setOrigin(&I, getCleanOrigin());
3575     }
3576   }
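  // e.g., for <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> %a, i32 %b) with
  // NumUsedElements == 1: Shadow(%b) is checked (an uninitialized-value report
  // is triggered at run time if %b is not fully initialized), element 0 of the
  // result shadow is set to clean, and elements 1..3 inherit the shadow of %a.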
3577 
3578   // Given a scalar or vector, extract the lower 64 bits (or fewer), and return
3579   // all zeroes if they are zero, and all ones otherwise.
3580   Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
3581     if (S->getType()->isVectorTy())
3582       S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
3583     assert(S->getType()->getPrimitiveSizeInBits() <= 64);
3584     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
3585     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
3586   }
3587 
3588   // Given a vector, extract its first element, and return all
3589   // zeroes if it is zero, and all ones otherwise.
3590   Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
3591     Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
3592     Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
3593     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
3594   }
3595 
3596   Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
3597     Type *T = S->getType();
3598     assert(T->isVectorTy());
3599     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
3600     return IRB.CreateSExt(S2, T);
3601   }
3602 
3603   // Instrument vector shift intrinsic.
3604   //
3605   // This function instruments intrinsics like int_x86_avx2_psll_w.
3606   // Intrinsic shifts %In by %ShiftSize bits.
3607   // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
3608   // size, and the rest is ignored. Behavior is defined even if shift size is
3609   // greater than register (or field) width.
3610   void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
3611     assert(I.arg_size() == 2);
3612     IRBuilder<> IRB(&I);
3613     // If any of the S2 bits are poisoned, the whole thing is poisoned.
3614     // Otherwise perform the same shift on S1.
3615     Value *S1 = getShadow(&I, 0);
3616     Value *S2 = getShadow(&I, 1);
3617     Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
3618                              : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
3619     Value *V1 = I.getOperand(0);
3620     Value *V2 = I.getOperand(1);
3621     Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
3622                                   {IRB.CreateBitCast(S1, V1->getType()), V2});
3623     Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
3624     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
3625     setOriginForNaryOp(I);
3626   }
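  // e.g., for <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %x, <8 x i16> %n) the
  // shadow of %x is shifted by the same concrete %n, and if any of the low 64
  // bits of Shadow(%n) are set, the whole result is additionally poisoned via
  // the all-ones value produced by Lower64ShadowExtend().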
3627 
3628   // Get an MMX-sized vector type.
3629   Type *getMMXVectorTy(unsigned EltSizeInBits) {
3630     const unsigned X86_MMXSizeInBits = 64;
3631     assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
3632            "Illegal MMX vector element size");
3633     return FixedVectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
3634                                 X86_MMXSizeInBits / EltSizeInBits);
3635   }
3636 
3637   // Returns a signed counterpart for an (un)signed-saturate-and-pack
3638   // intrinsic.
3639   Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
3640     switch (id) {
3641     case Intrinsic::x86_sse2_packsswb_128:
3642     case Intrinsic::x86_sse2_packuswb_128:
3643       return Intrinsic::x86_sse2_packsswb_128;
3644 
3645     case Intrinsic::x86_sse2_packssdw_128:
3646     case Intrinsic::x86_sse41_packusdw:
3647       return Intrinsic::x86_sse2_packssdw_128;
3648 
3649     case Intrinsic::x86_avx2_packsswb:
3650     case Intrinsic::x86_avx2_packuswb:
3651       return Intrinsic::x86_avx2_packsswb;
3652 
3653     case Intrinsic::x86_avx2_packssdw:
3654     case Intrinsic::x86_avx2_packusdw:
3655       return Intrinsic::x86_avx2_packssdw;
3656 
3657     case Intrinsic::x86_mmx_packsswb:
3658     case Intrinsic::x86_mmx_packuswb:
3659       return Intrinsic::x86_mmx_packsswb;
3660 
3661     case Intrinsic::x86_mmx_packssdw:
3662       return Intrinsic::x86_mmx_packssdw;
3663     default:
3664       llvm_unreachable("unexpected intrinsic id");
3665     }
3666   }
3667 
3668   // Instrument vector pack intrinsic.
3669   //
3670   // This function instruments intrinsics like x86_mmx_packsswb, that
3671   // packs elements of 2 input vectors into half as many bits with saturation.
3672   // Shadow is propagated with the signed variant of the same intrinsic applied
3673   // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
3674   // MMXEltSizeInBits is used only for x86mmx arguments.
3675   void handleVectorPackIntrinsic(IntrinsicInst &I,
3676                                  unsigned MMXEltSizeInBits = 0) {
3677     assert(I.arg_size() == 2);
3678     IRBuilder<> IRB(&I);
3679     Value *S1 = getShadow(&I, 0);
3680     Value *S2 = getShadow(&I, 1);
3681     assert(S1->getType()->isVectorTy());
3682 
3683     // SExt and ICmpNE below must apply to individual elements of input vectors.
3684     // In case of x86mmx arguments, cast them to appropriate vector types and
3685     // back.
3686     Type *T =
3687         MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) : S1->getType();
3688     if (MMXEltSizeInBits) {
3689       S1 = IRB.CreateBitCast(S1, T);
3690       S2 = IRB.CreateBitCast(S2, T);
3691     }
3692     Value *S1_ext =
3693         IRB.CreateSExt(IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
3694     Value *S2_ext =
3695         IRB.CreateSExt(IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
3696     if (MMXEltSizeInBits) {
3697       S1_ext = IRB.CreateBitCast(S1_ext, getMMXVectorTy(64));
3698       S2_ext = IRB.CreateBitCast(S2_ext, getMMXVectorTy(64));
3699     }
3700 
3701     Value *S = IRB.CreateIntrinsic(getSignedPackIntrinsic(I.getIntrinsicID()),
3702                                    {S1_ext, S2_ext}, /*FMFSource=*/nullptr,
3703                                    "_msprop_vector_pack");
3704     if (MMXEltSizeInBits)
3705       S = IRB.CreateBitCast(S, getShadowTy(&I));
3706     setShadow(&I, S);
3707     setOriginForNaryOp(I);
3708   }
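  // e.g., for <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a, <8 x i16> %b)
  // each shadow lane is widened to all-ones if dirty and then packed with the
  // *signed* variant (packsswb): a dirty lane's 0xFFFF is -1, which stays 0xFF
  // under signed saturation, whereas unsigned saturation would clamp it to 0
  // and lose the poison.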
3709 
3710   // Convert `Mask` into `<n x i1>`.
3711   Constant *createDppMask(unsigned Width, unsigned Mask) {
3712     SmallVector<Constant *, 4> R(Width);
3713     for (auto &M : R) {
3714       M = ConstantInt::getBool(F.getContext(), Mask & 1);
3715       Mask >>= 1;
3716     }
3717     return ConstantVector::get(R);
3718   }
3719 
3720   // Calculate the output shadow as an array of booleans `<n x i1>`, assuming
3721   // that if any arg is poisoned, the entire dot product is poisoned.
3722   Value *findDppPoisonedOutput(IRBuilder<> &IRB, Value *S, unsigned SrcMask,
3723                                unsigned DstMask) {
3724     const unsigned Width =
3725         cast<FixedVectorType>(S->getType())->getNumElements();
3726 
3727     S = IRB.CreateSelect(createDppMask(Width, SrcMask), S,
3728                          Constant::getNullValue(S->getType()));
3729     Value *SElem = IRB.CreateOrReduce(S);
3730     Value *IsClean = IRB.CreateIsNull(SElem, "_msdpp");
3731     Value *DstMaskV = createDppMask(Width, DstMask);
3732 
3733     return IRB.CreateSelect(
3734         IsClean, Constant::getNullValue(DstMaskV->getType()), DstMaskV);
3735   }
3736 
3737   // See `Intel Intrinsics Guide` for `_dp_p*` instructions.
3738   //
3739   // The 2 and 4 element versions produce a single scalar dot product and then
3740   // put it into the elements of the output vector selected by the 4 lowest
3741   // bits of the mask. The top 4 bits of the mask control which elements of
3742   // the input to use for the dot product.
3743   //
3744   // The 8 element version's mask still has only 4 bits for input and 4 bits
3745   // for the output mask. According to the spec, it simply operates as the 4
3746   // element version on the first 4 elements of the inputs and output, and then
3747   // on the last 4 elements of the inputs and output.
3748   void handleDppIntrinsic(IntrinsicInst &I) {
3749     IRBuilder<> IRB(&I);
3750 
3751     Value *S0 = getShadow(&I, 0);
3752     Value *S1 = getShadow(&I, 1);
3753     Value *S = IRB.CreateOr(S0, S1);
3754 
3755     const unsigned Width =
3756         cast<FixedVectorType>(S->getType())->getNumElements();
3757     assert(Width == 2 || Width == 4 || Width == 8);
3758 
3759     const unsigned Mask = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3760     const unsigned SrcMask = Mask >> 4;
3761     const unsigned DstMask = Mask & 0xf;
3762 
3763     // Calculate shadow as `<n x i1>`.
3764     Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
3765     if (Width == 8) {
3766       // The first 4 elements of the shadow are already calculated.
3767       // `findDppPoisonedOutput` operates on 32-bit masks, so just shift and repeat.
3768       SI1 = IRB.CreateOr(
3769           SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
3770     }
3771     // Extend to the real size of the shadow, poisoning either all or none of
3772     // the bits of an element.
3773     S = IRB.CreateSExt(SI1, S->getType(), "_msdpp");
3774 
3775     setShadow(&I, S);
3776     setOriginForNaryOp(I);
3777   }
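
  // Worked example: for mask 0xF1, SrcMask == 0xF (all four input elements
  // feed the dot product) and DstMask == 0x1 (only output element 0 receives
  // it), so poison anywhere in the used inputs poisons output element 0 only.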
3778 
3779   Value *convertBlendvToSelectMask(IRBuilder<> &IRB, Value *C) {
3780     C = CreateAppToShadowCast(IRB, C);
3781     FixedVectorType *FVT = cast<FixedVectorType>(C->getType());
3782     unsigned ElSize = FVT->getElementType()->getPrimitiveSizeInBits();
3783     C = IRB.CreateAShr(C, ElSize - 1);
3784     FVT = FixedVectorType::get(IRB.getInt1Ty(), FVT->getNumElements());
3785     return IRB.CreateTrunc(C, FVT);
3786   }
3787 
3788   // `blendv(f, t, c)` is effectively `select(c[top_bit], t, f)`.
3789   void handleBlendvIntrinsic(IntrinsicInst &I) {
3790     Value *C = I.getOperand(2);
3791     Value *T = I.getOperand(1);
3792     Value *F = I.getOperand(0);
3793 
3794     Value *Sc = getShadow(&I, 2);
3795     Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
3796 
3797     {
3798       IRBuilder<> IRB(&I);
3799       // Extract top bit from condition and its shadow.
3800       C = convertBlendvToSelectMask(IRB, C);
3801       Sc = convertBlendvToSelectMask(IRB, Sc);
3802 
3803       setShadow(C, Sc);
3804       setOrigin(C, Oc);
3805     }
3806 
3807     handleSelectLikeInst(I, C, T, F);
3808   }
3809 
3810   // Instrument sum-of-absolute-differences intrinsic.
3811   void handleVectorSadIntrinsic(IntrinsicInst &I, bool IsMMX = false) {
3812     const unsigned SignificantBitsPerResultElement = 16;
3813     Type *ResTy = IsMMX ? IntegerType::get(*MS.C, 64) : I.getType();
3814     unsigned ZeroBitsPerResultElement =
3815         ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
3816 
3817     IRBuilder<> IRB(&I);
3818     auto *Shadow0 = getShadow(&I, 0);
3819     auto *Shadow1 = getShadow(&I, 1);
3820     Value *S = IRB.CreateOr(Shadow0, Shadow1);
3821     S = IRB.CreateBitCast(S, ResTy);
3822     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
3823                        ResTy);
3824     S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
3825     S = IRB.CreateBitCast(S, getShadowTy(&I));
3826     setShadow(&I, S);
3827     setOriginForNaryOp(I);
3828   }
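
  // Sketch of the approximation for, e.g., @llvm.x86.sse2.psad.bw: the operand
  // shadows are OR'ed, and each 8-byte group collapses via icmp/sext, so any
  // poisoned bit in a group poisons all 16 significant bits of its result
  // element, while the always-zero upper bits stay clean thanks to the LShr.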
3829 
3830   // Instrument multiply-add intrinsic.
3831   void handleVectorPmaddIntrinsic(IntrinsicInst &I,
3832                                   unsigned MMXEltSizeInBits = 0) {
3833     Type *ResTy =
3834         MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits * 2) : I.getType();
3835     IRBuilder<> IRB(&I);
3836     auto *Shadow0 = getShadow(&I, 0);
3837     auto *Shadow1 = getShadow(&I, 1);
3838     Value *S = IRB.CreateOr(Shadow0, Shadow1);
3839     S = IRB.CreateBitCast(S, ResTy);
3840     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
3841                        ResTy);
3842     S = IRB.CreateBitCast(S, getShadowTy(&I));
3843     setShadow(&I, S);
3844     setOriginForNaryOp(I);
3845   }
3846 
3847   // Instrument compare-packed intrinsic.
3848   // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
3849   // all-ones shadow.
3850   void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
3851     IRBuilder<> IRB(&I);
3852     Type *ResTy = getShadowTy(&I);
3853     auto *Shadow0 = getShadow(&I, 0);
3854     auto *Shadow1 = getShadow(&I, 1);
3855     Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
3856     Value *S = IRB.CreateSExt(
3857         IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
3858     setShadow(&I, S);
3859     setOriginForNaryOp(I);
3860   }
3861 
3862   // Instrument compare-scalar intrinsic.
3863   // This handles both cmp* intrinsics which return the result in the first
3864   // element of a vector, and comi* which return the result as i32.
3865   void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
3866     IRBuilder<> IRB(&I);
3867     auto *Shadow0 = getShadow(&I, 0);
3868     auto *Shadow1 = getShadow(&I, 1);
3869     Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
3870     Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
3871     setShadow(&I, S);
3872     setOriginForNaryOp(I);
3873   }
3874 
3875   // Instrument generic vector reduction intrinsics
3876   // by ORing together all their fields.
3877   //
3878   // If AllowShadowCast is true, the return type does not need to be the same
3879   // type as the fields
3880   // e.g., declare i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8>)
3881   void handleVectorReduceIntrinsic(IntrinsicInst &I, bool AllowShadowCast) {
3882     assert(I.arg_size() == 1);
3883 
3884     IRBuilder<> IRB(&I);
3885     Value *S = IRB.CreateOrReduce(getShadow(&I, 0));
3886     if (AllowShadowCast)
3887       S = CreateShadowCast(IRB, S, getShadowTy(&I));
3888     else
3889       assert(S->getType() == getShadowTy(&I));
3890     setShadow(&I, S);
3891     setOriginForNaryOp(I);
3892   }
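
  // e.g., for %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v), the
  // shadow is the OR-reduction of %v's shadow. This is a bitwise
  // approximation: for reductions such as add, a carry could in principle
  // spread poison to other bit positions.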
3893 
3894   // Similar to handleVectorReduceIntrinsic but with an initial starting value.
3895   // e.g., call float @llvm.vector.reduce.fadd.f32.v2f32(float %a0, <2 x float>
3896   // %a1)
3897   //       shadow = shadow[a0] | shadow[a1.0] | shadow[a1.1]
3898   //
3899   // The type of the return value, initial starting value, and elements of the
3900   // vector must be identical.
3901   void handleVectorReduceWithStarterIntrinsic(IntrinsicInst &I) {
3902     assert(I.arg_size() == 2);
3903 
3904     IRBuilder<> IRB(&I);
3905     Value *Shadow0 = getShadow(&I, 0);
3906     Value *Shadow1 = IRB.CreateOrReduce(getShadow(&I, 1));
3907     assert(Shadow0->getType() == Shadow1->getType());
3908     Value *S = IRB.CreateOr(Shadow0, Shadow1);
3909     assert(S->getType() == getShadowTy(&I));
3910     setShadow(&I, S);
3911     setOriginForNaryOp(I);
3912   }
3913 
3914   // Instrument vector.reduce.or intrinsic.
3915   // Valid (non-poisoned) set bits in the operand pull low the
3916   // corresponding shadow bits.
3917   void handleVectorReduceOrIntrinsic(IntrinsicInst &I) {
3918     assert(I.arg_size() == 1);
3919 
3920     IRBuilder<> IRB(&I);
3921     Value *OperandShadow = getShadow(&I, 0);
3922     Value *OperandUnsetBits = IRB.CreateNot(I.getOperand(0));
3923     Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);
3924     // Bit N is clean if any field's bit N is 1 and unpoisoned
3925     Value *OutShadowMask = IRB.CreateAndReduce(OperandUnsetOrPoison);
3926     // Otherwise, it is clean if every field's bit N is unpoisoned
3927     Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
3928     Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);
3929 
3930     setShadow(&I, S);
3931     setOrigin(&I, getOrigin(&I, 0));
3932   }
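
  // Worked example (a hypothetical <2 x i2> operand):
  //   V = {0b10, 0b11}, Shadow = {0b00, 0b01}   ; only bit 0 of V[1] poisoned
  //   OperandUnsetOrPoison = {~0b10 | 0b00, ~0b11 | 0b01} = {0b01, 0b01}
  //   OutShadowMask = 0b01, OrShadow = 0b00 | 0b01 = 0b01
  //   S = 0b01 & 0b01 = 0b01
  // Bit 1 is clean because V[0] contributes a definite set bit; bit 0 is
  // poisoned because no clean set bit guarantees it and some input is dirty.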
3933 
3934   // Instrument vector.reduce.and intrinsic.
3935   // Valid (non-poisoned) unset bits in the operand pull down the
3936   // corresponding shadow bits.
3937   void handleVectorReduceAndIntrinsic(IntrinsicInst &I) {
3938     assert(I.arg_size() == 1);
3939 
3940     IRBuilder<> IRB(&I);
3941     Value *OperandShadow = getShadow(&I, 0);
3942     Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);
3943     // Bit N is clean if any field's bit N is 0 and unpoisoned
3944     Value *OutShadowMask = IRB.CreateAndReduce(OperandSetOrPoison);
3945     // Otherwise, it is clean if every field's bit N is unpoisoned
3946     Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
3947     Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);
3948 
3949     setShadow(&I, S);
3950     setOrigin(&I, getOrigin(&I, 0));
3951   }
3952 
3953   void handleStmxcsr(IntrinsicInst &I) {
3954     IRBuilder<> IRB(&I);
3955     Value *Addr = I.getArgOperand(0);
3956     Type *Ty = IRB.getInt32Ty();
3957     Value *ShadowPtr =
3958         getShadowOriginPtr(Addr, IRB, Ty, Align(1), /*isStore*/ true).first;
3959 
3960     IRB.CreateStore(getCleanShadow(Ty), ShadowPtr);
3961 
3962     if (ClCheckAccessAddress)
3963       insertCheckShadowOf(Addr, &I);
3964   }
3965 
3966   void handleLdmxcsr(IntrinsicInst &I) {
3967     if (!InsertChecks)
3968       return;
3969 
3970     IRBuilder<> IRB(&I);
3971     Value *Addr = I.getArgOperand(0);
3972     Type *Ty = IRB.getInt32Ty();
3973     const Align Alignment = Align(1);
3974     Value *ShadowPtr, *OriginPtr;
3975     std::tie(ShadowPtr, OriginPtr) =
3976         getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false);
3977 
3978     if (ClCheckAccessAddress)
3979       insertCheckShadowOf(Addr, &I);
3980 
3981     Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
3982     Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
3983                                     : getCleanOrigin();
3984     insertCheckShadow(Shadow, Origin, &I);
3985   }
3986 
3987   void handleMaskedExpandLoad(IntrinsicInst &I) {
3988     IRBuilder<> IRB(&I);
3989     Value *Ptr = I.getArgOperand(0);
3990     MaybeAlign Align = I.getParamAlign(0);
3991     Value *Mask = I.getArgOperand(1);
3992     Value *PassThru = I.getArgOperand(2);
3993 
3994     if (ClCheckAccessAddress) {
3995       insertCheckShadowOf(Ptr, &I);
3996       insertCheckShadowOf(Mask, &I);
3997     }
3998 
3999     if (!PropagateShadow) {
4000       setShadow(&I, getCleanShadow(&I));
4001       setOrigin(&I, getCleanOrigin());
4002       return;
4003     }
4004 
4005     Type *ShadowTy = getShadowTy(&I);
4006     Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
4007     auto [ShadowPtr, OriginPtr] =
4008         getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, /*isStore*/ false);
4009 
4010     Value *Shadow =
4011         IRB.CreateMaskedExpandLoad(ShadowTy, ShadowPtr, Align, Mask,
4012                                    getShadow(PassThru), "_msmaskedexpload");
4013 
4014     setShadow(&I, Shadow);
4015 
4016     // TODO: Store origins.
4017     setOrigin(&I, getCleanOrigin());
4018   }
4019 
4020   void handleMaskedCompressStore(IntrinsicInst &I) {
4021     IRBuilder<> IRB(&I);
4022     Value *Values = I.getArgOperand(0);
4023     Value *Ptr = I.getArgOperand(1);
4024     MaybeAlign Align = I.getParamAlign(1);
4025     Value *Mask = I.getArgOperand(2);
4026 
4027     if (ClCheckAccessAddress) {
4028       insertCheckShadowOf(Ptr, &I);
4029       insertCheckShadowOf(Mask, &I);
4030     }
4031 
4032     Value *Shadow = getShadow(Values);
4033     Type *ElementShadowTy =
4034         getShadowTy(cast<VectorType>(Values->getType())->getElementType());
4035     auto [ShadowPtr, OriginPtrs] =
4036         getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, /*isStore*/ true);
4037 
4038     IRB.CreateMaskedCompressStore(Shadow, ShadowPtr, Align, Mask);
4039 
4040     // TODO: Store origins.
4041   }
4042 
4043   void handleMaskedGather(IntrinsicInst &I) {
4044     IRBuilder<> IRB(&I);
4045     Value *Ptrs = I.getArgOperand(0);
4046     const Align Alignment(
4047         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
4048     Value *Mask = I.getArgOperand(2);
4049     Value *PassThru = I.getArgOperand(3);
4050 
4051     Type *PtrsShadowTy = getShadowTy(Ptrs);
4052     if (ClCheckAccessAddress) {
4053       insertCheckShadowOf(Mask, &I);
4054       Value *MaskedPtrShadow = IRB.CreateSelect(
4055           Mask, getShadow(Ptrs), Constant::getNullValue((PtrsShadowTy)),
4056           "_msmaskedptrs");
4057       insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);
4058     }
4059 
4060     if (!PropagateShadow) {
4061       setShadow(&I, getCleanShadow(&I));
4062       setOrigin(&I, getCleanOrigin());
4063       return;
4064     }
4065 
4066     Type *ShadowTy = getShadowTy(&I);
4067     Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
4068     auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
4069         Ptrs, IRB, ElementShadowTy, Alignment, /*isStore*/ false);
4070 
4071     Value *Shadow =
4072         IRB.CreateMaskedGather(ShadowTy, ShadowPtrs, Alignment, Mask,
4073                                getShadow(PassThru), "_msmaskedgather");
4074 
4075     setShadow(&I, Shadow);
4076 
4077     // TODO: Store origins.
4078     setOrigin(&I, getCleanOrigin());
4079   }
4080 
4081   void handleMaskedScatter(IntrinsicInst &I) {
4082     IRBuilder<> IRB(&I);
4083     Value *Values = I.getArgOperand(0);
4084     Value *Ptrs = I.getArgOperand(1);
4085     const Align Alignment(
4086         cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
4087     Value *Mask = I.getArgOperand(3);
4088 
4089     Type *PtrsShadowTy = getShadowTy(Ptrs);
4090     if (ClCheckAccessAddress) {
4091       insertCheckShadowOf(Mask, &I);
4092       Value *MaskedPtrShadow = IRB.CreateSelect(
4093           Mask, getShadow(Ptrs), Constant::getNullValue((PtrsShadowTy)),
4094           "_msmaskedptrs");
4095       insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);
4096     }
4097 
4098     Value *Shadow = getShadow(Values);
4099     Type *ElementShadowTy =
4100         getShadowTy(cast<VectorType>(Values->getType())->getElementType());
4101     auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
4102         Ptrs, IRB, ElementShadowTy, Alignment, /*isStore*/ true);
4103 
4104     IRB.CreateMaskedScatter(Shadow, ShadowPtrs, Alignment, Mask);
4105 
4106     // TODO: Store origin.
4107   }
4108 
4109   // Intrinsic::masked_store
4110   //
4111   // Note: handleAVXMaskedStore handles AVX/AVX2 variants, though AVX512 masked
4112   //       stores are lowered to Intrinsic::masked_store.
4113   void handleMaskedStore(IntrinsicInst &I) {
4114     IRBuilder<> IRB(&I);
4115     Value *V = I.getArgOperand(0);
4116     Value *Ptr = I.getArgOperand(1);
4117     const Align Alignment(
4118         cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
4119     Value *Mask = I.getArgOperand(3);
4120     Value *Shadow = getShadow(V);
4121 
4122     if (ClCheckAccessAddress) {
4123       insertCheckShadowOf(Ptr, &I);
4124       insertCheckShadowOf(Mask, &I);
4125     }
4126 
4127     Value *ShadowPtr;
4128     Value *OriginPtr;
4129     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
4130         Ptr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);
4131 
4132     IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);
4133 
4134     if (!MS.TrackOrigins)
4135       return;
4136 
4137     auto &DL = F.getDataLayout();
4138     paintOrigin(IRB, getOrigin(V), OriginPtr,
4139                 DL.getTypeStoreSize(Shadow->getType()),
4140                 std::max(Alignment, kMinOriginAlignment));
4141   }
4142 
4143   // Intrinsic::masked_load
4144   //
4145   // Note: handleAVXMaskedLoad handles AVX/AVX2 variants, though AVX512 masked
4146   //       loads are lowered to Intrinsic::masked_load.
4147   void handleMaskedLoad(IntrinsicInst &I) {
4148     IRBuilder<> IRB(&I);
4149     Value *Ptr = I.getArgOperand(0);
4150     const Align Alignment(
4151         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
4152     Value *Mask = I.getArgOperand(2);
4153     Value *PassThru = I.getArgOperand(3);
4154 
4155     if (ClCheckAccessAddress) {
4156       insertCheckShadowOf(Ptr, &I);
4157       insertCheckShadowOf(Mask, &I);
4158     }
4159 
4160     if (!PropagateShadow) {
4161       setShadow(&I, getCleanShadow(&I));
4162       setOrigin(&I, getCleanOrigin());
4163       return;
4164     }
4165 
4166     Type *ShadowTy = getShadowTy(&I);
4167     Value *ShadowPtr, *OriginPtr;
4168     std::tie(ShadowPtr, OriginPtr) =
4169         getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, /*isStore*/ false);
4170     setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
4171                                        getShadow(PassThru), "_msmaskedld"));
4172 
4173     if (!MS.TrackOrigins)
4174       return;
4175 
4176     // Choose between PassThru's and the loaded value's origins.
4177     Value *MaskedPassThruShadow = IRB.CreateAnd(
4178         getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
4179 
4180     Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");
4181 
4182     Value *PtrOrigin = IRB.CreateLoad(MS.OriginTy, OriginPtr);
4183     Value *Origin = IRB.CreateSelect(NotNull, getOrigin(PassThru), PtrOrigin);
4184 
4185     setOrigin(&I, Origin);
4186   }
4187 
4188   // e.g., void @llvm.x86.avx.maskstore.ps.256(ptr, <8 x i32>, <8 x float>)
4189   //                                           dst  mask       src
4190   //
4191   // AVX512 masked stores are lowered to Intrinsic::masked_store and are handled
4192   // by handleMaskedStore.
4193   //
4194   // This function handles AVX and AVX2 masked stores; these use the MSBs of a
4195   // vector of integers, unlike the LLVM masked intrinsics, which require a
4196   // vector of booleans. X86InstCombineIntrinsic.cpp::simplifyX86MaskedLoad
4197   // mentions that the x86 backend does not know how to efficiently convert
4198   // from a vector of booleans back into the AVX mask format; therefore, they
4199   // (and we) do not reduce AVX/AVX2 masked intrinsics into LLVM masked
4200   // intrinsics.
4201   void handleAVXMaskedStore(IntrinsicInst &I) {
4202     assert(I.arg_size() == 3);
4203 
4204     IRBuilder<> IRB(&I);
4205 
4206     Value *Dst = I.getArgOperand(0);
4207     assert(Dst->getType()->isPointerTy() && "Destination is not a pointer!");
4208 
4209     Value *Mask = I.getArgOperand(1);
4210     assert(isa<VectorType>(Mask->getType()) && "Mask is not a vector!");
4211 
4212     Value *Src = I.getArgOperand(2);
4213     assert(isa<VectorType>(Src->getType()) && "Source is not a vector!");
4214 
4215     const Align Alignment = Align(1);
4216 
4217     Value *SrcShadow = getShadow(Src);
4218 
4219     if (ClCheckAccessAddress) {
4220       insertCheckShadowOf(Dst, &I);
4221       insertCheckShadowOf(Mask, &I);
4222     }
4223 
4224     Value *DstShadowPtr;
4225     Value *DstOriginPtr;
4226     std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
4227         Dst, IRB, SrcShadow->getType(), Alignment, /*isStore*/ true);
4228 
4229     SmallVector<Value *, 2> ShadowArgs;
4230     ShadowArgs.append(1, DstShadowPtr);
4231     ShadowArgs.append(1, Mask);
4232     // The intrinsic may require floating-point but shadows can be arbitrary
4233     // bit patterns, of which some would be interpreted as "invalid"
4234     // floating-point values (NaN etc.); we assume the intrinsic will happily
4235     // copy them.
4236     ShadowArgs.append(1, IRB.CreateBitCast(SrcShadow, Src->getType()));
4237 
4238     CallInst *CI =
4239         IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);
4240     setShadow(&I, CI);
4241 
4242     if (!MS.TrackOrigins)
4243       return;
4244 
4245     // Approximation only
4246     auto &DL = F.getDataLayout();
4247     paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
4248                 DL.getTypeStoreSize(SrcShadow->getType()),
4249                 std::max(Alignment, kMinOriginAlignment));
4250   }
4251 
4252   // e.g., <8 x float> @llvm.x86.avx.maskload.ps.256(ptr, <8 x i32>)
4253   //       return                                    src  mask
4254   //
4255   // Masked-off values are replaced with 0, which conveniently also represents
4256   // initialized memory.
4257   //
4258   // AVX512 masked loads are lowered to Intrinsic::masked_load and are handled
4259   // by handleMaskedLoad.
4260   //
4261   // We do not combine this with handleMaskedLoad; see comment in
4262   // handleAVXMaskedStore for the rationale.
4263   //
4264   // This is subtly different than handleIntrinsicByApplyingToShadow(I, 1)
4265   // because we need to apply getShadowOriginPtr, not getShadow, to the first
4266   // parameter.
4267   void handleAVXMaskedLoad(IntrinsicInst &I) {
4268     assert(I.arg_size() == 2);
4269 
4270     IRBuilder<> IRB(&I);
4271 
4272     Value *Src = I.getArgOperand(0);
4273     assert(Src->getType()->isPointerTy() && "Source is not a pointer!");
4274 
4275     Value *Mask = I.getArgOperand(1);
4276     assert(isa<VectorType>(Mask->getType()) && "Mask is not a vector!");
4277 
4278     const Align Alignment = Align(1);
4279 
4280     if (ClCheckAccessAddress) {
4281       insertCheckShadowOf(Mask, &I);
4282     }
4283 
4284     Type *SrcShadowTy = getShadowTy(Src);
4285     Value *SrcShadowPtr, *SrcOriginPtr;
4286     std::tie(SrcShadowPtr, SrcOriginPtr) =
4287         getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment, /*isStore*/ false);
4288 
4289     SmallVector<Value *, 2> ShadowArgs;
4290     ShadowArgs.append(1, SrcShadowPtr);
4291     ShadowArgs.append(1, Mask);
4292 
4293     CallInst *CI =
4294         IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), ShadowArgs);
4295     // The AVX masked load intrinsics do not have integer variants. We use the
4296     // floating-point variants, which will happily copy the shadows even if
4297     // they are interpreted as "invalid" floating-point values (NaN etc.).
4298     setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
4299 
4300     if (!MS.TrackOrigins)
4301       return;
4302 
4303     // The "pass-through" value is always zero (initialized). To the extent
4304     // that this results in initialized aligned 4-byte chunks, the origin value
4305     // is ignored. It is therefore correct to simply copy the origin from src.
4306     Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
4307     setOrigin(&I, PtrSrcOrigin);
4308   }
4309 
4310   // Test whether the mask indices are initialized, only checking the bits that
4311   // are actually used.
4312   //
4313   // e.g., if Idx is <32 x i16>, only (log2(32) == 5) bits of each index are
4314   //       used/checked.
4315   void maskedCheckAVXIndexShadow(IRBuilder<> &IRB, Value *Idx, Instruction *I) {
4316     assert(isFixedIntVector(Idx));
4317     auto IdxVectorSize =
4318         cast<FixedVectorType>(Idx->getType())->getNumElements();
4319     assert(isPowerOf2_64(IdxVectorSize));
4320 
4321     // Compiler isn't smart enough, let's help it
4322     if (isa<Constant>(Idx))
4323       return;
4324 
4325     Value *Truncated = IRB.CreateTrunc(
4326         Idx,
4327         FixedVectorType::get(Type::getIntNTy(*MS.C, Log2_64(IdxVectorSize)),
4328                              IdxVectorSize));
4329     insertCheckShadow(Truncated, getOrigin(Idx), I);
4330   }
4331 
4332   // Instrument AVX permutation intrinsic.
4333   // We apply the same permutation (argument index 1) to the shadow.
4334   void handleAVXVpermilvar(IntrinsicInst &I) {
4335     IRBuilder<> IRB(&I);
4336     Value *Shadow = getShadow(&I, 0);
4337     maskedCheckAVXIndexShadow(IRB, I.getArgOperand(1), &I);
4338 
4339     // Shadows are integer-ish types but some intrinsics require a
4340     // different (e.g., floating-point) type.
4341     Shadow = IRB.CreateBitCast(Shadow, I.getArgOperand(0)->getType());
4342     CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
4343                                        {Shadow, I.getArgOperand(1)});
4344 
4345     setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
4346     setOriginForNaryOp(I);
4347   }
4348 
4349   // Instrument AVX permutation intrinsic.
4350   // We apply the same permutation (argument index 1) to the shadows.
4351   void handleAVXVpermi2var(IntrinsicInst &I) {
4352     assert(I.arg_size() == 3);
4353     assert(isa<FixedVectorType>(I.getArgOperand(0)->getType()));
4354     assert(isa<FixedVectorType>(I.getArgOperand(1)->getType()));
4355     assert(isa<FixedVectorType>(I.getArgOperand(2)->getType()));
4356     [[maybe_unused]] auto ArgVectorSize =
4357         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
4358     assert(cast<FixedVectorType>(I.getArgOperand(1)->getType())
4359                ->getNumElements() == ArgVectorSize);
4360     assert(cast<FixedVectorType>(I.getArgOperand(2)->getType())
4361                ->getNumElements() == ArgVectorSize);
4362     assert(I.getArgOperand(0)->getType() == I.getArgOperand(2)->getType());
4363     assert(I.getType() == I.getArgOperand(0)->getType());
4364     assert(I.getArgOperand(1)->getType()->isIntOrIntVectorTy());
4365     IRBuilder<> IRB(&I);
4366     Value *AShadow = getShadow(&I, 0);
4367     Value *Idx = I.getArgOperand(1);
4368     Value *BShadow = getShadow(&I, 2);
4369 
4370     maskedCheckAVXIndexShadow(IRB, Idx, &I);
4371 
4372     // Shadows are integer-ish types but some intrinsics require a
4373     // different (e.g., floating-point) type.
4374     AShadow = IRB.CreateBitCast(AShadow, I.getArgOperand(0)->getType());
4375     BShadow = IRB.CreateBitCast(BShadow, I.getArgOperand(2)->getType());
4376     CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
4377                                        {AShadow, Idx, BShadow});
4378     setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
4379     setOriginForNaryOp(I);
4380   }
4381 
4382   [[maybe_unused]] static bool isFixedIntVectorTy(const Type *T) {
4383     return isa<FixedVectorType>(T) && T->isIntOrIntVectorTy();
4384   }
4385 
4386   [[maybe_unused]] static bool isFixedFPVectorTy(const Type *T) {
4387     return isa<FixedVectorType>(T) && T->isFPOrFPVectorTy();
4388   }
4389 
4390   [[maybe_unused]] static bool isFixedIntVector(const Value *V) {
4391     return isFixedIntVectorTy(V->getType());
4392   }
4393 
4394   [[maybe_unused]] static bool isFixedFPVector(const Value *V) {
4395     return isFixedFPVectorTy(V->getType());
4396   }
4397 
4398   // e.g., call <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
4399   //                           (<16 x float> a, <16 x i32> writethru, i16 mask,
4400   //                           i32 rounding)
4401   //
4402   // dst[i] = mask[i] ? convert(a[i]) : writethru[i]
4403   // dst_shadow[i] = mask[i] ? all_or_nothing(a_shadow[i]) : writethru_shadow[i]
4404   //    where all_or_nothing(x) is fully uninitialized if x has any
4405   //    uninitialized bits
4406   void handleAVX512VectorConvertFPToInt(IntrinsicInst &I) {
4407     IRBuilder<> IRB(&I);
4408 
4409     assert(I.arg_size() == 4);
4410     Value *A = I.getOperand(0);
4411     Value *WriteThrough = I.getOperand(1);
4412     Value *Mask = I.getOperand(2);
4413     Value *RoundingMode = I.getOperand(3);
4414 
4415     assert(isFixedFPVector(A));
4416     assert(isFixedIntVector(WriteThrough));
4417 
4418     unsigned ANumElements =
4419         cast<FixedVectorType>(A->getType())->getNumElements();
4420     assert(ANumElements ==
4421            cast<FixedVectorType>(WriteThrough->getType())->getNumElements());
4422 
4423     assert(Mask->getType()->isIntegerTy());
4424     assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
4425     insertCheckShadowOf(Mask, &I);
4426 
4427     assert(RoundingMode->getType()->isIntegerTy());
4428     // Only four bits of the rounding mode are used, though it's very
4429     // unusual to have uninitialized bits there (more commonly, it's a
4430     // constant).
4431     insertCheckShadowOf(RoundingMode, &I);
4432 
4433     assert(I.getType() == WriteThrough->getType());
4434 
4435     // Convert i16 mask to <16 x i1>
4436     Mask = IRB.CreateBitCast(
4437         Mask, FixedVectorType::get(IRB.getInt1Ty(), ANumElements));
4438 
4439     Value *AShadow = getShadow(A);
4440     /// For scalars:
4441     /// Since they are converting from floating-point, the output is:
4442     /// - fully uninitialized if *any* bit of the input is uninitialized
4443     /// - fully initialized if all bits of the input are initialized
4444     /// We apply the same principle on a per-element basis for vectors.
4445     AShadow = IRB.CreateSExt(IRB.CreateICmpNE(AShadow, getCleanShadow(A)),
4446                              getShadowTy(A));
4447 
4448     Value *WriteThroughShadow = getShadow(WriteThrough);
4449     Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThroughShadow);
4450 
4451     setShadow(&I, Shadow);
4452     setOriginForNaryOp(I);
4453   }
4454 
4455   // Instrument BMI / BMI2 intrinsics.
4456   // All of these intrinsics are Z = I(X, Y)
4457   // where the types of all operands and the result match, and are either i32 or
4458   // i64. The following instrumentation happens to work for all of them:
4459   //   Sz = I(Sx, Y) | (sext (Sy != 0))
4460   void handleBmiIntrinsic(IntrinsicInst &I) {
4461     IRBuilder<> IRB(&I);
4462     Type *ShadowTy = getShadowTy(&I);
4463 
4464     // If any bit of the mask operand is poisoned, then the whole thing is.
4465     Value *SMask = getShadow(&I, 1);
4466     SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
4467                            ShadowTy);
4468     // Apply the same intrinsic to the shadow of the first operand.
4469     Value *S = IRB.CreateCall(I.getCalledFunction(),
4470                               {getShadow(&I, 0), I.getOperand(1)});
4471     S = IRB.CreateOr(SMask, S);
4472     setShadow(&I, S);
4473     setOriginForNaryOp(I);
4474   }
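
  // e.g., for %z = call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %y), this emits
  // (a sketch; %Sx and %Sy name the operand shadows):
  //     %Sz = or i32 pdep(%Sx, %y), sext(icmp ne i32 %Sy, 0)
  // i.e., the shadow of %x travels wherever pdep deposits the bits of %x, and
  // any poison in the mask %y poisons the whole result.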
4475 
4476   static SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
4477     SmallVector<int, 8> Mask;
4478     for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
4479       Mask.append(2, X);
4480     }
4481     return Mask;
4482   }
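
  // e.g., getPclmulMask(4, /*OddElements=*/false) == {0, 0, 2, 2} and
  //       getPclmulMask(4, /*OddElements=*/true)  == {1, 1, 3, 3}.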
4483 
4484   // Instrument pclmul intrinsics.
4485   // These intrinsics operate either on odd or on even elements of the input
4486   // vectors, depending on the constant in the 3rd argument, ignoring the rest.
4487   // Replace the unused elements with copies of the used ones, ex:
4488   //   (0, 1, 2, 3) -> (0, 0, 2, 2) (even case)
4489   // or
4490   //   (0, 1, 2, 3) -> (1, 1, 3, 3) (odd case)
4491   // and then apply the usual shadow combining logic.
4492   void handlePclmulIntrinsic(IntrinsicInst &I) {
4493     IRBuilder<> IRB(&I);
4494     unsigned Width =
4495         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
4496     assert(isa<ConstantInt>(I.getArgOperand(2)) &&
4497            "pclmul 3rd operand must be a constant");
4498     unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
4499     Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
4500                                            getPclmulMask(Width, Imm & 0x01));
4501     Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
4502                                            getPclmulMask(Width, Imm & 0x10));
4503     ShadowAndOriginCombiner SOC(this, IRB);
4504     SOC.Add(Shuf0, getOrigin(&I, 0));
4505     SOC.Add(Shuf1, getOrigin(&I, 1));
4506     SOC.Done(&I);
4507   }
4508 
4509   // Instrument _mm_*_sd|ss intrinsics
4510   void handleUnarySdSsIntrinsic(IntrinsicInst &I) {
4511     IRBuilder<> IRB(&I);
4512     unsigned Width =
4513         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
4514     Value *First = getShadow(&I, 0);
4515     Value *Second = getShadow(&I, 1);
4516     // First element of second operand, remaining elements of first operand
4517     SmallVector<int, 16> Mask;
4518     Mask.push_back(Width);
4519     for (unsigned i = 1; i < Width; i++)
4520       Mask.push_back(i);
4521     Value *Shadow = IRB.CreateShuffleVector(First, Second, Mask);
4522 
4523     setShadow(&I, Shadow);
4524     setOriginForNaryOp(I);
4525   }
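
  // e.g., for Width == 4 the shuffle mask is {4, 1, 2, 3}: element 0 of the
  // result shadow comes from the second operand, elements 1..3 from the first.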
4526 
4527   void handleVtestIntrinsic(IntrinsicInst &I) {
4528     IRBuilder<> IRB(&I);
4529     Value *Shadow0 = getShadow(&I, 0);
4530     Value *Shadow1 = getShadow(&I, 1);
4531     Value *Or = IRB.CreateOr(Shadow0, Shadow1);
4532     Value *NZ = IRB.CreateICmpNE(Or, Constant::getNullValue(Or->getType()));
4533     Value *Scalar = convertShadowToScalar(NZ, IRB);
4534     Value *Shadow = IRB.CreateZExt(Scalar, getShadowTy(&I));
4535 
4536     setShadow(&I, Shadow);
4537     setOriginForNaryOp(I);
4538   }
4539 
4540   void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
4541     IRBuilder<> IRB(&I);
4542     unsigned Width =
4543         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
4544     Value *First = getShadow(&I, 0);
4545     Value *Second = getShadow(&I, 1);
4546     Value *OrShadow = IRB.CreateOr(First, Second);
4547     // First element of both OR'd together, remaining elements of first operand
4548     SmallVector<int, 16> Mask;
4549     Mask.push_back(Width);
4550     for (unsigned i = 1; i < Width; i++)
4551       Mask.push_back(i);
4552     Value *Shadow = IRB.CreateShuffleVector(First, OrShadow, Mask);
4553 
4554     setShadow(&I, Shadow);
4555     setOriginForNaryOp(I);
4556   }
4557 
4558   // _mm_round_pd / _mm_round_ps.
4559   // Similar to maybeHandleSimpleNomemIntrinsic, except that
4560   // the second argument is guaranteed to be a constant integer.
4561   void handleRoundPdPsIntrinsic(IntrinsicInst &I) {
4562     assert(I.getArgOperand(0)->getType() == I.getType());
4563     assert(I.arg_size() == 2);
4564     assert(isa<ConstantInt>(I.getArgOperand(1)));
4565 
4566     IRBuilder<> IRB(&I);
4567     ShadowAndOriginCombiner SC(this, IRB);
4568     SC.Add(I.getArgOperand(0));
4569     SC.Done(&I);
4570   }
4571 
4572   // Instrument @llvm.abs intrinsic.
4573   //
4574   // e.g., i32       @llvm.abs.i32  (i32       <Src>, i1 <is_int_min_poison>)
4575   //       <4 x i32> @llvm.abs.v4i32(<4 x i32> <Src>, i1 <is_int_min_poison>)
4576   void handleAbsIntrinsic(IntrinsicInst &I) {
4577     assert(I.arg_size() == 2);
4578     Value *Src = I.getArgOperand(0);
4579     Value *IsIntMinPoison = I.getArgOperand(1);
4580 
4581     assert(I.getType()->isIntOrIntVectorTy());
4582 
4583     assert(Src->getType() == I.getType());
4584 
4585     assert(IsIntMinPoison->getType()->isIntegerTy());
4586     assert(IsIntMinPoison->getType()->getIntegerBitWidth() == 1);
4587 
4588     IRBuilder<> IRB(&I);
4589     Value *SrcShadow = getShadow(Src);
4590 
4591     APInt MinVal =
4592         APInt::getSignedMinValue(Src->getType()->getScalarSizeInBits());
4593     Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);
4594     Value *SrcIsMin = IRB.CreateICmp(CmpInst::ICMP_EQ, Src, MinValVec);
4595 
4596     Value *PoisonedShadow = getPoisonedShadow(Src);
4597     Value *PoisonedIfIntMinShadow =
4598         IRB.CreateSelect(SrcIsMin, PoisonedShadow, SrcShadow);
4599     Value *Shadow =
4600         IRB.CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);
4601 
4602     setShadow(&I, Shadow);
4603     setOrigin(&I, getOrigin(&I, 0));
4604   }
4605 
4606   void handleIsFpClass(IntrinsicInst &I) {
4607     IRBuilder<> IRB(&I);
4608     Value *Shadow = getShadow(&I, 0);
4609     setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
4610     setOrigin(&I, getOrigin(&I, 0));
4611   }
4612 
4613   void handleArithmeticWithOverflow(IntrinsicInst &I) {
4614     IRBuilder<> IRB(&I);
4615     Value *Shadow0 = getShadow(&I, 0);
4616     Value *Shadow1 = getShadow(&I, 1);
4617     Value *ShadowElt0 = IRB.CreateOr(Shadow0, Shadow1);
4618     Value *ShadowElt1 =
4619         IRB.CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));
4620 
4621     Value *Shadow = PoisonValue::get(getShadowTy(&I));
4622     Shadow = IRB.CreateInsertValue(Shadow, ShadowElt0, 0);
4623     Shadow = IRB.CreateInsertValue(Shadow, ShadowElt1, 1);
4624 
4625     setShadow(&I, Shadow);
4626     setOriginForNaryOp(I);
4627   }
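
  // e.g., for {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b), the
  // result shadow is (sketch) { %Sa | %Sb, (%Sa | %Sb) != 0 }: the value leg
  // gets the OR of the operand shadows, and the overflow bit is poisoned iff
  // any operand bit is poisoned.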
4628 
4629   Value *extractLowerShadow(IRBuilder<> &IRB, Value *V) {
4630     assert(isa<FixedVectorType>(V->getType()));
4631     assert(cast<FixedVectorType>(V->getType())->getNumElements() > 0);
4632     Value *Shadow = getShadow(V);
4633     return IRB.CreateExtractElement(Shadow,
4634                                     ConstantInt::get(IRB.getInt32Ty(), 0));
4635   }
4636 
4637   // Handle llvm.x86.avx512.mask.pmov{,s,us}.*.512
4638   //
4639   // e.g., call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512
4640   //         (<8 x i64>, <16 x i8>, i8)
4641   //          A           WriteThru  Mask
4642   //
4643   //       call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512
4644   //         (<16 x i32>, <16 x i8>, i16)
4645   //
4646   // Dst[i]        = Mask[i] ? truncate_or_saturate(A[i]) : WriteThru[i]
4647   // Dst_shadow[i] = Mask[i] ? truncate(A_shadow[i])      : WriteThru_shadow[i]
4648   //
4649   // If Dst has more elements than A, the excess elements are zeroed (and the
4650   // corresponding shadow is initialized).
4651   //
4652   // Note: for PMOV (truncation), handleIntrinsicByApplyingToShadow is precise
4653   //       and is much faster than this handler.
4654   void handleAVX512VectorDownConvert(IntrinsicInst &I) {
4655     IRBuilder<> IRB(&I);
4656 
4657     assert(I.arg_size() == 3);
4658     Value *A = I.getOperand(0);
4659     Value *WriteThrough = I.getOperand(1);
4660     Value *Mask = I.getOperand(2);
4661 
4662     assert(isFixedIntVector(A));
4663     assert(isFixedIntVector(WriteThrough));
4664 
4665     unsigned ANumElements =
4666         cast<FixedVectorType>(A->getType())->getNumElements();
4667     unsigned OutputNumElements =
4668         cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
4669     assert(ANumElements == OutputNumElements ||
4670            ANumElements * 2 == OutputNumElements);
4671 
4672     assert(Mask->getType()->isIntegerTy());
4673     assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
4674     insertCheckShadowOf(Mask, &I);
4675 
4676     assert(I.getType() == WriteThrough->getType());
4677 
4678     // Widen the mask, if necessary, to have one bit per element of the output
4679     // vector.
4680     // We want the extra bits to have '1's, so that the CreateSelect will
4681     // select the values from AShadow instead of WriteThroughShadow ("maskless"
4682     // versions of the intrinsics are sometimes implemented using an all-1's
4683     // mask and an undefined value for WriteThroughShadow). We accomplish this
4684     // by using bitwise NOT before and after the ZExt.
4685     if (ANumElements != OutputNumElements) {
4686       Mask = IRB.CreateNot(Mask);
4687       Mask = IRB.CreateZExt(Mask, Type::getIntNTy(*MS.C, OutputNumElements),
4688                             "_ms_widen_mask");
4689       Mask = IRB.CreateNot(Mask);
4690     }
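    // Worked example of the widening above, for an i8 mask and 16 output
    // elements:
    //   Mask        = m7..m0
    //   Not         -> ~m7..~m0
    //   ZExt to i16 -> 00000000 ~m7..~m0
    //   Not         -> 11111111 m7..m0
    // so the select picks AShadow for all of the extra (high) elements.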
4691     Mask = IRB.CreateBitCast(
4692         Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements));
4693 
4694     Value *AShadow = getShadow(A);
4695 
4696     // The return type might have more elements than the input.
4697     // Temporarily shrink the return type's number of elements.
4698     VectorType *ShadowType = maybeShrinkVectorShadowType(A, I);
4699 
4700     // PMOV truncates; PMOVS/PMOVUS uses signed/unsigned saturation.
4701     // This handler treats them all as truncation, which leads to some rare
4702     // false positives in the cases where the truncated bytes could
4703     // unambiguously saturate the value e.g., if A = ??????10 ????????
4704     // (big-endian), the unsigned saturated byte conversion is 11111111 i.e.,
4705     // fully defined, but the truncated byte is ????????.
4706     //
4707     // TODO: use GetMinMaxUnsigned() to handle saturation precisely.
4708     AShadow = IRB.CreateTrunc(AShadow, ShadowType, "_ms_trunc_shadow");
4709     AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);
4710 
4711     Value *WriteThroughShadow = getShadow(WriteThrough);
4712 
4713     Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThroughShadow);
4714     setShadow(&I, Shadow);
4715     setOriginForNaryOp(I);
4716   }
4717 
4718   // For sh.* compiler intrinsics:
4719   //   llvm.x86.avx512fp16.mask.{add/sub/mul/div/max/min}.sh.round
4720   //     (<8 x half>, <8 x half>, <8 x half>, i8,  i32)
4721   //      A           B           WriteThru   Mask RoundingMode
4722   //
4723   // DstShadow[0] = Mask[0] ? (AShadow[0] | BShadow[0]) : WriteThruShadow[0]
4724   // DstShadow[1..7] = AShadow[1..7]
4725   void visitGenericScalarHalfwordInst(IntrinsicInst &I) {
4726     IRBuilder<> IRB(&I);
4727 
4728     assert(I.arg_size() == 5);
4729     Value *A = I.getOperand(0);
4730     Value *B = I.getOperand(1);
4731     Value *WriteThrough = I.getOperand(2);
4732     Value *Mask = I.getOperand(3);
4733     Value *RoundingMode = I.getOperand(4);
4734 
4735     // Technically, we could probably just check whether the LSB is
4736     // initialized, but intuitively it feels like a partly uninitialized mask
4737     // is unintended, and we should warn the user immediately.
4738     insertCheckShadowOf(Mask, &I);
4739     insertCheckShadowOf(RoundingMode, &I);
4740 
4741     assert(isa<FixedVectorType>(A->getType()));
4742     unsigned NumElements =
4743         cast<FixedVectorType>(A->getType())->getNumElements();
4744     assert(NumElements == 8);
4745     assert(A->getType() == B->getType());
4746     assert(B->getType() == WriteThrough->getType());
4747     assert(Mask->getType()->getPrimitiveSizeInBits() == NumElements);
4748     assert(RoundingMode->getType()->isIntegerTy());
4749 
4750     Value *ALowerShadow = extractLowerShadow(IRB, A);
4751     Value *BLowerShadow = extractLowerShadow(IRB, B);
4752 
4753     Value *ABLowerShadow = IRB.CreateOr(ALowerShadow, BLowerShadow);
4754 
4755     Value *WriteThroughLowerShadow = extractLowerShadow(IRB, WriteThrough);
4756 
4757     Mask = IRB.CreateBitCast(
4758         Mask, FixedVectorType::get(IRB.getInt1Ty(), NumElements));
4759     Value *MaskLower =
4760         IRB.CreateExtractElement(Mask, ConstantInt::get(IRB.getInt32Ty(), 0));
4761 
4762     Value *AShadow = getShadow(A);
4763     Value *DstLowerShadow =
4764         IRB.CreateSelect(MaskLower, ABLowerShadow, WriteThroughLowerShadow);
4765     Value *DstShadow = IRB.CreateInsertElement(
4766         AShadow, DstLowerShadow, ConstantInt::get(IRB.getInt32Ty(), 0),
4767         "_msprop");
4768 
4769     setShadow(&I, DstShadow);
4770     setOriginForNaryOp(I);
4771   }
4772 
4773   // Handle Arm NEON vector load intrinsics (vld*).
4774   //
4775   // The WithLane instructions (ld[234]lane) are similar to:
4776   //     call {<4 x i32>, <4 x i32>, <4 x i32>}
4777   //          @llvm.aarch64.neon.ld3lane.v4i32.p0
4778   //              (<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i64 %lane, ptr
4779   //              %A)
4780   //
4781   // The non-WithLane instructions (ld[234], ld1x[234], ld[234]r) are similar
4782   // to:
4783   //     call {<8 x i8>, <8 x i8>} @llvm.aarch64.neon.ld2.v8i8.p0(ptr %A)
4784   void handleNEONVectorLoad(IntrinsicInst &I, bool WithLane) {
4785     unsigned int numArgs = I.arg_size();
4786 
4787     // Return type is a struct of vectors of integers or floating-point
4788     assert(I.getType()->isStructTy());
4789     [[maybe_unused]] StructType *RetTy = cast<StructType>(I.getType());
4790     assert(RetTy->getNumElements() > 0);
4791     assert(RetTy->getElementType(0)->isIntOrIntVectorTy() ||
4792            RetTy->getElementType(0)->isFPOrFPVectorTy());
4793     for (unsigned int i = 0; i < RetTy->getNumElements(); i++)
4794       assert(RetTy->getElementType(i) == RetTy->getElementType(0));
4795 
4796     if (WithLane) {
4797       // 2, 3 or 4 vectors, plus lane number, plus input pointer
4798       assert(4 <= numArgs && numArgs <= 6);
4799 
4800       // Return type is a struct of the input vectors
4801       assert(RetTy->getNumElements() + 2 == numArgs);
4802       for (unsigned int i = 0; i < RetTy->getNumElements(); i++)
4803         assert(I.getArgOperand(i)->getType() == RetTy->getElementType(0));
4804     } else {
4805       assert(numArgs == 1);
4806     }
4807 
4808     IRBuilder<> IRB(&I);
4809 
4810     SmallVector<Value *, 6> ShadowArgs;
4811     if (WithLane) {
4812       for (unsigned int i = 0; i < numArgs - 2; i++)
4813         ShadowArgs.push_back(getShadow(I.getArgOperand(i)));
4814 
4815       // Lane number, passed verbatim
4816       Value *LaneNumber = I.getArgOperand(numArgs - 2);
4817       ShadowArgs.push_back(LaneNumber);
4818 
4819       // TODO: blend shadow of lane number into output shadow?
4820       insertCheckShadowOf(LaneNumber, &I);
4821     }
4822 
4823     Value *Src = I.getArgOperand(numArgs - 1);
4824     assert(Src->getType()->isPointerTy() && "Source is not a pointer!");
4825 
4826     Type *SrcShadowTy = getShadowTy(Src);
4827     auto [SrcShadowPtr, SrcOriginPtr] =
4828         getShadowOriginPtr(Src, IRB, SrcShadowTy, Align(1), /*isStore*/ false);
4829     ShadowArgs.push_back(SrcShadowPtr);
4830 
4831     // The NEON vector load instructions handled by this function all have
4832     // integer variants. It is easier to use those rather than trying to cast
4833     // a struct of vectors of floats into a struct of vectors of integers.
4834     CallInst *CI =
4835         IRB.CreateIntrinsic(getShadowTy(&I), I.getIntrinsicID(), ShadowArgs);
4836     setShadow(&I, CI);
4837 
4838     if (!MS.TrackOrigins)
4839       return;
4840 
4841     Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
4842     setOrigin(&I, PtrSrcOrigin);
4843   }
4844 
4845   /// Handle Arm NEON vector store intrinsics (vst{2,3,4}, vst1x_{2,3,4},
4846   /// and vst{2,3,4}lane).
4847   ///
4848   /// Arm NEON vector store intrinsics have the output address (pointer) as the
4849   /// last argument, with the initial arguments being the inputs (and lane
4850   /// number for vst{2,3,4}lane). They return void.
4851   ///
4852   /// - st4 interleaves the output e.g., st4 (inA, inB, inC, inD, outP) writes
4853   ///   abcdabcdabcdabcd... into *outP
4854   /// - st1_x4 is non-interleaved e.g., st1_x4 (inA, inB, inC, inD, outP)
4855   ///   writes aaaa...bbbb...cccc...dddd... into *outP
4856   /// - st4lane has arguments of (inA, inB, inC, inD, lane, outP)
4857   /// These instructions can all be instrumented with essentially the same
4858   /// MSan logic, simply by applying the corresponding intrinsic to the shadow.
4859   void handleNEONVectorStoreIntrinsic(IntrinsicInst &I, bool useLane) {
4860     IRBuilder<> IRB(&I);
4861 
4862     // Don't use getNumOperands() because it includes the callee
4863     int numArgOperands = I.arg_size();
4864 
4865     // The last arg operand is the output (pointer)
4866     assert(numArgOperands >= 1);
4867     Value *Addr = I.getArgOperand(numArgOperands - 1);
4868     assert(Addr->getType()->isPointerTy());
4869     int skipTrailingOperands = 1;
4870 
4871     if (ClCheckAccessAddress)
4872       insertCheckShadowOf(Addr, &I);
4873 
4874     // Second-last operand is the lane number (for vst{2,3,4}lane)
4875     if (useLane) {
4876       skipTrailingOperands++;
4877       assert(numArgOperands >= static_cast<int>(skipTrailingOperands));
4878       assert(isa<IntegerType>(
4879           I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
4880     }
4881 
4882     SmallVector<Value *, 8> ShadowArgs;
4883     // All the initial operands are the inputs
4884     for (int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
4885       assert(isa<FixedVectorType>(I.getArgOperand(i)->getType()));
4886       Value *Shadow = getShadow(&I, i);
4887       ShadowArgs.append(1, Shadow);
4888     }
4889 
4890     // MSan's getShadowTy assumes the LHS is the type we want the shadow for
4891     // e.g., for:
4892     //     [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
4893     // we know the type of the output (and its shadow) is <16 x i8>.
4894     //
4895     // Arm NEON VST is unusual because the last argument is the output address:
4896     //     define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) {
4897     //         call void @llvm.aarch64.neon.st2.v16i8.p0
4898     //                   (<16 x i8> [[A]], <16 x i8> [[B]], ptr [[P]])
4899     // and we have no type information about P's operand. We must manually
4900     // compute the type (<16 x i8> x 2).
4901     FixedVectorType *OutputVectorTy = FixedVectorType::get(
4902         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getElementType(),
4903         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements() *
4904             (numArgOperands - skipTrailingOperands));
4905     Type *OutputShadowTy = getShadowTy(OutputVectorTy);
4906 
4907     if (useLane)
4908       ShadowArgs.append(1,
4909                         I.getArgOperand(numArgOperands - skipTrailingOperands));
4910 
4911     Value *OutputShadowPtr, *OutputOriginPtr;
4912     // AArch64 NEON does not need alignment (unless OS requires it)
4913     std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
4914         Addr, IRB, OutputShadowTy, Align(1), /*isStore*/ true);
4915     ShadowArgs.append(1, OutputShadowPtr);
4916 
4917     CallInst *CI =
4918         IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);
4919     setShadow(&I, CI);
4920 
4921     if (MS.TrackOrigins) {
4922       // TODO: if we modelled the vst* instruction more precisely, we could
4923       // more accurately track the origins (e.g., if both inputs are
4924       // uninitialized for vst2, we currently blame the second input, even
4925       // though part of the output depends only on the first input).
4926       //
4927       // This is particularly imprecise for vst{2,3,4}lane, since only one
4928       // lane of each input is actually copied to the output.
4929       OriginCombiner OC(this, IRB);
4930       for (int i = 0; i < numArgOperands - skipTrailingOperands; i++)
4931         OC.Add(I.getArgOperand(i));
4932 
4933       const DataLayout &DL = F.getDataLayout();
4934       OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy),
4935                             OutputOriginPtr);
4936     }
4937   }
4938 
4939   /// Handle intrinsics by applying the intrinsic to the shadows.
4940   ///
4941   /// The trailing arguments are passed verbatim to the intrinsic, though any
4942   /// uninitialized trailing arguments can also taint the shadow e.g., for an
4943   /// intrinsic with one trailing verbatim argument:
4944   ///     out = intrinsic(var1, var2, opType)
4945   /// we compute:
4946   ///     shadow[out] =
4947   ///         intrinsic(shadow[var1], shadow[var2], opType) | shadow[opType]
4948   ///
4949   /// Typically, shadowIntrinsicID will be specified by the caller to be
4950   /// I.getIntrinsicID(), but the caller can choose to replace it with another
4951   /// intrinsic of the same type.
4952   ///
4953   /// CAUTION: this assumes that the intrinsic will handle arbitrary
4954   ///          bit-patterns (for example, if the intrinsic accepts floats for
4955   ///          var1, we require that it doesn't care if inputs are NaNs).
4956   ///
4957   /// For example, this can be applied to the Arm NEON vector table intrinsics
4958   /// (tbl{1,2,3,4}).
4959   ///
4960   /// The origin is approximated using setOriginForNaryOp.
4961   void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
4962                                          Intrinsic::ID shadowIntrinsicID,
4963                                          unsigned int trailingVerbatimArgs) {
4964     IRBuilder<> IRB(&I);
4965 
4966     assert(trailingVerbatimArgs < I.arg_size());
4967 
4968     SmallVector<Value *, 8> ShadowArgs;
4969     // Don't use getNumOperands() because it includes the callee
4970     for (unsigned int i = 0; i < I.arg_size() - trailingVerbatimArgs; i++) {
4971       Value *Shadow = getShadow(&I, i);
4972 
4973       // Shadows are integer-ish types but some intrinsics require a
4974       // different (e.g., floating-point) type.
4975       ShadowArgs.push_back(
4976           IRB.CreateBitCast(Shadow, I.getArgOperand(i)->getType()));
4977     }
4978 
4979     for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
4980          i++) {
4981       Value *Arg = I.getArgOperand(i);
4982       ShadowArgs.push_back(Arg);
4983     }
4984 
4985     CallInst *CI =
4986         IRB.CreateIntrinsic(I.getType(), shadowIntrinsicID, ShadowArgs);
4987     Value *CombinedShadow = CI;
4988 
4989     // Combine the computed shadow with the shadow of trailing args
4990     for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
4991          i++) {
4992       Value *Shadow =
4993           CreateShadowCast(IRB, getShadow(&I, i), CombinedShadow->getType());
4994       CombinedShadow = IRB.CreateOr(Shadow, CombinedShadow, "_msprop");
4995     }
4996 
4997     setShadow(&I, IRB.CreateBitCast(CombinedShadow, getShadowTy(&I)));
4998 
4999     setOriginForNaryOp(I);
5000   }
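
  // Illustrative sketch of the scheme above (placeholder value names): for
  //   %out = call <16 x i8> @llvm.aarch64.neon.tbl1(<16 x i8> %t, <16 x i8> %idx)
  // with trailingVerbatimArgs == 1, the emitted shadow computation is roughly
  //   %s = call <16 x i8> @llvm.aarch64.neon.tbl1(<16 x i8> shadow(%t), <16 x i8> %idx)
  //   shadow(%out) = or <16 x i8> %s, shadow(%idx)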
5001 
5002   // Approximation only
5003   //
5004   // e.g., <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64)
5005   void handleNEONVectorMultiplyIntrinsic(IntrinsicInst &I) {
5006     assert(I.arg_size() == 2);
5007 
5008     handleShadowOr(I);
5009   }
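
  // Sketch of the approximation: for
  //   %out = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %a, i64 %b)
  // handleShadowOr makes shadow(%out) the bitwise OR of the operands' shadows
  // (cast to the result's shadow type). This is approximate because a real
  // multiply can carry an uninitialized bit into higher bit positions.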
5010 
5011   void visitIntrinsicInst(IntrinsicInst &I) {
5012     switch (I.getIntrinsicID()) {
5013     case Intrinsic::uadd_with_overflow:
5014     case Intrinsic::sadd_with_overflow:
5015     case Intrinsic::usub_with_overflow:
5016     case Intrinsic::ssub_with_overflow:
5017     case Intrinsic::umul_with_overflow:
5018     case Intrinsic::smul_with_overflow:
5019       handleArithmeticWithOverflow(I);
5020       break;
5021     case Intrinsic::abs:
5022       handleAbsIntrinsic(I);
5023       break;
5024     case Intrinsic::bitreverse:
5025       handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
5026                                         /*trailingVerbatimArgs*/ 0);
5027       break;
5028     case Intrinsic::is_fpclass:
5029       handleIsFpClass(I);
5030       break;
5031     case Intrinsic::lifetime_start:
5032       handleLifetimeStart(I);
5033       break;
5034     case Intrinsic::launder_invariant_group:
5035     case Intrinsic::strip_invariant_group:
5036       handleInvariantGroup(I);
5037       break;
5038     case Intrinsic::bswap:
5039       handleBswap(I);
5040       break;
5041     case Intrinsic::ctlz:
5042     case Intrinsic::cttz:
5043       handleCountLeadingTrailingZeros(I);
5044       break;
5045     case Intrinsic::masked_compressstore:
5046       handleMaskedCompressStore(I);
5047       break;
5048     case Intrinsic::masked_expandload:
5049       handleMaskedExpandLoad(I);
5050       break;
5051     case Intrinsic::masked_gather:
5052       handleMaskedGather(I);
5053       break;
5054     case Intrinsic::masked_scatter:
5055       handleMaskedScatter(I);
5056       break;
5057     case Intrinsic::masked_store:
5058       handleMaskedStore(I);
5059       break;
5060     case Intrinsic::masked_load:
5061       handleMaskedLoad(I);
5062       break;
5063     case Intrinsic::vector_reduce_and:
5064       handleVectorReduceAndIntrinsic(I);
5065       break;
5066     case Intrinsic::vector_reduce_or:
5067       handleVectorReduceOrIntrinsic(I);
5068       break;
5069 
5070     case Intrinsic::vector_reduce_add:
5071     case Intrinsic::vector_reduce_xor:
5072     case Intrinsic::vector_reduce_mul:
5073     // Signed/Unsigned Min/Max
5074     // TODO: handling similarly to AND/OR may be more precise.
5075     case Intrinsic::vector_reduce_smax:
5076     case Intrinsic::vector_reduce_smin:
5077     case Intrinsic::vector_reduce_umax:
5078     case Intrinsic::vector_reduce_umin:
5079     // TODO: this has no false positives, but arguably we should check that all
5080     // the bits are initialized.
5081     case Intrinsic::vector_reduce_fmax:
5082     case Intrinsic::vector_reduce_fmin:
5083       handleVectorReduceIntrinsic(I, /*AllowShadowCast=*/false);
5084       break;
5085 
5086     case Intrinsic::vector_reduce_fadd:
5087     case Intrinsic::vector_reduce_fmul:
5088       handleVectorReduceWithStarterIntrinsic(I);
5089       break;
5090 
5091     case Intrinsic::x86_sse_stmxcsr:
5092       handleStmxcsr(I);
5093       break;
5094     case Intrinsic::x86_sse_ldmxcsr:
5095       handleLdmxcsr(I);
5096       break;
5097     case Intrinsic::x86_avx512_vcvtsd2usi64:
5098     case Intrinsic::x86_avx512_vcvtsd2usi32:
5099     case Intrinsic::x86_avx512_vcvtss2usi64:
5100     case Intrinsic::x86_avx512_vcvtss2usi32:
5101     case Intrinsic::x86_avx512_cvttss2usi64:
5102     case Intrinsic::x86_avx512_cvttss2usi:
5103     case Intrinsic::x86_avx512_cvttsd2usi64:
5104     case Intrinsic::x86_avx512_cvttsd2usi:
5105     case Intrinsic::x86_avx512_cvtusi2ss:
5106     case Intrinsic::x86_avx512_cvtusi642sd:
5107     case Intrinsic::x86_avx512_cvtusi642ss:
5108       handleSSEVectorConvertIntrinsic(I, 1, true);
5109       break;
5110     case Intrinsic::x86_sse2_cvtsd2si64:
5111     case Intrinsic::x86_sse2_cvtsd2si:
5112     case Intrinsic::x86_sse2_cvtsd2ss:
5113     case Intrinsic::x86_sse2_cvttsd2si64:
5114     case Intrinsic::x86_sse2_cvttsd2si:
5115     case Intrinsic::x86_sse_cvtss2si64:
5116     case Intrinsic::x86_sse_cvtss2si:
5117     case Intrinsic::x86_sse_cvttss2si64:
5118     case Intrinsic::x86_sse_cvttss2si:
5119       handleSSEVectorConvertIntrinsic(I, 1);
5120       break;
5121     case Intrinsic::x86_sse_cvtps2pi:
5122     case Intrinsic::x86_sse_cvttps2pi:
5123       handleSSEVectorConvertIntrinsic(I, 2);
5124       break;
5125 
5126       // TODO:
5127       //   <1 x i64> @llvm.x86.sse.cvtpd2pi(<2 x double>)
5128       //   <2 x double> @llvm.x86.sse.cvtpi2pd(<1 x i64>)
5129       //   <4 x float> @llvm.x86.sse.cvtpi2ps(<4 x float>, <1 x i64>)
5130 
5131     case Intrinsic::x86_vcvtps2ph_128:
5132     case Intrinsic::x86_vcvtps2ph_256: {
5133       handleSSEVectorConvertIntrinsicByProp(I, /*HasRoundingMode=*/true);
5134       break;
5135     }
5136 
5137     case Intrinsic::x86_sse2_cvtpd2ps:
5138     case Intrinsic::x86_sse2_cvtps2dq:
5139     case Intrinsic::x86_sse2_cvtpd2dq:
5140     case Intrinsic::x86_sse2_cvttps2dq:
5141     case Intrinsic::x86_sse2_cvttpd2dq:
5142     case Intrinsic::x86_avx_cvt_pd2_ps_256:
5143     case Intrinsic::x86_avx_cvt_ps2dq_256:
5144     case Intrinsic::x86_avx_cvt_pd2dq_256:
5145     case Intrinsic::x86_avx_cvtt_ps2dq_256:
5146     case Intrinsic::x86_avx_cvtt_pd2dq_256: {
5147       handleSSEVectorConvertIntrinsicByProp(I, /*HasRoundingMode=*/false);
5148       break;
5149     }
5150 
5151     case Intrinsic::x86_avx512_psll_w_512:
5152     case Intrinsic::x86_avx512_psll_d_512:
5153     case Intrinsic::x86_avx512_psll_q_512:
5154     case Intrinsic::x86_avx512_pslli_w_512:
5155     case Intrinsic::x86_avx512_pslli_d_512:
5156     case Intrinsic::x86_avx512_pslli_q_512:
5157     case Intrinsic::x86_avx512_psrl_w_512:
5158     case Intrinsic::x86_avx512_psrl_d_512:
5159     case Intrinsic::x86_avx512_psrl_q_512:
5160     case Intrinsic::x86_avx512_psra_w_512:
5161     case Intrinsic::x86_avx512_psra_d_512:
5162     case Intrinsic::x86_avx512_psra_q_512:
5163     case Intrinsic::x86_avx512_psrli_w_512:
5164     case Intrinsic::x86_avx512_psrli_d_512:
5165     case Intrinsic::x86_avx512_psrli_q_512:
5166     case Intrinsic::x86_avx512_psrai_w_512:
5167     case Intrinsic::x86_avx512_psrai_d_512:
5168     case Intrinsic::x86_avx512_psrai_q_512:
5169     case Intrinsic::x86_avx512_psra_q_256:
5170     case Intrinsic::x86_avx512_psra_q_128:
5171     case Intrinsic::x86_avx512_psrai_q_256:
5172     case Intrinsic::x86_avx512_psrai_q_128:
5173     case Intrinsic::x86_avx2_psll_w:
5174     case Intrinsic::x86_avx2_psll_d:
5175     case Intrinsic::x86_avx2_psll_q:
5176     case Intrinsic::x86_avx2_pslli_w:
5177     case Intrinsic::x86_avx2_pslli_d:
5178     case Intrinsic::x86_avx2_pslli_q:
5179     case Intrinsic::x86_avx2_psrl_w:
5180     case Intrinsic::x86_avx2_psrl_d:
5181     case Intrinsic::x86_avx2_psrl_q:
5182     case Intrinsic::x86_avx2_psra_w:
5183     case Intrinsic::x86_avx2_psra_d:
5184     case Intrinsic::x86_avx2_psrli_w:
5185     case Intrinsic::x86_avx2_psrli_d:
5186     case Intrinsic::x86_avx2_psrli_q:
5187     case Intrinsic::x86_avx2_psrai_w:
5188     case Intrinsic::x86_avx2_psrai_d:
5189     case Intrinsic::x86_sse2_psll_w:
5190     case Intrinsic::x86_sse2_psll_d:
5191     case Intrinsic::x86_sse2_psll_q:
5192     case Intrinsic::x86_sse2_pslli_w:
5193     case Intrinsic::x86_sse2_pslli_d:
5194     case Intrinsic::x86_sse2_pslli_q:
5195     case Intrinsic::x86_sse2_psrl_w:
5196     case Intrinsic::x86_sse2_psrl_d:
5197     case Intrinsic::x86_sse2_psrl_q:
5198     case Intrinsic::x86_sse2_psra_w:
5199     case Intrinsic::x86_sse2_psra_d:
5200     case Intrinsic::x86_sse2_psrli_w:
5201     case Intrinsic::x86_sse2_psrli_d:
5202     case Intrinsic::x86_sse2_psrli_q:
5203     case Intrinsic::x86_sse2_psrai_w:
5204     case Intrinsic::x86_sse2_psrai_d:
5205     case Intrinsic::x86_mmx_psll_w:
5206     case Intrinsic::x86_mmx_psll_d:
5207     case Intrinsic::x86_mmx_psll_q:
5208     case Intrinsic::x86_mmx_pslli_w:
5209     case Intrinsic::x86_mmx_pslli_d:
5210     case Intrinsic::x86_mmx_pslli_q:
5211     case Intrinsic::x86_mmx_psrl_w:
5212     case Intrinsic::x86_mmx_psrl_d:
5213     case Intrinsic::x86_mmx_psrl_q:
5214     case Intrinsic::x86_mmx_psra_w:
5215     case Intrinsic::x86_mmx_psra_d:
5216     case Intrinsic::x86_mmx_psrli_w:
5217     case Intrinsic::x86_mmx_psrli_d:
5218     case Intrinsic::x86_mmx_psrli_q:
5219     case Intrinsic::x86_mmx_psrai_w:
5220     case Intrinsic::x86_mmx_psrai_d:
5221     case Intrinsic::aarch64_neon_rshrn:
5222     case Intrinsic::aarch64_neon_sqrshl:
5223     case Intrinsic::aarch64_neon_sqrshrn:
5224     case Intrinsic::aarch64_neon_sqrshrun:
5225     case Intrinsic::aarch64_neon_sqshl:
5226     case Intrinsic::aarch64_neon_sqshlu:
5227     case Intrinsic::aarch64_neon_sqshrn:
5228     case Intrinsic::aarch64_neon_sqshrun:
5229     case Intrinsic::aarch64_neon_srshl:
5230     case Intrinsic::aarch64_neon_sshl:
5231     case Intrinsic::aarch64_neon_uqrshl:
5232     case Intrinsic::aarch64_neon_uqrshrn:
5233     case Intrinsic::aarch64_neon_uqshl:
5234     case Intrinsic::aarch64_neon_uqshrn:
5235     case Intrinsic::aarch64_neon_urshl:
5236     case Intrinsic::aarch64_neon_ushl:
5237       // Not handled here: aarch64_neon_vsli (vector shift left and insert)
5238       handleVectorShiftIntrinsic(I, /* Variable */ false);
5239       break;
5240     case Intrinsic::x86_avx2_psllv_d:
5241     case Intrinsic::x86_avx2_psllv_d_256:
5242     case Intrinsic::x86_avx512_psllv_d_512:
5243     case Intrinsic::x86_avx2_psllv_q:
5244     case Intrinsic::x86_avx2_psllv_q_256:
5245     case Intrinsic::x86_avx512_psllv_q_512:
5246     case Intrinsic::x86_avx2_psrlv_d:
5247     case Intrinsic::x86_avx2_psrlv_d_256:
5248     case Intrinsic::x86_avx512_psrlv_d_512:
5249     case Intrinsic::x86_avx2_psrlv_q:
5250     case Intrinsic::x86_avx2_psrlv_q_256:
5251     case Intrinsic::x86_avx512_psrlv_q_512:
5252     case Intrinsic::x86_avx2_psrav_d:
5253     case Intrinsic::x86_avx2_psrav_d_256:
5254     case Intrinsic::x86_avx512_psrav_d_512:
5255     case Intrinsic::x86_avx512_psrav_q_128:
5256     case Intrinsic::x86_avx512_psrav_q_256:
5257     case Intrinsic::x86_avx512_psrav_q_512:
5258       handleVectorShiftIntrinsic(I, /* Variable */ true);
5259       break;
5260 
5261     case Intrinsic::x86_sse2_packsswb_128:
5262     case Intrinsic::x86_sse2_packssdw_128:
5263     case Intrinsic::x86_sse2_packuswb_128:
5264     case Intrinsic::x86_sse41_packusdw:
5265     case Intrinsic::x86_avx2_packsswb:
5266     case Intrinsic::x86_avx2_packssdw:
5267     case Intrinsic::x86_avx2_packuswb:
5268     case Intrinsic::x86_avx2_packusdw:
5269       handleVectorPackIntrinsic(I);
5270       break;
5271 
5272     case Intrinsic::x86_sse41_pblendvb:
5273     case Intrinsic::x86_sse41_blendvpd:
5274     case Intrinsic::x86_sse41_blendvps:
5275     case Intrinsic::x86_avx_blendv_pd_256:
5276     case Intrinsic::x86_avx_blendv_ps_256:
5277     case Intrinsic::x86_avx2_pblendvb:
5278       handleBlendvIntrinsic(I);
5279       break;
5280 
5281     case Intrinsic::x86_avx_dp_ps_256:
5282     case Intrinsic::x86_sse41_dppd:
5283     case Intrinsic::x86_sse41_dpps:
5284       handleDppIntrinsic(I);
5285       break;
5286 
5287     case Intrinsic::x86_mmx_packsswb:
5288     case Intrinsic::x86_mmx_packuswb:
5289       handleVectorPackIntrinsic(I, 16);
5290       break;
5291 
5292     case Intrinsic::x86_mmx_packssdw:
5293       handleVectorPackIntrinsic(I, 32);
5294       break;
5295 
5296     case Intrinsic::x86_mmx_psad_bw:
5297       handleVectorSadIntrinsic(I, true);
5298       break;
5299     case Intrinsic::x86_sse2_psad_bw:
5300     case Intrinsic::x86_avx2_psad_bw:
5301       handleVectorSadIntrinsic(I);
5302       break;
5303 
5304     case Intrinsic::x86_sse2_pmadd_wd:
5305     case Intrinsic::x86_avx2_pmadd_wd:
5306     case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
5307     case Intrinsic::x86_avx2_pmadd_ub_sw:
5308       handleVectorPmaddIntrinsic(I);
5309       break;
5310 
5311     case Intrinsic::x86_ssse3_pmadd_ub_sw:
5312       handleVectorPmaddIntrinsic(I, 8);
5313       break;
5314 
5315     case Intrinsic::x86_mmx_pmadd_wd:
5316       handleVectorPmaddIntrinsic(I, 16);
5317       break;
5318 
5319     case Intrinsic::x86_sse_cmp_ss:
5320     case Intrinsic::x86_sse2_cmp_sd:
5321     case Intrinsic::x86_sse_comieq_ss:
5322     case Intrinsic::x86_sse_comilt_ss:
5323     case Intrinsic::x86_sse_comile_ss:
5324     case Intrinsic::x86_sse_comigt_ss:
5325     case Intrinsic::x86_sse_comige_ss:
5326     case Intrinsic::x86_sse_comineq_ss:
5327     case Intrinsic::x86_sse_ucomieq_ss:
5328     case Intrinsic::x86_sse_ucomilt_ss:
5329     case Intrinsic::x86_sse_ucomile_ss:
5330     case Intrinsic::x86_sse_ucomigt_ss:
5331     case Intrinsic::x86_sse_ucomige_ss:
5332     case Intrinsic::x86_sse_ucomineq_ss:
5333     case Intrinsic::x86_sse2_comieq_sd:
5334     case Intrinsic::x86_sse2_comilt_sd:
5335     case Intrinsic::x86_sse2_comile_sd:
5336     case Intrinsic::x86_sse2_comigt_sd:
5337     case Intrinsic::x86_sse2_comige_sd:
5338     case Intrinsic::x86_sse2_comineq_sd:
5339     case Intrinsic::x86_sse2_ucomieq_sd:
5340     case Intrinsic::x86_sse2_ucomilt_sd:
5341     case Intrinsic::x86_sse2_ucomile_sd:
5342     case Intrinsic::x86_sse2_ucomigt_sd:
5343     case Intrinsic::x86_sse2_ucomige_sd:
5344     case Intrinsic::x86_sse2_ucomineq_sd:
5345       handleVectorCompareScalarIntrinsic(I);
5346       break;
5347 
5348     case Intrinsic::x86_avx_cmp_pd_256:
5349     case Intrinsic::x86_avx_cmp_ps_256:
5350     case Intrinsic::x86_sse2_cmp_pd:
5351     case Intrinsic::x86_sse_cmp_ps:
5352       handleVectorComparePackedIntrinsic(I);
5353       break;
5354 
5355     case Intrinsic::x86_bmi_bextr_32:
5356     case Intrinsic::x86_bmi_bextr_64:
5357     case Intrinsic::x86_bmi_bzhi_32:
5358     case Intrinsic::x86_bmi_bzhi_64:
5359     case Intrinsic::x86_bmi_pdep_32:
5360     case Intrinsic::x86_bmi_pdep_64:
5361     case Intrinsic::x86_bmi_pext_32:
5362     case Intrinsic::x86_bmi_pext_64:
5363       handleBmiIntrinsic(I);
5364       break;
5365 
5366     case Intrinsic::x86_pclmulqdq:
5367     case Intrinsic::x86_pclmulqdq_256:
5368     case Intrinsic::x86_pclmulqdq_512:
5369       handlePclmulIntrinsic(I);
5370       break;
5371 
5372     case Intrinsic::x86_avx_round_pd_256:
5373     case Intrinsic::x86_avx_round_ps_256:
5374     case Intrinsic::x86_sse41_round_pd:
5375     case Intrinsic::x86_sse41_round_ps:
5376       handleRoundPdPsIntrinsic(I);
5377       break;
5378 
5379     case Intrinsic::x86_sse41_round_sd:
5380     case Intrinsic::x86_sse41_round_ss:
5381       handleUnarySdSsIntrinsic(I);
5382       break;
5383 
5384     case Intrinsic::x86_sse2_max_sd:
5385     case Intrinsic::x86_sse_max_ss:
5386     case Intrinsic::x86_sse2_min_sd:
5387     case Intrinsic::x86_sse_min_ss:
5388       handleBinarySdSsIntrinsic(I);
5389       break;
5390 
5391     case Intrinsic::x86_avx_vtestc_pd:
5392     case Intrinsic::x86_avx_vtestc_pd_256:
5393     case Intrinsic::x86_avx_vtestc_ps:
5394     case Intrinsic::x86_avx_vtestc_ps_256:
5395     case Intrinsic::x86_avx_vtestnzc_pd:
5396     case Intrinsic::x86_avx_vtestnzc_pd_256:
5397     case Intrinsic::x86_avx_vtestnzc_ps:
5398     case Intrinsic::x86_avx_vtestnzc_ps_256:
5399     case Intrinsic::x86_avx_vtestz_pd:
5400     case Intrinsic::x86_avx_vtestz_pd_256:
5401     case Intrinsic::x86_avx_vtestz_ps:
5402     case Intrinsic::x86_avx_vtestz_ps_256:
5403     case Intrinsic::x86_avx_ptestc_256:
5404     case Intrinsic::x86_avx_ptestnzc_256:
5405     case Intrinsic::x86_avx_ptestz_256:
5406     case Intrinsic::x86_sse41_ptestc:
5407     case Intrinsic::x86_sse41_ptestnzc:
5408     case Intrinsic::x86_sse41_ptestz:
5409       handleVtestIntrinsic(I);
5410       break;
5411 
5412     // Packed Horizontal Add/Subtract
5413     case Intrinsic::x86_ssse3_phadd_w:
5414     case Intrinsic::x86_ssse3_phadd_w_128:
5415     case Intrinsic::x86_avx2_phadd_w:
5416     case Intrinsic::x86_ssse3_phsub_w:
5417     case Intrinsic::x86_ssse3_phsub_w_128:
5418     case Intrinsic::x86_avx2_phsub_w: {
5419       handlePairwiseShadowOrIntrinsic(I, /*ReinterpretElemWidth=*/16);
5420       break;
5421     }
5422 
5423     // Packed Horizontal Add/Subtract
5424     case Intrinsic::x86_ssse3_phadd_d:
5425     case Intrinsic::x86_ssse3_phadd_d_128:
5426     case Intrinsic::x86_avx2_phadd_d:
5427     case Intrinsic::x86_ssse3_phsub_d:
5428     case Intrinsic::x86_ssse3_phsub_d_128:
5429     case Intrinsic::x86_avx2_phsub_d: {
5430       handlePairwiseShadowOrIntrinsic(I, /*ReinterpretElemWidth=*/32);
5431       break;
5432     }
5433 
5434     // Packed Horizontal Add/Subtract and Saturate
5435     case Intrinsic::x86_ssse3_phadd_sw:
5436     case Intrinsic::x86_ssse3_phadd_sw_128:
5437     case Intrinsic::x86_avx2_phadd_sw:
5438     case Intrinsic::x86_ssse3_phsub_sw:
5439     case Intrinsic::x86_ssse3_phsub_sw_128:
5440     case Intrinsic::x86_avx2_phsub_sw: {
5441       handlePairwiseShadowOrIntrinsic(I, /*ReinterpretElemWidth=*/16);
5442       break;
5443     }
5444 
5445     // Packed Single/Double Precision Floating-Point Horizontal Add
5446     case Intrinsic::x86_sse3_hadd_ps:
5447     case Intrinsic::x86_sse3_hadd_pd:
5448     case Intrinsic::x86_avx_hadd_pd_256:
5449     case Intrinsic::x86_avx_hadd_ps_256:
5450     case Intrinsic::x86_sse3_hsub_ps:
5451     case Intrinsic::x86_sse3_hsub_pd:
5452     case Intrinsic::x86_avx_hsub_pd_256:
5453     case Intrinsic::x86_avx_hsub_ps_256: {
5454       handlePairwiseShadowOrIntrinsic(I);
5455       break;
5456     }
5457 
5458     case Intrinsic::x86_avx_maskstore_ps:
5459     case Intrinsic::x86_avx_maskstore_pd:
5460     case Intrinsic::x86_avx_maskstore_ps_256:
5461     case Intrinsic::x86_avx_maskstore_pd_256:
5462     case Intrinsic::x86_avx2_maskstore_d:
5463     case Intrinsic::x86_avx2_maskstore_q:
5464     case Intrinsic::x86_avx2_maskstore_d_256:
5465     case Intrinsic::x86_avx2_maskstore_q_256: {
5466       handleAVXMaskedStore(I);
5467       break;
5468     }
5469 
5470     case Intrinsic::x86_avx_maskload_ps:
5471     case Intrinsic::x86_avx_maskload_pd:
5472     case Intrinsic::x86_avx_maskload_ps_256:
5473     case Intrinsic::x86_avx_maskload_pd_256:
5474     case Intrinsic::x86_avx2_maskload_d:
5475     case Intrinsic::x86_avx2_maskload_q:
5476     case Intrinsic::x86_avx2_maskload_d_256:
5477     case Intrinsic::x86_avx2_maskload_q_256: {
5478       handleAVXMaskedLoad(I);
5479       break;
5480     }
5481 
5482     // Packed floating-point arithmetic
5483     case Intrinsic::x86_avx512fp16_add_ph_512:
5484     case Intrinsic::x86_avx512fp16_sub_ph_512:
5485     case Intrinsic::x86_avx512fp16_mul_ph_512:
5486     case Intrinsic::x86_avx512fp16_div_ph_512:
5487     case Intrinsic::x86_avx512fp16_max_ph_512:
5488     case Intrinsic::x86_avx512fp16_min_ph_512:
5489     case Intrinsic::x86_avx512_min_ps_512:
5490     case Intrinsic::x86_avx512_min_pd_512:
5491     case Intrinsic::x86_avx512_max_ps_512:
5492     case Intrinsic::x86_avx512_max_pd_512: {
5493       // These AVX512 variants contain the rounding mode as a trailing flag.
5494       // Earlier variants do not have a trailing flag and are already handled
5495       // by maybeHandleSimpleNomemIntrinsic(I, 0) via handleUnknownIntrinsic.
5496       [[maybe_unused]] bool Success =
5497           maybeHandleSimpleNomemIntrinsic(I, /*trailingFlags=*/1);
5498       assert(Success);
5499       break;
5500     }
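
    // Shape of these intrinsics (a sketch, not a definitive signature):
    //   <32 x half> @llvm.x86.avx512fp16.add.ph.512(<32 x half> %a,
    //                                               <32 x half> %b, i32 %rc)
    // where the trailing i32 rounding-mode flag is the single operand excluded
    // from shadow propagation.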
5501 
5502     case Intrinsic::x86_avx_vpermilvar_pd:
5503     case Intrinsic::x86_avx_vpermilvar_pd_256:
5504     case Intrinsic::x86_avx512_vpermilvar_pd_512:
5505     case Intrinsic::x86_avx_vpermilvar_ps:
5506     case Intrinsic::x86_avx_vpermilvar_ps_256:
5507     case Intrinsic::x86_avx512_vpermilvar_ps_512: {
5508       handleAVXVpermilvar(I);
5509       break;
5510     }
5511 
5512     case Intrinsic::x86_avx512_vpermi2var_d_128:
5513     case Intrinsic::x86_avx512_vpermi2var_d_256:
5514     case Intrinsic::x86_avx512_vpermi2var_d_512:
5515     case Intrinsic::x86_avx512_vpermi2var_hi_128:
5516     case Intrinsic::x86_avx512_vpermi2var_hi_256:
5517     case Intrinsic::x86_avx512_vpermi2var_hi_512:
5518     case Intrinsic::x86_avx512_vpermi2var_pd_128:
5519     case Intrinsic::x86_avx512_vpermi2var_pd_256:
5520     case Intrinsic::x86_avx512_vpermi2var_pd_512:
5521     case Intrinsic::x86_avx512_vpermi2var_ps_128:
5522     case Intrinsic::x86_avx512_vpermi2var_ps_256:
5523     case Intrinsic::x86_avx512_vpermi2var_ps_512:
5524     case Intrinsic::x86_avx512_vpermi2var_q_128:
5525     case Intrinsic::x86_avx512_vpermi2var_q_256:
5526     case Intrinsic::x86_avx512_vpermi2var_q_512:
5527     case Intrinsic::x86_avx512_vpermi2var_qi_128:
5528     case Intrinsic::x86_avx512_vpermi2var_qi_256:
5529     case Intrinsic::x86_avx512_vpermi2var_qi_512:
5530       handleAVXVpermi2var(I);
5531       break;
5532 
5533     case Intrinsic::x86_avx512_mask_cvtps2dq_512: {
5534       handleAVX512VectorConvertFPToInt(I);
5535       break;
5536     }
5537 
5538     // AVX512 PMOV: Packed MOV, with truncation
5539     // Precisely handled by applying the same intrinsic to the shadow
5540     case Intrinsic::x86_avx512_mask_pmov_dw_512:
5541     case Intrinsic::x86_avx512_mask_pmov_db_512:
5542     case Intrinsic::x86_avx512_mask_pmov_qb_512:
5543     case Intrinsic::x86_avx512_mask_pmov_qw_512: {
5544       // Intrinsic::x86_avx512_mask_pmov_{qd,wb}_512 were removed in
5545       // f608dc1f5775ee880e8ea30e2d06ab5a4a935c22
5546       handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
5547                                         /*trailingVerbatimArgs=*/1);
5548       break;
5549     }
5550 
5551     // AVX512 PMOV{S,US}: Packed MOV, with signed/unsigned saturation
5552     // Approximately handled using the corresponding truncation intrinsic
5553     // TODO: improve handleAVX512VectorDownConvert to precisely model saturation
5554     case Intrinsic::x86_avx512_mask_pmovs_dw_512:
5555     case Intrinsic::x86_avx512_mask_pmovus_dw_512: {
5556       handleIntrinsicByApplyingToShadow(I,
5557                                         Intrinsic::x86_avx512_mask_pmov_dw_512,
5558                                         /*trailingVerbatimArgs=*/1);
5559       break;
5560     }
5561 
5562     case Intrinsic::x86_avx512_mask_pmovs_db_512:
5563     case Intrinsic::x86_avx512_mask_pmovus_db_512: {
5564       handleIntrinsicByApplyingToShadow(I,
5565                                         Intrinsic::x86_avx512_mask_pmov_db_512,
5566                                         /*trailingVerbatimArgs=*/1);
5567       break;
5568     }
5569 
5570     case Intrinsic::x86_avx512_mask_pmovs_qb_512:
5571     case Intrinsic::x86_avx512_mask_pmovus_qb_512: {
5572       handleIntrinsicByApplyingToShadow(I,
5573                                         Intrinsic::x86_avx512_mask_pmov_qb_512,
5574                                         /*trailingVerbatimArgs=*/1);
5575       break;
5576     }
5577 
5578     case Intrinsic::x86_avx512_mask_pmovs_qw_512:
5579     case Intrinsic::x86_avx512_mask_pmovus_qw_512: {
5580       handleIntrinsicByApplyingToShadow(I,
5581                                         Intrinsic::x86_avx512_mask_pmov_qw_512,
5582                                         /*trailingVerbatimArgs=*/1);
5583       break;
5584     }
5585 
5586     case Intrinsic::x86_avx512_mask_pmovs_qd_512:
5587     case Intrinsic::x86_avx512_mask_pmovus_qd_512:
5588     case Intrinsic::x86_avx512_mask_pmovs_wb_512:
5589     case Intrinsic::x86_avx512_mask_pmovus_wb_512: {
5590       // Since Intrinsic::x86_avx512_mask_pmov_{qd,wb}_512 do not exist, we
5591       // cannot use handleIntrinsicByApplyingToShadow. Instead, we call the
5592       // slow-path handler.
5593       handleAVX512VectorDownConvert(I);
5594       break;
5595     }
5596 
5597     // AVX512 FP16 Arithmetic
5598     case Intrinsic::x86_avx512fp16_mask_add_sh_round:
5599     case Intrinsic::x86_avx512fp16_mask_sub_sh_round:
5600     case Intrinsic::x86_avx512fp16_mask_mul_sh_round:
5601     case Intrinsic::x86_avx512fp16_mask_div_sh_round:
5602     case Intrinsic::x86_avx512fp16_mask_max_sh_round:
5603     case Intrinsic::x86_avx512fp16_mask_min_sh_round: {
5604       visitGenericScalarHalfwordInst(I);
5605       break;
5606     }
5607 
5608     case Intrinsic::fshl:
5609     case Intrinsic::fshr:
5610       handleFunnelShift(I);
5611       break;
5612 
5613     case Intrinsic::is_constant:
5614       // The result of llvm.is.constant() is always defined.
5615       setShadow(&I, getCleanShadow(&I));
5616       setOrigin(&I, getCleanOrigin());
5617       break;
5618 
5619     // TODO: handling max/min similarly to AND/OR may be more precise
5620     // Floating-Point Maximum/Minimum Pairwise
5621     case Intrinsic::aarch64_neon_fmaxp:
5622     case Intrinsic::aarch64_neon_fminp:
5623     // Floating-Point Maximum/Minimum Number Pairwise
5624     case Intrinsic::aarch64_neon_fmaxnmp:
5625     case Intrinsic::aarch64_neon_fminnmp:
5626     // Signed/Unsigned Maximum/Minimum Pairwise
5627     case Intrinsic::aarch64_neon_smaxp:
5628     case Intrinsic::aarch64_neon_sminp:
5629     case Intrinsic::aarch64_neon_umaxp:
5630     case Intrinsic::aarch64_neon_uminp:
5631     // Add Pairwise
5632     case Intrinsic::aarch64_neon_addp:
5633     // Floating-point Add Pairwise
5634     case Intrinsic::aarch64_neon_faddp:
5635     // Add Long Pairwise
5636     case Intrinsic::aarch64_neon_saddlp:
5637     case Intrinsic::aarch64_neon_uaddlp: {
5638       handlePairwiseShadowOrIntrinsic(I);
5639       break;
5640     }
5641 
5642     // Floating-point Convert to integer, rounding to nearest with ties to Away
5643     case Intrinsic::aarch64_neon_fcvtas:
5644     case Intrinsic::aarch64_neon_fcvtau:
5645     // Floating-point convert to integer, rounding toward minus infinity
5646     case Intrinsic::aarch64_neon_fcvtms:
5647     case Intrinsic::aarch64_neon_fcvtmu:
5648     // Floating-point convert to integer, rounding to nearest with ties to even
5649     case Intrinsic::aarch64_neon_fcvtns:
5650     case Intrinsic::aarch64_neon_fcvtnu:
5651     // Floating-point convert to integer, rounding toward plus infinity
5652     case Intrinsic::aarch64_neon_fcvtps:
5653     case Intrinsic::aarch64_neon_fcvtpu:
5654     // Floating-point Convert to integer, rounding toward Zero
5655     case Intrinsic::aarch64_neon_fcvtzs:
5656     case Intrinsic::aarch64_neon_fcvtzu:
5657     // Floating-point convert to lower precision narrow, rounding to odd
5658     case Intrinsic::aarch64_neon_fcvtxn: {
5659       handleNEONVectorConvertIntrinsic(I);
5660       break;
5661     }
5662 
5663     // Add reduction to scalar
5664     case Intrinsic::aarch64_neon_faddv:
5665     case Intrinsic::aarch64_neon_saddv:
5666     case Intrinsic::aarch64_neon_uaddv:
5667     // Signed/Unsigned min/max (Vector)
5668     // TODO: handling similarly to AND/OR may be more precise.
5669     case Intrinsic::aarch64_neon_smaxv:
5670     case Intrinsic::aarch64_neon_sminv:
5671     case Intrinsic::aarch64_neon_umaxv:
5672     case Intrinsic::aarch64_neon_uminv:
5673     // Floating-point min/max (vector)
5674     // The f{min,max}"nm"v variants handle NaN differently than f{min,max}v,
5675     // but our shadow propagation is the same.
5676     case Intrinsic::aarch64_neon_fmaxv:
5677     case Intrinsic::aarch64_neon_fminv:
5678     case Intrinsic::aarch64_neon_fmaxnmv:
5679     case Intrinsic::aarch64_neon_fminnmv:
5680     // Sum long across vector
5681     case Intrinsic::aarch64_neon_saddlv:
5682     case Intrinsic::aarch64_neon_uaddlv:
5683       handleVectorReduceIntrinsic(I, /*AllowShadowCast=*/true);
5684       break;
5685 
5686     case Intrinsic::aarch64_neon_ld1x2:
5687     case Intrinsic::aarch64_neon_ld1x3:
5688     case Intrinsic::aarch64_neon_ld1x4:
5689     case Intrinsic::aarch64_neon_ld2:
5690     case Intrinsic::aarch64_neon_ld3:
5691     case Intrinsic::aarch64_neon_ld4:
5692     case Intrinsic::aarch64_neon_ld2r:
5693     case Intrinsic::aarch64_neon_ld3r:
5694     case Intrinsic::aarch64_neon_ld4r: {
5695       handleNEONVectorLoad(I, /*WithLane=*/false);
5696       break;
5697     }
5698 
5699     case Intrinsic::aarch64_neon_ld2lane:
5700     case Intrinsic::aarch64_neon_ld3lane:
5701     case Intrinsic::aarch64_neon_ld4lane: {
5702       handleNEONVectorLoad(I, /*WithLane=*/true);
5703       break;
5704     }
5705 
5706     // Saturating extract narrow
5707     case Intrinsic::aarch64_neon_sqxtn:
5708     case Intrinsic::aarch64_neon_sqxtun:
5709     case Intrinsic::aarch64_neon_uqxtn:
5710       // These only have one argument, but we (ab)use handleShadowOr because it
5711       // also works on single-argument intrinsics and will typecast the shadow
5712       // (and update the origin).
5713       handleShadowOr(I);
5714       break;
5715 
5716     case Intrinsic::aarch64_neon_st1x2:
5717     case Intrinsic::aarch64_neon_st1x3:
5718     case Intrinsic::aarch64_neon_st1x4:
5719     case Intrinsic::aarch64_neon_st2:
5720     case Intrinsic::aarch64_neon_st3:
5721     case Intrinsic::aarch64_neon_st4: {
5722       handleNEONVectorStoreIntrinsic(I, false);
5723       break;
5724     }
5725 
5726     case Intrinsic::aarch64_neon_st2lane:
5727     case Intrinsic::aarch64_neon_st3lane:
5728     case Intrinsic::aarch64_neon_st4lane: {
5729       handleNEONVectorStoreIntrinsic(I, true);
5730       break;
5731     }
5732 
5733     // Arm NEON vector table intrinsics have the source/table register(s) as
5734     // arguments, followed by the index register. They return the output.
5735     //
5736     // 'TBL writes a zero if an index is out-of-range, while TBX leaves the
5737     //  original value unchanged in the destination register.'
5738     // Conveniently, zero denotes a clean shadow, which means out-of-range
5739     // indices for TBL will initialize the user data with zero and also clean
5740     // the shadow. (For TBX, neither the user data nor the shadow will be
5741     // updated, which is also correct.)
5742     case Intrinsic::aarch64_neon_tbl1:
5743     case Intrinsic::aarch64_neon_tbl2:
5744     case Intrinsic::aarch64_neon_tbl3:
5745     case Intrinsic::aarch64_neon_tbl4:
5746     case Intrinsic::aarch64_neon_tbx1:
5747     case Intrinsic::aarch64_neon_tbx2:
5748     case Intrinsic::aarch64_neon_tbx3:
5749     case Intrinsic::aarch64_neon_tbx4: {
5750       // The last trailing argument (index register) should be handled verbatim
5751       handleIntrinsicByApplyingToShadow(
5752           I, /*shadowIntrinsicID=*/I.getIntrinsicID(),
5753           /*trailingVerbatimArgs*/ 1);
5754       break;
5755     }
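
    // E.g. (sketch): a tbl1 lane whose index is out of range reads 0 from the
    // table, and the shadow tbl1 emitted above likewise reads 0 (clean) for
    // that lane, so the zeroed output is correctly treated as initialized.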
5756 
5757     case Intrinsic::aarch64_neon_fmulx:
5758     case Intrinsic::aarch64_neon_pmul:
5759     case Intrinsic::aarch64_neon_pmull:
5760     case Intrinsic::aarch64_neon_smull:
5761     case Intrinsic::aarch64_neon_pmull64:
5762     case Intrinsic::aarch64_neon_umull: {
5763       handleNEONVectorMultiplyIntrinsic(I);
5764       break;
5765     }
5766 
5767     case Intrinsic::scmp:
5768     case Intrinsic::ucmp: {
5769       handleShadowOr(I);
5770       break;
5771     }
5772 
5773     default:
5774       if (!handleUnknownIntrinsic(I))
5775         visitInstruction(I);
5776       break;
5777     }
5778   }
5779 
5780   void visitLibAtomicLoad(CallBase &CB) {
5781     // Since we use getNextNode here, we can't have CB terminate the BB.
5782     assert(isa<CallInst>(CB));
5783 
5784     IRBuilder<> IRB(&CB);
5785     Value *Size = CB.getArgOperand(0);
5786     Value *SrcPtr = CB.getArgOperand(1);
5787     Value *DstPtr = CB.getArgOperand(2);
5788     Value *Ordering = CB.getArgOperand(3);
5789     // Convert the call to have at least Acquire ordering to make sure
5790     // the shadow operations aren't reordered before it.
5791     Value *NewOrdering =
5792         IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
5793     CB.setArgOperand(3, NewOrdering);
5794 
5795     NextNodeIRBuilder NextIRB(&CB);
5796     Value *SrcShadowPtr, *SrcOriginPtr;
5797     std::tie(SrcShadowPtr, SrcOriginPtr) =
5798         getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
5799                            /*isStore*/ false);
5800     Value *DstShadowPtr =
5801         getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
5802                            /*isStore*/ true)
5803             .first;
5804 
5805     NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
5806     if (MS.TrackOrigins) {
5807       Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
5808                                                    kMinOriginAlignment);
5809       Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
5810       NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
5811     }
5812   }
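
  // C-level sketch of the net effect (shadow()/origin() are notational only):
  //   __atomic_load(size, src, dst, order);    // order raised to >= acquire
  //   memcpy(shadow(dst), shadow(src), size);  // emitted after the call
  //   if (track_origins)
  //     __msan_set_origin(dst, size, update(origin(src)));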
5813 
5814   void visitLibAtomicStore(CallBase &CB) {
5815     IRBuilder<> IRB(&CB);
5816     Value *Size = CB.getArgOperand(0);
5817     Value *DstPtr = CB.getArgOperand(2);
5818     Value *Ordering = CB.getArgOperand(3);
5819     // Convert the call to have at least Release ordering to make sure
5820     // the shadow operations aren't reordered after it.
5821     Value *NewOrdering =
5822         IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
5823     CB.setArgOperand(3, NewOrdering);
5824 
5825     Value *DstShadowPtr =
5826         getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
5827                            /*isStore*/ true)
5828             .first;
5829 
5830     // Atomic store always paints clean shadow/origin. See file header.
5831     IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
5832                      Align(1));
5833   }
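
  // Sketch: the ordering is raised to at least release, and shadow(dst) is
  // memset to 0 before the call -- an atomic store is treated as publishing
  // fully initialized bytes (see the file header).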
5834 
5835   void visitCallBase(CallBase &CB) {
5836     assert(!CB.getMetadata(LLVMContext::MD_nosanitize));
5837     if (CB.isInlineAsm()) {
5838       // For inline asm (either a call to asm function, or callbr instruction),
5839       // do the usual thing: check argument shadow and mark all outputs as
5840       // clean. Note that any side effects of the inline asm that are not
5841       // immediately visible in its constraints are not handled.
5842       if (ClHandleAsmConservative)
5843         visitAsmInstruction(CB);
5844       else
5845         visitInstruction(CB);
5846       return;
5847     }
5848     LibFunc LF;
5849     if (TLI->getLibFunc(CB, LF)) {
5850       // libatomic.a functions need to have special handling because there isn't
5851       // a good way to intercept them or compile the library with
5852       // instrumentation.
5853       switch (LF) {
5854       case LibFunc_atomic_load:
5855         if (!isa<CallInst>(CB)) {
5856           llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load. "
5857                           "Ignoring!\n";
5858           break;
5859         }
5860         visitLibAtomicLoad(CB);
5861         return;
5862       case LibFunc_atomic_store:
5863         visitLibAtomicStore(CB);
5864         return;
5865       default:
5866         break;
5867       }
5868     }
5869 
5870     if (auto *Call = dyn_cast<CallInst>(&CB)) {
5871       assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
5872 
5873       // We are going to insert code that relies on the fact that the callee
5874       // will become a non-readonly function after it is instrumented by us. To
5875       // prevent this code from being optimized out, mark that function
5876       // non-readonly in advance.
5877       // TODO: We can likely do better than dropping memory() completely here.
5878       AttributeMask B;
5879       B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
5880 
5881       Call->removeFnAttrs(B);
5882       if (Function *Func = Call->getCalledFunction()) {
5883         Func->removeFnAttrs(B);
5884       }
5885 
5886       maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
5887     }
5888     IRBuilder<> IRB(&CB);
5889     bool MayCheckCall = MS.EagerChecks;
5890     if (Function *Func = CB.getCalledFunction()) {
5891       // __sanitizer_unaligned_{load,store} functions may be called by users
5892       // and always expect shadows in the TLS, so don't check them.
5893       MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
5894     }
5895 
5896     unsigned ArgOffset = 0;
5897     LLVM_DEBUG(dbgs() << "  CallSite: " << CB << "\n");
5898     for (const auto &[i, A] : llvm::enumerate(CB.args())) {
5899       if (!A->getType()->isSized()) {
5900         LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
5901         continue;
5902       }
5903 
5904       if (A->getType()->isScalableTy()) {
5905         LLVM_DEBUG(dbgs() << "Arg  " << i << " is vscale: " << CB << "\n");
5906         // Handle as noundef, but don't reserve tls slots.
5907         insertCheckShadowOf(A, &CB);
5908         continue;
5909       }
5910 
5911       unsigned Size = 0;
5912       const DataLayout &DL = F.getDataLayout();
5913 
5914       bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
5915       bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
5916       bool EagerCheck = MayCheckCall && !ByVal && NoUndef;
5917 
5918       if (EagerCheck) {
5919         insertCheckShadowOf(A, &CB);
5920         Size = DL.getTypeAllocSize(A->getType());
5921       } else {
5922         [[maybe_unused]] Value *Store = nullptr;
5923         // Compute the Shadow for arg even if it is ByVal, because
5924         // in that case getShadow() will copy the actual arg shadow to
5925         // __msan_param_tls.
5926         Value *ArgShadow = getShadow(A);
5927         Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
5928         LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
5929                           << " Shadow: " << *ArgShadow << "\n");
5930         if (ByVal) {
5931           // ByVal requires some special handling as it's too big for a single
5932           // load
5933           assert(A->getType()->isPointerTy() &&
5934                  "ByVal argument is not a pointer!");
5935           Size = DL.getTypeAllocSize(CB.getParamByValType(i));
5936           if (ArgOffset + Size > kParamTLSSize)
5937             break;
5938           const MaybeAlign ParamAlignment(CB.getParamAlign(i));
5939           MaybeAlign Alignment = std::nullopt;
5940           if (ParamAlignment)
5941             Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
5942           Value *AShadowPtr, *AOriginPtr;
5943           std::tie(AShadowPtr, AOriginPtr) =
5944               getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
5945                                  /*isStore*/ false);
5946           if (!PropagateShadow) {
5947             Store = IRB.CreateMemSet(ArgShadowBase,
5948                                      Constant::getNullValue(IRB.getInt8Ty()),
5949                                      Size, Alignment);
5950           } else {
5951             Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
5952                                      Alignment, Size);
5953             if (MS.TrackOrigins) {
5954               Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
5955               // FIXME: OriginSize should be:
5956               // alignTo(A % kMinOriginAlignment + Size, kMinOriginAlignment)
5957               unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
5958               IRB.CreateMemCpy(
5959                   ArgOriginBase,
5960                   /* by origin_tls[ArgOffset] */ kMinOriginAlignment,
5961                   AOriginPtr,
5962                   /* by getShadowOriginPtr */ kMinOriginAlignment, OriginSize);
5963             }
5964           }
5965         } else {
5966           // Any other parameters mean we need bit-grained tracking of uninit
5967           // data
5968           Size = DL.getTypeAllocSize(A->getType());
5969           if (ArgOffset + Size > kParamTLSSize)
5970             break;
5971           Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
5972                                          kShadowTLSAlignment);
5973           Constant *Cst = dyn_cast<Constant>(ArgShadow);
5974           if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
5975             IRB.CreateStore(getOrigin(A),
5976                             getOriginPtrForArgument(IRB, ArgOffset));
5977           }
5978         }
5979         assert(Store != nullptr);
5980         LLVM_DEBUG(dbgs() << "  Param:" << *Store << "\n");
5981       }
5982       assert(Size != 0);
5983       ArgOffset += alignTo(Size, kShadowTLSAlignment);
5984     }
5985     LLVM_DEBUG(dbgs() << "  done with call args\n");
5986 
5987     FunctionType *FT = CB.getFunctionType();
5988     if (FT->isVarArg()) {
5989       VAHelper->visitCallBase(CB, IRB);
5990     }
5991 
5992     // Now, get the shadow for the RetVal.
5993     if (!CB.getType()->isSized())
5994       return;
5995     // Don't emit the epilogue for musttail call returns.
5996     if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
5997       return;
5998 
5999     if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
6000       setShadow(&CB, getCleanShadow(&CB));
6001       setOrigin(&CB, getCleanOrigin());
6002       return;
6003     }
6004 
6005     IRBuilder<> IRBBefore(&CB);
6006     // Until we have full dynamic coverage, make sure the retval shadow is 0.
6007     Value *Base = getShadowPtrForRetval(IRBBefore);
6008     IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
6009                                  kShadowTLSAlignment);
6010     BasicBlock::iterator NextInsn;
6011     if (isa<CallInst>(CB)) {
6012       NextInsn = ++CB.getIterator();
6013       assert(NextInsn != CB.getParent()->end());
6014     } else {
6015       BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
6016       if (!NormalDest->getSinglePredecessor()) {
6017         // FIXME: this case is tricky, so we are just conservative here.
6018         // Perhaps we need to split the edge between this BB and NormalDest,
6019         // but a naive attempt to use SplitEdge leads to a crash.
6020         setShadow(&CB, getCleanShadow(&CB));
6021         setOrigin(&CB, getCleanOrigin());
6022         return;
6023       }
6024       // FIXME: NextInsn is likely in a basic block that has not been visited
6025       // yet. Anything inserted there will be instrumented by MSan later!
6026       NextInsn = NormalDest->getFirstInsertionPt();
6027       assert(NextInsn != NormalDest->end() &&
6028              "Could not find insertion point for retval shadow load");
6029     }
6030     IRBuilder<> IRBAfter(&*NextInsn);
6031     Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
6032         getShadowTy(&CB), getShadowPtrForRetval(IRBAfter), kShadowTLSAlignment,
6033         "_msret");
6034     setShadow(&CB, RetvalShadow);
6035     if (MS.TrackOrigins)
6036       setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
6037   }
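
  // Layout of the argument-shadow handoff above (a sketch; assumes the usual
  // 8-byte kShadowTLSAlignment):
  //   __msan_param_tls:  [shadow(arg0)][pad][shadow(arg1)][pad]...
  //     ArgOffset advances by alignTo(Size, kShadowTLSAlignment); arguments
  //     whose slot would overflow kParamTLSSize are skipped.
  //   __msan_retval_tls: zeroed just before the call, loaded back ("_msret")
  //     immediately after it.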
6038 
6039   bool isAMustTailRetVal(Value *RetVal) {
6040     if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
6041       RetVal = I->getOperand(0);
6042     }
6043     if (auto *I = dyn_cast<CallInst>(RetVal)) {
6044       return I->isMustTailCall();
6045     }
6046     return false;
6047   }
6048 
6049   void visitReturnInst(ReturnInst &I) {
6050     IRBuilder<> IRB(&I);
6051     Value *RetVal = I.getReturnValue();
6052     if (!RetVal)
6053       return;
6054     // Don't emit the epilogue for musttail call returns.
6055     if (isAMustTailRetVal(RetVal))
6056       return;
6057     Value *ShadowPtr = getShadowPtrForRetval(IRB);
6058     bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
6059     bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
6060     // FIXME: Consider using SpecialCaseList to specify a list of functions that
6061     // must always return fully initialized values. For now, we hardcode "main".
6062     bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");
6063 
6064     Value *Shadow = getShadow(RetVal);
6065     bool StoreOrigin = true;
6066     if (EagerCheck) {
6067       insertCheckShadowOf(RetVal, &I);
6068       Shadow = getCleanShadow(RetVal);
6069       StoreOrigin = false;
6070     }
6071 
6072     // The caller may still expect information passed over TLS if we pass our
6073     // check
6074     if (StoreShadow) {
6075       IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
6076       if (MS.TrackOrigins && StoreOrigin)
6077         IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
6078     }
6079   }
6080 
6081   void visitPHINode(PHINode &I) {
6082     IRBuilder<> IRB(&I);
6083     if (!PropagateShadow) {
6084       setShadow(&I, getCleanShadow(&I));
6085       setOrigin(&I, getCleanOrigin());
6086       return;
6087     }
6088 
6089     ShadowPHINodes.push_back(&I);
6090     setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
6091                                 "_msphi_s"));
6092     if (MS.TrackOrigins)
6093       setOrigin(
6094           &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
6095   }
6096 
6097   Value *getLocalVarIdptr(AllocaInst &I) {
6098     ConstantInt *IntConst =
6099         ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
6100     return new GlobalVariable(*F.getParent(), IntConst->getType(),
6101                               /*isConstant=*/false, GlobalValue::PrivateLinkage,
6102                               IntConst);
6103   }
6104 
6105   Value *getLocalVarDescription(AllocaInst &I) {
6106     return createPrivateConstGlobalForString(*F.getParent(), I.getName());
6107   }
6108 
6109   void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
6110     if (PoisonStack && ClPoisonStackWithCall) {
6111       IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
6112     } else {
6113       Value *ShadowBase, *OriginBase;
6114       std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
6115           &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);
6116 
6117       Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
6118       IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
6119     }
6120 
6121     if (PoisonStack && MS.TrackOrigins) {
6122       Value *Idptr = getLocalVarIdptr(I);
6123       if (ClPrintStackNames) {
6124         Value *Descr = getLocalVarDescription(I);
6125         IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
6126                        {&I, Len, Idptr, Descr});
6127       } else {
6128         IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
6129       }
6130     }
6131   }
6132 
6133   void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
6134     Value *Descr = getLocalVarDescription(I);
6135     if (PoisonStack) {
6136       IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
6137     } else {
6138       IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
6139     }
6140   }
6141 
6142   void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
6143     if (!InsPoint)
6144       InsPoint = &I;
6145     NextNodeIRBuilder IRB(InsPoint);
6146     const DataLayout &DL = F.getDataLayout();
6147     TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
6148     Value *Len = IRB.CreateTypeSize(MS.IntptrTy, TS);
6149     if (I.isArrayAllocation())
6150       Len = IRB.CreateMul(Len,
6151                           IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));
6152 
6153     if (MS.CompileKernel)
6154       poisonAllocaKmsan(I, IRB, Len);
6155     else
6156       poisonAllocaUserspace(I, IRB, Len);
6157   }
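
  // E.g. (sketch): for `%p = alloca [4 x i32], i64 %n`, Len is computed as
  // 16 * zext(%n), and that many shadow bytes are poisoned (userspace) or
  // handed to the KMSAN poison/unpoison callbacks.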
6158 
6159   void visitAllocaInst(AllocaInst &I) {
6160     setShadow(&I, getCleanShadow(&I));
6161     setOrigin(&I, getCleanOrigin());
6162     // We'll get to this alloca later unless it's poisoned at the corresponding
6163     // llvm.lifetime.start.
6164     AllocaSet.insert(&I);
6165   }
6166 
6167   void visitSelectInst(SelectInst &I) {
6168     // a = select b, c, d
6169     Value *B = I.getCondition();
6170     Value *C = I.getTrueValue();
6171     Value *D = I.getFalseValue();
6172 
6173     handleSelectLikeInst(I, B, C, D);
6174   }
6175 
6176   void handleSelectLikeInst(Instruction &I, Value *B, Value *C, Value *D) {
6177     IRBuilder<> IRB(&I);
6178 
6179     Value *Sb = getShadow(B);
6180     Value *Sc = getShadow(C);
6181     Value *Sd = getShadow(D);
6182 
6183     Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
6184     Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
6185     Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;
6186 
6187     // Result shadow if condition shadow is 0.
6188     Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
6189     Value *Sa1;
6190     if (I.getType()->isAggregateType()) {
6191       // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
6192       // an extra "select". This results in much more compact IR.
6193       // Sa = select Sb, poisoned, (select b, Sc, Sd)
6194       Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
6195     } else {
6196       // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
6197       // If Sb (condition is poisoned), look for bits in c and d that are equal
6198       // and both unpoisoned.
6199       // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
6200 
6201       // Cast arguments to shadow-compatible type.
6202       C = CreateAppToShadowCast(IRB, C);
6203       D = CreateAppToShadowCast(IRB, D);
6204 
6205       // Result shadow if condition shadow is 1.
6206       Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
6207     }
6208     Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
6209     setShadow(&I, Sa);
6210     if (MS.TrackOrigins) {
6211       // Origins are always i32, so any vector conditions must be flattened.
6212       // FIXME: consider tracking vector origins for app vectors?
6213       if (B->getType()->isVectorTy()) {
6214         B = convertToBool(B, IRB);
6215         Sb = convertToBool(Sb, IRB);
6216       }
6217       // a = select b, c, d
6218       // Oa = Sb ? Ob : (b ? Oc : Od)
6219       setOrigin(&I, IRB.CreateSelect(Sb, Ob, IRB.CreateSelect(B, Oc, Od)));
6220     }
6221   }
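
  // Worked example (sketch), scalar i8 with b = true and Sb = 0 (clean):
  //   c = 0b1010, Sc = 0b0001;  d = 0b1000, Sd = 0b0000
  //   Sa = Sa0 = select(b, Sc, Sd) = 0b0001 -- only the chosen operand's
  //   poison survives. Had Sb been nonzero, Sa1 = (c^d)|Sc|Sd = 0b0011 would
  //   apply: a bit stays clean only where c and d agree and are both clean.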
6222 
6223   void visitLandingPadInst(LandingPadInst &I) {
6224     // Do nothing.
6225     // See https://github.com/google/sanitizers/issues/504
6226     setShadow(&I, getCleanShadow(&I));
6227     setOrigin(&I, getCleanOrigin());
6228   }
6229 
6230   void visitCatchSwitchInst(CatchSwitchInst &I) {
6231     setShadow(&I, getCleanShadow(&I));
6232     setOrigin(&I, getCleanOrigin());
6233   }
6234 
6235   void visitFuncletPadInst(FuncletPadInst &I) {
6236     setShadow(&I, getCleanShadow(&I));
6237     setOrigin(&I, getCleanOrigin());
6238   }
6239 
6240   void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }
6241 
6242   void visitExtractValueInst(ExtractValueInst &I) {
6243     IRBuilder<> IRB(&I);
6244     Value *Agg = I.getAggregateOperand();
6245     LLVM_DEBUG(dbgs() << "ExtractValue:  " << I << "\n");
6246     Value *AggShadow = getShadow(Agg);
6247     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
6248     Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
6249     LLVM_DEBUG(dbgs() << "   ResShadow:  " << *ResShadow << "\n");
6250     setShadow(&I, ResShadow);
6251     setOriginForNaryOp(I);
6252   }
6253 
6254   void visitInsertValueInst(InsertValueInst &I) {
6255     IRBuilder<> IRB(&I);
6256     LLVM_DEBUG(dbgs() << "InsertValue:  " << I << "\n");
6257     Value *AggShadow = getShadow(I.getAggregateOperand());
6258     Value *InsShadow = getShadow(I.getInsertedValueOperand());
6259     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
6260     LLVM_DEBUG(dbgs() << "   InsShadow:  " << *InsShadow << "\n");
6261     Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
6262     LLVM_DEBUG(dbgs() << "   Res:        " << *Res << "\n");
6263     setShadow(&I, Res);
6264     setOriginForNaryOp(I);
6265   }
6266 
6267   void dumpInst(Instruction &I) {
6268     if (CallInst *CI = dyn_cast<CallInst>(&I)) {
6269       errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
6270     } else {
6271       errs() << "ZZZ " << I.getOpcodeName() << "\n";
6272     }
6273     errs() << "QQQ " << I << "\n";
6274   }
6275 
6276   void visitResumeInst(ResumeInst &I) {
6277     LLVM_DEBUG(dbgs() << "Resume: " << I << "\n");
6278     // Nothing to do here.
6279   }
6280 
6281   void visitCleanupReturnInst(CleanupReturnInst &CRI) {
6282     LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
6283     // Nothing to do here.
6284   }
6285 
6286   void visitCatchReturnInst(CatchReturnInst &CRI) {
6287     LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
6288     // Nothing to do here.
6289   }
6290 
6291   void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
6292                              IRBuilder<> &IRB, const DataLayout &DL,
6293                              bool isOutput) {
6294     // For each assembly argument, we check its value for being initialized.
6295     // If the argument is a pointer, we assume it points to a single element
6296     // of the corresponding type (or to an 8-byte word, if the type is unsized).
6297     // Each such pointer is instrumented with a call to the runtime library.
6298     Type *OpType = Operand->getType();
6299     // Check the operand value itself.
6300     insertCheckShadowOf(Operand, &I);
6301     if (!OpType->isPointerTy() || !isOutput) {
6302       assert(!isOutput);
6303       return;
6304     }
6305     if (!ElemTy->isSized())
6306       return;
6307     auto Size = DL.getTypeStoreSize(ElemTy);
6308     Value *SizeVal = IRB.CreateTypeSize(MS.IntptrTy, Size);
6309     if (MS.CompileKernel) {
6310       IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
6311     } else {
6312       // ElemTy, derived from elementtype(), does not encode the alignment of
6313       // the pointer. Conservatively assume that the shadow memory is unaligned.
6314       // When Size is large, avoid StoreInst as it would expand to many
6315       // instructions.
6316       auto [ShadowPtr, _] =
6317           getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
6318       if (Size <= 32)
6319         IRB.CreateAlignedStore(getCleanShadow(ElemTy), ShadowPtr, Align(1));
6320       else
6321         IRB.CreateMemSet(ShadowPtr, ConstantInt::getNullValue(IRB.getInt8Ty()),
6322                          SizeVal, Align(1));
6323     }
6324   }
6325 
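  // Editorial sketch, not part of the pass: for a hypothetical user program
  //
  //   int x;
  //   asm volatile("movl $1, %0" : "=m"(x));
  //
  // the "=m" output reaches the CallInst as a pointer carrying an
  // elementtype(i32) attribute, so ElemTy is i32 and Size is 4; the
  // userspace branch above then stores 4 bytes of clean shadow at x's
  // shadow address, marking x as initialized after the asm statement.
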
6326   /// Get the number of output arguments returned by pointers.
6327   int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
6328     int NumRetOutputs = 0;
6329     int NumOutputs = 0;
6330     Type *RetTy = cast<Value>(CB)->getType();
6331     if (!RetTy->isVoidTy()) {
6332       // Register outputs are returned via the CallInst return value.
6333       auto *ST = dyn_cast<StructType>(RetTy);
6334       if (ST)
6335         NumRetOutputs = ST->getNumElements();
6336       else
6337         NumRetOutputs = 1;
6338     }
6339     InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
6340     for (const InlineAsm::ConstraintInfo &Info : Constraints) {
6341       switch (Info.Type) {
6342       case InlineAsm::isOutput:
6343         NumOutputs++;
6344         break;
6345       default:
6346         break;
6347       }
6348     }
6349     return NumOutputs - NumRetOutputs;
6350   }
6351 
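  // Editorial sketch, not part of the pass: for a hypothetical statement
  //
  //   asm("..." : "=r"(a), "=m"(b) : "r"(c));
  //
  // ParseConstraints() reports two outputs. The "=r" output is returned via
  // the CallInst's return value (NumRetOutputs == 1), while the "=m" output
  // is passed by pointer, so getNumOutputArgs() returns 2 - 1 = 1.
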
6352   void visitAsmInstruction(Instruction &I) {
6353     // Conservative inline assembly handling: check for poisoned shadow of
6354     // asm() arguments, then unpoison the result and all the memory locations
6355     // pointed to by those arguments.
6356     // An inline asm() statement in C++ contains lists of input and output
6357     // arguments used by the assembly code. These are mapped to operands of the
6358     // CallInst as follows:
6359     //  - nR register outputs ("=r") are returned by value in a single structure
6360     //  (SSA value of the CallInst);
6361     //  - nO other outputs ("=m" and others) are returned by pointer as first
6362     // nO operands of the CallInst;
6363     //  - nI inputs ("r", "m" and others) are passed to CallInst as the
6364     // remaining nI operands.
6365     // The total number of asm() arguments in the source is nR+nO+nI, and the
6366     // corresponding CallInst has nO+nI+1 operands (the last operand is the
6367     // function to be called).
6368     const DataLayout &DL = F.getDataLayout();
6369     CallBase *CB = cast<CallBase>(&I);
6370     IRBuilder<> IRB(&I);
6371     InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
6372     int OutputArgs = getNumOutputArgs(IA, CB);
6373     // The last operand of a CallInst is the function itself.
6374     int NumOperands = CB->getNumOperands() - 1;
6375 
6376     // Check input arguments. We do this before unpoisoning output arguments,
6377     // so that we won't overwrite uninitialized values before checking them.
6378     for (int i = OutputArgs; i < NumOperands; i++) {
6379       Value *Operand = CB->getOperand(i);
6380       instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
6381                             /*isOutput*/ false);
6382     }
6383     // Unpoison output arguments. This must happen before the actual InlineAsm
6384     // call, so that the shadow for memory published in the asm() statement
6385     // remains valid.
6386     for (int i = 0; i < OutputArgs; i++) {
6387       Value *Operand = CB->getOperand(i);
6388       instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
6389                             /*isOutput*/ true);
6390     }
6391 
6392     setShadow(&I, getCleanShadow(&I));
6393     setOrigin(&I, getCleanOrigin());
6394   }
6395 
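  // Editorial sketch, not part of the pass: for a hypothetical statement
  //
  //   asm("..." : "=r"(r0), "=m"(m0) : "r"(i0), "m"(i1));
  //
  // we have nR = 1, nO = 1, nI = 2, so the CallInst's operands are
  // (&m0, i0, &i1, <asm callee>): the loops above first check the shadow of
  // i0 and &i1, then unpoison &m0 and the memory it points to.
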
6396   void visitFreezeInst(FreezeInst &I) {
6397     // Freeze always returns a fully defined value.
6398     setShadow(&I, getCleanShadow(&I));
6399     setOrigin(&I, getCleanOrigin());
6400   }
6401 
6402   void visitInstruction(Instruction &I) {
6403     // Everything else: stop propagating and check for poisoned shadow.
6404     if (ClDumpStrictInstructions)
6405       dumpInst(I);
6406     LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
6407     for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
6408       Value *Operand = I.getOperand(i);
6409       if (Operand->getType()->isSized())
6410         insertCheckShadowOf(Operand, &I);
6411     }
6412     setShadow(&I, getCleanShadow(&I));
6413     setOrigin(&I, getCleanOrigin());
6414   }
6415 };
6416 
6417 struct VarArgHelperBase : public VarArgHelper {
6418   Function &F;
6419   MemorySanitizer &MS;
6420   MemorySanitizerVisitor &MSV;
6421   SmallVector<CallInst *, 16> VAStartInstrumentationList;
6422   const unsigned VAListTagSize;
6423 
6424   VarArgHelperBase(Function &F, MemorySanitizer &MS,
6425                    MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
6426       : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}
6427 
6428   Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
6429     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
6430     return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
6431   }
6432 
6433   /// Compute the shadow address for a given va_arg.
6434   Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
6435     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
6436     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
6437     return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_s");
6438   }
6439 
6440   /// Compute the shadow address for a given va_arg.
6441   Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset,
6442                                    unsigned ArgSize) {
6443     // Make sure we don't overflow __msan_va_arg_tls.
6444     if (ArgOffset + ArgSize > kParamTLSSize)
6445       return nullptr;
6446     return getShadowPtrForVAArgument(IRB, ArgOffset);
6447   }
6448 
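  // Editorial note: kParamTLSSize is 800 bytes at the time of writing, so
  // e.g. an 8-byte vararg whose ArgOffset is 796 does not fit; the caller
  // receives nullptr and skips the shadow store instead of overflowing
  // __msan_va_arg_tls.
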
6449   /// Compute the origin address for a given va_arg.
6450   Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
6451     Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
6452     // getOriginPtrForVAArgument() is always called after
6453     // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
6454     // overflow.
6455     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
6456     return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_o");
6457   }
6458 
6459   void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
6460                       unsigned BaseOffset) {
6461     // The tail of __msan_va_arg_tls is not large enough to fit the full
6462     // value shadow, but it will be copied to the backup anyway. Make it
6463     // clean.
6464     if (BaseOffset >= kParamTLSSize)
6465       return;
6466     Value *TailSize =
6467         ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
6468     IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
6469                      TailSize, Align(8));
6470   }
6471 
6472   void unpoisonVAListTagForInst(IntrinsicInst &I) {
6473     IRBuilder<> IRB(&I);
6474     Value *VAListTag = I.getArgOperand(0);
6475     const Align Alignment = Align(8);
6476     auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
6477         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
6478     // Unpoison the whole __va_list_tag.
6479     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
6480                      VAListTagSize, Alignment, false);
6481   }
6482 
6483   void visitVAStartInst(VAStartInst &I) override {
6484     if (F.getCallingConv() == CallingConv::Win64)
6485       return;
6486     VAStartInstrumentationList.push_back(&I);
6487     unpoisonVAListTagForInst(I);
6488   }
6489 
6490   void visitVACopyInst(VACopyInst &I) override {
6491     if (F.getCallingConv() == CallingConv::Win64)
6492       return;
6493     unpoisonVAListTagForInst(I);
6494   }
6495 };
6496 
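// Editorial note: the VAListTagSize passed to the constructor above is the
// size of the target's va_list tag object that va_start()/va_copy() write
// to: 24 bytes on AMD64, 32 on AArch64 and SystemZ, 8 on PowerPC64 and 12 on
// PowerPC32, as supplied by the per-target helpers below.
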
6497 /// AMD64-specific implementation of VarArgHelper.
6498 struct VarArgAMD64Helper : public VarArgHelperBase {
6499   // An unfortunate workaround for asymmetric lowering of va_arg stuff.
6500   // See a comment in visitCallBase for more details.
6501   static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
6502   static const unsigned AMD64FpEndOffsetSSE = 176;
6503   // If SSE is disabled, fp_offset in va_list is zero.
6504   static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
6505 
6506   unsigned AMD64FpEndOffset;
6507   AllocaInst *VAArgTLSCopy = nullptr;
6508   AllocaInst *VAArgTLSOriginCopy = nullptr;
6509   Value *VAArgOverflowSize = nullptr;
6510 
6511   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
6512 
6513   VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
6514                     MemorySanitizerVisitor &MSV)
6515       : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/24) {
6516     AMD64FpEndOffset = AMD64FpEndOffsetSSE;
6517     for (const auto &Attr : F.getAttributes().getFnAttrs()) {
6518       if (Attr.isStringAttribute() &&
6519           (Attr.getKindAsString() == "target-features")) {
6520         if (Attr.getValueAsString().contains("-sse"))
6521           AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
6522         break;
6523       }
6524     }
6525   }
6526 
6527   ArgKind classifyArgument(Value *arg) {
6528     // A very rough approximation of X86_64 argument classification rules.
6529     Type *T = arg->getType();
6530     if (T->isX86_FP80Ty())
6531       return AK_Memory;
6532     if (T->isFPOrFPVectorTy())
6533       return AK_FloatingPoint;
6534     if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
6535       return AK_GeneralPurpose;
6536     if (T->isPointerTy())
6537       return AK_GeneralPurpose;
6538     return AK_Memory;
6539   }
6540 
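  // Editorial sketch, not part of the pass: under this approximation,
  //   i32, ptr                           -> AK_GeneralPurpose (rdi..r9)
  //   double, <4 x float>                -> AK_FloatingPoint (xmm0..xmm7)
  //   x86_fp80 (long double), aggregates -> AK_Memory (passed on the stack)
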
6541   // For VarArg functions, store the argument shadow in an ABI-specific format
6542   // that corresponds to va_list layout.
6543   // We do this because Clang lowers va_arg in the frontend, and this pass
6544   // only sees the low level code that deals with va_list internals.
6545   // A much easier alternative (provided that Clang emits va_arg instructions)
6546   // would have been to associate each live instance of va_list with a copy of
6547   // MSanParamTLS, and extract shadow on va_arg() call in the argument list
6548   // order.
6549   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
6550     unsigned GpOffset = 0;
6551     unsigned FpOffset = AMD64GpEndOffset;
6552     unsigned OverflowOffset = AMD64FpEndOffset;
6553     const DataLayout &DL = F.getDataLayout();
6554 
6555     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
6556       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
6557       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
6558       if (IsByVal) {
6559         // ByVal arguments always go to the overflow area.
6560         // Fixed arguments passed through the overflow area will be stepped
6561         // over by va_start, so don't count them towards the offset.
6562         if (IsFixed)
6563           continue;
6564         assert(A->getType()->isPointerTy());
6565         Type *RealTy = CB.getParamByValType(ArgNo);
6566         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
6567         uint64_t AlignedSize = alignTo(ArgSize, 8);
6568         unsigned BaseOffset = OverflowOffset;
6569         Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
6570         Value *OriginBase = nullptr;
6571         if (MS.TrackOrigins)
6572           OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
6573         OverflowOffset += AlignedSize;
6574 
6575         if (OverflowOffset > kParamTLSSize) {
6576           CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
6577           continue; // We have no space to copy shadow there.
6578         }
6579 
6580         Value *ShadowPtr, *OriginPtr;
6581         std::tie(ShadowPtr, OriginPtr) =
6582             MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
6583                                    /*isStore*/ false);
6584         IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
6585                          kShadowTLSAlignment, ArgSize);
6586         if (MS.TrackOrigins)
6587           IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
6588                            kShadowTLSAlignment, ArgSize);
6589       } else {
6590         ArgKind AK = classifyArgument(A);
6591         if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
6592           AK = AK_Memory;
6593         if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
6594           AK = AK_Memory;
6595         Value *ShadowBase, *OriginBase = nullptr;
6596         switch (AK) {
6597         case AK_GeneralPurpose:
6598           ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
6599           if (MS.TrackOrigins)
6600             OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
6601           GpOffset += 8;
6602           assert(GpOffset <= kParamTLSSize);
6603           break;
6604         case AK_FloatingPoint:
6605           ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
6606           if (MS.TrackOrigins)
6607             OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
6608           FpOffset += 16;
6609           assert(FpOffset <= kParamTLSSize);
6610           break;
6611         case AK_Memory:
6612           if (IsFixed)
6613             continue;
6614           uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
6615           uint64_t AlignedSize = alignTo(ArgSize, 8);
6616           unsigned BaseOffset = OverflowOffset;
6617           ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
6618           if (MS.TrackOrigins) {
6619             OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
6620           }
6621           OverflowOffset += AlignedSize;
6622           if (OverflowOffset > kParamTLSSize) {
6623             // We have no space to copy shadow there.
6624             CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
6625             continue;
6626           }
6627         }
6628         // Take fixed arguments into account for GpOffset and FpOffset,
6629         // but don't actually store shadows for them.
6630         // TODO(glider): don't call get*PtrForVAArgument() for them.
6631         if (IsFixed)
6632           continue;
6633         Value *Shadow = MSV.getShadow(A);
6634         IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
6635         if (MS.TrackOrigins) {
6636           Value *Origin = MSV.getOrigin(A);
6637           TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
6638           MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
6639                           std::max(kShadowTLSAlignment, kMinOriginAlignment));
6640         }
6641       }
6642     }
6643     Constant *OverflowSize =
6644         ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
6645     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
6646   }
6647 
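  // Editorial sketch, not part of the pass: the resulting __msan_va_arg_tls
  // layout mirrors the AMD64 va_list register save area:
  //
  //   [0, 48)    shadow of GP registers (rdi, rsi, rdx, rcx, r8, r9)
  //   [48, 176)  shadow of FP registers (xmm0..xmm7, 16 bytes each)
  //   [176, ...) shadow of overflow (stack) arguments, 8-byte aligned
  //
  // with the size of the last part stored to VAArgOverflowSizeTLS above.
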
6648   void finalizeInstrumentation() override {
6649     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
6650            "finalizeInstrumentation called twice");
6651     if (!VAStartInstrumentationList.empty()) {
6652       // If there is a va_start in this function, make a backup copy of
6653       // va_arg_tls somewhere in the function entry block.
6654       IRBuilder<> IRB(MSV.FnPrologueEnd);
6655       VAArgOverflowSize =
6656           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
6657       Value *CopySize = IRB.CreateAdd(
6658           ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
6659       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
6660       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
6661       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
6662                        CopySize, kShadowTLSAlignment, false);
6663 
6664       Value *SrcSize = IRB.CreateBinaryIntrinsic(
6665           Intrinsic::umin, CopySize,
6666           ConstantInt::get(MS.IntptrTy, kParamTLSSize));
6667       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
6668                        kShadowTLSAlignment, SrcSize);
6669       if (MS.TrackOrigins) {
6670         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
6671         VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
6672         IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
6673                          MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
6674       }
6675     }
6676 
6677     // Instrument va_start.
6678     // Copy va_list shadow from the backup copy of the TLS contents.
6679     for (CallInst *OrigInst : VAStartInstrumentationList) {
6680       NextNodeIRBuilder IRB(OrigInst);
6681       Value *VAListTag = OrigInst->getArgOperand(0);
6682 
6683       Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
6684           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
6685                         ConstantInt::get(MS.IntptrTy, 16)),
6686           MS.PtrTy);
6687       Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
6688       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
6689       const Align Alignment = Align(16);
6690       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
6691           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
6692                                  Alignment, /*isStore*/ true);
6693       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
6694                        AMD64FpEndOffset);
6695       if (MS.TrackOrigins)
6696         IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
6697                          Alignment, AMD64FpEndOffset);
6698       Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
6699           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
6700                         ConstantInt::get(MS.IntptrTy, 8)),
6701           MS.PtrTy);
6702       Value *OverflowArgAreaPtr =
6703           IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
6704       Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
6705       std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
6706           MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
6707                                  Alignment, /*isStore*/ true);
6708       Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
6709                                              AMD64FpEndOffset);
6710       IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
6711                        VAArgOverflowSize);
6712       if (MS.TrackOrigins) {
6713         SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
6714                                         AMD64FpEndOffset);
6715         IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
6716                          VAArgOverflowSize);
6717       }
6718     }
6719   }
6720 };
6721 
6722 /// AArch64-specific implementation of VarArgHelper.
6723 struct VarArgAArch64Helper : public VarArgHelperBase {
6724   static const unsigned kAArch64GrArgSize = 64;
6725   static const unsigned kAArch64VrArgSize = 128;
6726 
6727   static const unsigned AArch64GrBegOffset = 0;
6728   static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
6729   // Make VR space aligned to 16 bytes.
6730   static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
6731   static const unsigned AArch64VrEndOffset =
6732       AArch64VrBegOffset + kAArch64VrArgSize;
6733   static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
6734 
6735   AllocaInst *VAArgTLSCopy = nullptr;
6736   Value *VAArgOverflowSize = nullptr;
6737 
6738   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
6739 
6740   VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
6741                       MemorySanitizerVisitor &MSV)
6742       : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}
6743 
6744   // A very rough approximation of aarch64 argument classification rules.
6745   std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
6746     if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
6747       return {AK_GeneralPurpose, 1};
6748     if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
6749       return {AK_FloatingPoint, 1};
6750 
6751     if (T->isArrayTy()) {
6752       auto R = classifyArgument(T->getArrayElementType());
6753       R.second *= T->getScalarType()->getArrayNumElements();
6754       return R;
6755     }
6756 
6757     if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
6758       auto R = classifyArgument(FV->getScalarType());
6759       R.second *= FV->getNumElements();
6760       return R;
6761     }
6762 
6763     LLVM_DEBUG(errs() << "Unknown vararg type: " << *T << "\n");
6764     return {AK_Memory, 0};
6765   }
6766 
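  // Editorial sketch, not part of the pass: examples of the classification:
  //   i64         -> {AK_GeneralPurpose, 1}
  //   double      -> {AK_FloatingPoint, 1}
  //   [4 x i32]   -> {AK_GeneralPurpose, 4}
  //   <2 x float> -> {AK_FloatingPoint, 2}
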
6767   // The instrumentation stores the argument shadow in a non-ABI-specific
6768   // format because it does not know which arguments are named (Clang, as in
6769   // the x86_64 case, lowers va_arg in the frontend, and this pass only sees
6770   // the low-level code that deals with va_list internals).
6771   // The first eight GR registers are saved in the first 64 bytes of the
6772   // va_arg TLS array, followed by the first eight FP/SIMD registers, and
6773   // then the remaining arguments.
6774   // Using constant offsets within the va_arg TLS array allows a fast copy
6775   // in finalizeInstrumentation().
6776   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
6777     unsigned GrOffset = AArch64GrBegOffset;
6778     unsigned VrOffset = AArch64VrBegOffset;
6779     unsigned OverflowOffset = AArch64VAEndOffset;
6780 
6781     const DataLayout &DL = F.getDataLayout();
6782     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
6783       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
6784       auto [AK, RegNum] = classifyArgument(A->getType());
6785       if (AK == AK_GeneralPurpose &&
6786           (GrOffset + RegNum * 8) > AArch64GrEndOffset)
6787         AK = AK_Memory;
6788       if (AK == AK_FloatingPoint &&
6789           (VrOffset + RegNum * 16) > AArch64VrEndOffset)
6790         AK = AK_Memory;
6791       Value *Base;
6792       switch (AK) {
6793       case AK_GeneralPurpose:
6794         Base = getShadowPtrForVAArgument(IRB, GrOffset);
6795         GrOffset += 8 * RegNum;
6796         break;
6797       case AK_FloatingPoint:
6798         Base = getShadowPtrForVAArgument(IRB, VrOffset);
6799         VrOffset += 16 * RegNum;
6800         break;
6801       case AK_Memory:
6802         // Don't count fixed arguments in the overflow area - va_start will
6803         // skip right over them.
6804         if (IsFixed)
6805           continue;
6806         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
6807         uint64_t AlignedSize = alignTo(ArgSize, 8);
6808         unsigned BaseOffset = OverflowOffset;
6809         Base = getShadowPtrForVAArgument(IRB, BaseOffset);
6810         OverflowOffset += AlignedSize;
6811         if (OverflowOffset > kParamTLSSize) {
6812           // We have no space to copy shadow there.
6813           CleanUnusedTLS(IRB, Base, BaseOffset);
6814           continue;
6815         }
6816         break;
6817       }
6818       // Count Gp/Vr fixed arguments to their respective offsets, but don't
6819       // bother to actually store a shadow.
6820       if (IsFixed)
6821         continue;
6822       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
6823     }
6824     Constant *OverflowSize =
6825         ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
6826     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
6827   }
6828 
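  // Editorial sketch, not part of the pass: the resulting __msan_va_arg_tls
  // layout on AArch64 is
  //
  //   [0, 64)    shadow of x0..x7 (8 bytes each)
  //   [64, 192)  shadow of v0..v7 (16 bytes each)
  //   [192, ...) shadow of stacked (overflow) arguments
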
6829   // Retrieve a va_list field of 'void*' size.
6830   Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
6831     Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
6832         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
6833                       ConstantInt::get(MS.IntptrTy, offset)),
6834         MS.PtrTy);
6835     return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
6836   }
6837 
6838   // Retrieve a va_list field of 'int' size.
6839   Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
6840     Value *SaveAreaPtr = IRB.CreateIntToPtr(
6841         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
6842                       ConstantInt::get(MS.IntptrTy, offset)),
6843         MS.PtrTy);
6844     Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
6845     return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
6846   }
6847 
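  // Editorial note: the offsets used with getVAField64()/getVAField32() in
  // finalizeInstrumentation() below correspond to the AAPCS64 va_list layout
  // (assumed here):
  //
  //   typedef struct {
  //     void *__stack;   // offset 0:  next stacked argument
  //     void *__gr_top;  // offset 8:  end of the GR save area
  //     void *__vr_top;  // offset 16: end of the VR save area
  //     int __gr_offs;   // offset 24: negative offset from __gr_top
  //     int __vr_offs;   // offset 28: negative offset from __vr_top
  //   } va_list;
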
6848   void finalizeInstrumentation() override {
6849     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
6850            "finalizeInstrumentation called twice");
6851     if (!VAStartInstrumentationList.empty()) {
6852       // If there is a va_start in this function, make a backup copy of
6853       // va_arg_tls somewhere in the function entry block.
6854       IRBuilder<> IRB(MSV.FnPrologueEnd);
6855       VAArgOverflowSize =
6856           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
6857       Value *CopySize = IRB.CreateAdd(
6858           ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
6859       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
6860       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
6861       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
6862                        CopySize, kShadowTLSAlignment, false);
6863 
6864       Value *SrcSize = IRB.CreateBinaryIntrinsic(
6865           Intrinsic::umin, CopySize,
6866           ConstantInt::get(MS.IntptrTy, kParamTLSSize));
6867       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
6868                        kShadowTLSAlignment, SrcSize);
6869     }
6870 
6871     Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
6872     Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
6873 
6874     // Instrument va_start, copy va_list shadow from the backup copy of
6875     // the TLS contents.
6876     for (CallInst *OrigInst : VAStartInstrumentationList) {
6877       NextNodeIRBuilder IRB(OrigInst);
6878 
6879       Value *VAListTag = OrigInst->getArgOperand(0);
6880 
6881       // The variadic ABI for AArch64 creates two areas to save the incoming
6882       // argument registers (one for the 64-bit general registers x0-x7 and
6883       // another for the 128-bit FP/SIMD registers v0-v7).
6884       // We then need to propagate the shadow arguments to both regions,
6885       // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
6886       // The remaining arguments are saved in the shadow for 'va::stack'.
6887       // One caveat is that only the non-named arguments need to be
6888       // propagated; however, the call site instrumentation saves 'all' the
6889       // arguments. So to copy the shadow values from the va_arg TLS array
6890       // we need to adjust the offsets for both the GR and VR fields based on
6891       // the __{gr,vr}_offs values (since their stores are based on the
6892       // incoming named arguments).
6893       Type *RegSaveAreaPtrTy = IRB.getPtrTy();
6894 
6895       // Read the stack pointer from the va_list.
6896       Value *StackSaveAreaPtr =
6897           IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);
6898 
6899       // Read both the __gr_top and __gr_off and add them up.
6900       Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
6901       Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
6902 
6903       Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
6904           IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);
6905 
6906       // Read both the __vr_top and __vr_off and add them up.
6907       Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
6908       Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
6909 
6910       Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
6911           IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);
6912 
6913       // The instrumentation does not know how many named arguments are being
6914       // used, and at the call site all the arguments were saved. Since
6915       // __gr_offs is defined as '0 - ((8 - named_gr) * 8)', the idea is to
6916       // propagate only the variadic shadow, skipping bytes of named arguments.
6917       Value *GrRegSaveAreaShadowPtrOff =
6918           IRB.CreateAdd(GrArgSize, GrOffSaveArea);
6919 
6920       Value *GrRegSaveAreaShadowPtr =
6921           MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
6922                                  Align(8), /*isStore*/ true)
6923               .first;
6924 
6925       Value *GrSrcPtr =
6926           IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
6927       Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
6928 
6929       IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
6930                        GrCopySize);
6931 
6932       // Again, but for FP/SIMD values.
6933       Value *VrRegSaveAreaShadowPtrOff =
6934           IRB.CreateAdd(VrArgSize, VrOffSaveArea);
6935 
6936       Value *VrRegSaveAreaShadowPtr =
6937           MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
6938                                  Align(8), /*isStore*/ true)
6939               .first;
6940 
6941       Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
6942           IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
6943                                    IRB.getInt32(AArch64VrBegOffset)),
6944           VrRegSaveAreaShadowPtrOff);
6945       Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
6946 
6947       IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
6948                        VrCopySize);
6949 
6950       // And finally for remaining arguments.
6951       Value *StackSaveAreaShadowPtr =
6952           MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
6953                                  Align(16), /*isStore*/ true)
6954               .first;
6955 
6956       Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
6957           VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
6958 
6959       IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
6960                        Align(16), VAArgOverflowSize);
6961     }
6962   }
6963 };
6964 
6965 /// PowerPC64-specific implementation of VarArgHelper.
6966 struct VarArgPowerPC64Helper : public VarArgHelperBase {
6967   AllocaInst *VAArgTLSCopy = nullptr;
6968   Value *VAArgSize = nullptr;
6969 
6970   VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
6971                         MemorySanitizerVisitor &MSV)
6972       : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/8) {}
6973 
6974   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
6975     // For PowerPC, we need to deal with alignment of stack arguments -
6976     // they are mostly aligned to 8 bytes, but vectors and i128 arrays
6977     // are aligned to 16 bytes, and byvals can be aligned to 8 or 16 bytes.
6978     // For that reason, we compute the current offset from the stack pointer
6979     // (which is always properly aligned) and the offset of the first vararg,
6980     // then subtract them.
6981     unsigned VAArgBase;
6982     Triple TargetTriple(F.getParent()->getTargetTriple());
6983     // The parameter save area starts at 48 bytes from the frame pointer for
6984     // ABIv1, and at 32 bytes for ABIv2. This is usually determined by target
6985     // endianness, but in theory could be overridden by a function attribute.
6986     if (TargetTriple.isPPC64ELFv2ABI())
6987       VAArgBase = 32;
6988     else
6989       VAArgBase = 48;
6990     unsigned VAArgOffset = VAArgBase;
6991     const DataLayout &DL = F.getDataLayout();
6992     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
6993       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
6994       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
6995       if (IsByVal) {
6996         assert(A->getType()->isPointerTy());
6997         Type *RealTy = CB.getParamByValType(ArgNo);
6998         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
6999         Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
7000         if (ArgAlign < 8)
7001           ArgAlign = Align(8);
7002         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
7003         if (!IsFixed) {
7004           Value *Base =
7005               getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
7006           if (Base) {
7007             Value *AShadowPtr, *AOriginPtr;
7008             std::tie(AShadowPtr, AOriginPtr) =
7009                 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
7010                                        kShadowTLSAlignment, /*isStore*/ false);
7011 
7012             IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
7013                              kShadowTLSAlignment, ArgSize);
7014           }
7015         }
7016         VAArgOffset += alignTo(ArgSize, Align(8));
7017       } else {
7018         Value *Base;
7019         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
7020         Align ArgAlign = Align(8);
7021         if (A->getType()->isArrayTy()) {
7022           // Arrays are aligned to element size, except for long double
7023           // arrays, which are aligned to 8 bytes.
7024           Type *ElementTy = A->getType()->getArrayElementType();
7025           if (!ElementTy->isPPC_FP128Ty())
7026             ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
7027         } else if (A->getType()->isVectorTy()) {
7028           // Vectors are naturally aligned.
7029           ArgAlign = Align(ArgSize);
7030         }
7031         if (ArgAlign < 8)
7032           ArgAlign = Align(8);
7033         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
7034         if (DL.isBigEndian()) {
7035           // Adjust the shadow for arguments with size < 8 to match the
7036           // placement of bits in a big-endian system.
7037           if (ArgSize < 8)
7038             VAArgOffset += (8 - ArgSize);
7039         }
7040         if (!IsFixed) {
7041           Base =
7042               getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
7043           if (Base)
7044             IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
7045         }
7046         VAArgOffset += ArgSize;
7047         VAArgOffset = alignTo(VAArgOffset, Align(8));
7048       }
7049       if (IsFixed)
7050         VAArgBase = VAArgOffset;
7051     }
7052 
7053     Constant *TotalVAArgSize =
7054         ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
7055     // Here we use VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a
7056     // new class member; i.e., it holds the total size of all varargs.
7057     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
7058   }
7059 
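  // Editorial sketch, not part of the pass: for a hypothetical call using
  // the AltiVec extension,
  //
  //   void f(int, ...);
  //   f(0, (vector int){1, 2, 3, 4});
  //
  // the vector argument is 16 bytes and naturally aligned, so visitCallBase
  // above rounds VAArgOffset up to a 16-byte boundary before assigning the
  // argument's shadow slot.
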
7060   void finalizeInstrumentation() override {
7061     assert(!VAArgSize && !VAArgTLSCopy &&
7062            "finalizeInstrumentation called twice");
7063     IRBuilder<> IRB(MSV.FnPrologueEnd);
7064     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
7065     Value *CopySize = VAArgSize;
7066 
7067     if (!VAStartInstrumentationList.empty()) {
7068       // If there is a va_start in this function, make a backup copy of
7069       // va_arg_tls somewhere in the function entry block.
7070 
7071       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7072       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
7073       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
7074                        CopySize, kShadowTLSAlignment, false);
7075 
7076       Value *SrcSize = IRB.CreateBinaryIntrinsic(
7077           Intrinsic::umin, CopySize,
7078           ConstantInt::get(IRB.getInt64Ty(), kParamTLSSize));
7079       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
7080                        kShadowTLSAlignment, SrcSize);
7081     }
7082 
7083     // Instrument va_start.
7084     // Copy va_list shadow from the backup copy of the TLS contents.
7085     for (CallInst *OrigInst : VAStartInstrumentationList) {
7086       NextNodeIRBuilder IRB(OrigInst);
7087       Value *VAListTag = OrigInst->getArgOperand(0);
7088       Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
7089 
7090       RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
7091 
7092       Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
7093       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
7094       const DataLayout &DL = F.getDataLayout();
7095       unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
7096       const Align Alignment = Align(IntptrSize);
7097       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
7098           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
7099                                  Alignment, /*isStore*/ true);
7100       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
7101                        CopySize);
7102     }
7103   }
7104 };
7105 
7106 /// PowerPC32-specific implementation of VarArgHelper.
7107 struct VarArgPowerPC32Helper : public VarArgHelperBase {
7108   AllocaInst *VAArgTLSCopy = nullptr;
7109   Value *VAArgSize = nullptr;
7110 
7111   VarArgPowerPC32Helper(Function &F, MemorySanitizer &MS,
7112                         MemorySanitizerVisitor &MSV)
7113       : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/12) {}
7114 
7115   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
7116     unsigned VAArgBase;
7117     // The parameter save area starts 8 bytes from the frame pointer on PPC32.
7118     VAArgBase = 8;
7119     unsigned VAArgOffset = VAArgBase;
7120     const DataLayout &DL = F.getDataLayout();
7121     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
7122     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
7123       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
7124       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
7125       if (IsByVal) {
7126         assert(A->getType()->isPointerTy());
7127         Type *RealTy = CB.getParamByValType(ArgNo);
7128         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
7129         Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
7130         if (ArgAlign < IntptrSize)
7131           ArgAlign = Align(IntptrSize);
7132         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
7133         if (!IsFixed) {
7134           Value *Base =
7135               getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
7136           if (Base) {
7137             Value *AShadowPtr, *AOriginPtr;
7138             std::tie(AShadowPtr, AOriginPtr) =
7139                 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
7140                                        kShadowTLSAlignment, /*isStore*/ false);
7141 
7142             IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
7143                              kShadowTLSAlignment, ArgSize);
7144           }
7145         }
7146         VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
7147       } else {
7148         Value *Base;
7149         Type *ArgTy = A->getType();
7150 
7151         // On PPC32, floating-point variable arguments are stored in a separate
7152         // area: fp_save_area = reg_save_area + 4*8. We do not copy shadow for
7153         // them, as they will be found when checking call arguments.
7154         if (!ArgTy->isFloatingPointTy()) {
7155           uint64_t ArgSize = DL.getTypeAllocSize(ArgTy);
7156           Align ArgAlign = Align(IntptrSize);
7157           if (ArgTy->isArrayTy()) {
7158             // Arrays are aligned to element size, except for long double
7159             // arrays, which are aligned to 8 bytes.
7160             Type *ElementTy = ArgTy->getArrayElementType();
7161             if (!ElementTy->isPPC_FP128Ty())
7162               ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
7163           } else if (ArgTy->isVectorTy()) {
7164             // Vectors are naturally aligned.
7165             ArgAlign = Align(ArgSize);
7166           }
7167           if (ArgAlign < IntptrSize)
7168             ArgAlign = Align(IntptrSize);
7169           VAArgOffset = alignTo(VAArgOffset, ArgAlign);
7170           if (DL.isBigEndian()) {
7171             // Adjust the shadow for arguments with size < IntptrSize to match
7172             // the placement of bits in a big-endian system.
7173             if (ArgSize < IntptrSize)
7174               VAArgOffset += (IntptrSize - ArgSize);
7175           }
7176           if (!IsFixed) {
7177             Base = getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase,
7178                                              ArgSize);
7179             if (Base)
7180               IRB.CreateAlignedStore(MSV.getShadow(A), Base,
7181                                      kShadowTLSAlignment);
7182           }
7183           VAArgOffset += ArgSize;
7184           VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
7185         }
7186       }
7187     }
7188 
7189     Constant *TotalVAArgSize =
7190         ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
7191     // Here we use VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a
7192     // new class member; i.e., it holds the total size of all varargs.
7193     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
7194   }
7195 
7196   void finalizeInstrumentation() override {
7197     assert(!VAArgSize && !VAArgTLSCopy &&
7198            "finalizeInstrumentation called twice");
7199     IRBuilder<> IRB(MSV.FnPrologueEnd);
7200     VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
7201     Value *CopySize = VAArgSize;
7202 
7203     if (!VAStartInstrumentationList.empty()) {
7204       // If there is a va_start in this function, make a backup copy of
7205       // va_arg_tls somewhere in the function entry block.
7206 
7207       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7208       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
7209       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
7210                        CopySize, kShadowTLSAlignment, false);
7211 
7212       Value *SrcSize = IRB.CreateBinaryIntrinsic(
7213           Intrinsic::umin, CopySize,
7214           ConstantInt::get(MS.IntptrTy, kParamTLSSize));
7215       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
7216                        kShadowTLSAlignment, SrcSize);
7217     }
7218 
7219     // Instrument va_start.
7220     // Copy va_list shadow from the backup copy of the TLS contents.
7221     for (CallInst *OrigInst : VAStartInstrumentationList) {
7222       NextNodeIRBuilder IRB(OrigInst);
7223       Value *VAListTag = OrigInst->getArgOperand(0);
7224       Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
7225       Value *RegSaveAreaSize = CopySize;
7226 
7227       // On PPC32, va_list_tag is a struct.
7228       RegSaveAreaPtrPtr =
7229           IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
7230 
7231       // On PPC32, the reg_save_area can only hold 32 bytes of data.
7232       RegSaveAreaSize = IRB.CreateBinaryIntrinsic(
7233           Intrinsic::umin, CopySize, ConstantInt::get(MS.IntptrTy, 32));
7234 
7235       RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
7236       Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
7237 
7238       const DataLayout &DL = F.getDataLayout();
7239       unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
7240       const Align Alignment = Align(IntptrSize);
7241 
7242       { // Copy reg save area
7243         Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
7244         std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
7245             MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
7246                                    Alignment, /*isStore*/ true);
7247         IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
7248                          Alignment, RegSaveAreaSize);
7249 
7250         RegSaveAreaShadowPtr =
7251             IRB.CreatePtrToInt(RegSaveAreaShadowPtr, MS.IntptrTy);
7252         Value *FPSaveArea = IRB.CreateAdd(RegSaveAreaShadowPtr,
7253                                           ConstantInt::get(MS.IntptrTy, 32));
7254         FPSaveArea = IRB.CreateIntToPtr(FPSaveArea, MS.PtrTy);
7255         // We fill the FP shadow with zeroes, as uninitialized FP args should
7256         // have been found during the call base check.
7257         IRB.CreateMemSet(FPSaveArea, ConstantInt::getNullValue(IRB.getInt8Ty()),
7258                          ConstantInt::get(MS.IntptrTy, 32), Alignment);
7259       }
7260 
7261       { // Copy overflow area
7262         // RegSaveAreaSize is min(CopySize, 32) -> no overflow can occur
7263         Value *OverflowAreaSize = IRB.CreateSub(CopySize, RegSaveAreaSize);
7264 
7265         Value *OverflowAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
7266         OverflowAreaPtrPtr =
7267             IRB.CreateAdd(OverflowAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 4));
7268         OverflowAreaPtrPtr = IRB.CreateIntToPtr(OverflowAreaPtrPtr, MS.PtrTy);
7269 
7270         Value *OverflowAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowAreaPtrPtr);
7271 
7272         Value *OverflowAreaShadowPtr, *OverflowAreaOriginPtr;
7273         std::tie(OverflowAreaShadowPtr, OverflowAreaOriginPtr) =
7274             MSV.getShadowOriginPtr(OverflowAreaPtr, IRB, IRB.getInt8Ty(),
7275                                    Alignment, /*isStore*/ true);
7276 
7277         Value *OverflowVAArgTLSCopyPtr =
7278             IRB.CreatePtrToInt(VAArgTLSCopy, MS.IntptrTy);
7279         OverflowVAArgTLSCopyPtr =
7280             IRB.CreateAdd(OverflowVAArgTLSCopyPtr, RegSaveAreaSize);
7281 
7282         OverflowVAArgTLSCopyPtr =
7283             IRB.CreateIntToPtr(OverflowVAArgTLSCopyPtr, MS.PtrTy);
7284         IRB.CreateMemCpy(OverflowAreaShadowPtr, Alignment,
7285                          OverflowVAArgTLSCopyPtr, Alignment, OverflowAreaSize);
7286       }
7287     }
7288   }
7289 };
7290 
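// Editorial note: the offsets 4 and 8 used by the PPC32 helper above come
// from the SVR4 PowerPC va_list layout (assumed here):
//
//   typedef struct {
//     unsigned char gpr;        // offset 0
//     unsigned char fpr;        // offset 1
//     unsigned short reserved;  // offset 2
//     void *overflow_arg_area;  // offset 4
//     void *reg_save_area;      // offset 8
//   } va_list[1];               // 12 bytes, matching VAListTagSize
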
7291 /// SystemZ-specific implementation of VarArgHelper.
7292 struct VarArgSystemZHelper : public VarArgHelperBase {
7293   static const unsigned SystemZGpOffset = 16;
7294   static const unsigned SystemZGpEndOffset = 56;
7295   static const unsigned SystemZFpOffset = 128;
7296   static const unsigned SystemZFpEndOffset = 160;
7297   static const unsigned SystemZMaxVrArgs = 8;
7298   static const unsigned SystemZRegSaveAreaSize = 160;
7299   static const unsigned SystemZOverflowOffset = 160;
7300   static const unsigned SystemZVAListTagSize = 32;
7301   static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
7302   static const unsigned SystemZRegSaveAreaPtrOffset = 24;
7303 
7304   bool IsSoftFloatABI;
7305   AllocaInst *VAArgTLSCopy = nullptr;
7306   AllocaInst *VAArgTLSOriginCopy = nullptr;
7307   Value *VAArgOverflowSize = nullptr;
7308 
7309   enum class ArgKind {
7310     GeneralPurpose,
7311     FloatingPoint,
7312     Vector,
7313     Memory,
7314     Indirect,
7315   };
7316 
7317   enum class ShadowExtension { None, Zero, Sign };
7318 
7319   VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
7320                       MemorySanitizerVisitor &MSV)
7321       : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
7322         IsSoftFloatABI(F.getFnAttribute("use-soft-float").getValueAsBool()) {}
7323 
7324   ArgKind classifyArgument(Type *T) {
7325     // T is a SystemZABIInfo::classifyArgumentType() output, and there are
7326     // only a few possibilities of what it can be. In particular, enums, single
7327     // element structs and large types have already been taken care of.
7328 
7329     // Some i128 and fp128 arguments are converted to pointers only in the
7330     // back end.
7331     if (T->isIntegerTy(128) || T->isFP128Ty())
7332       return ArgKind::Indirect;
7333     if (T->isFloatingPointTy())
7334       return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
7335     if (T->isIntegerTy() || T->isPointerTy())
7336       return ArgKind::GeneralPurpose;
7337     if (T->isVectorTy())
7338       return ArgKind::Vector;
7339     return ArgKind::Memory;
7340   }
7341 
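  // Editorial sketch, not part of the pass: examples of the classification:
  //   i32, ptr    -> GeneralPurpose
  //   double      -> FloatingPoint (GeneralPurpose under soft-float)
  //   i128, fp128 -> Indirect (passed via a hidden pointer)
  //   <4 x i32>   -> Vector
  //   other types -> Memory
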
7342   ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
7343     // ABI says: "One of the simple integer types no more than 64 bits wide.
7344     // ... If such an argument is shorter than 64 bits, replace it by a full
7345     // 64-bit integer representing the same number, using sign or zero
7346     // extension". Shadow for an integer argument has the same type as the
7347     // argument itself, so it can be sign or zero extended as well.
7348     bool ZExt = CB.paramHasAttr(ArgNo, Attribute::ZExt);
7349     bool SExt = CB.paramHasAttr(ArgNo, Attribute::SExt);
7350     if (ZExt) {
7351       assert(!SExt);
7352       return ShadowExtension::Zero;
7353     }
7354     if (SExt) {
7355       assert(!ZExt);
7356       return ShadowExtension::Sign;
7357     }
7358     return ShadowExtension::None;
7359   }
7360 
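  // Editorial sketch, not part of the pass: for a hypothetical call
  //
  //   void f(const char *, ...);
  //   short s = -1;
  //   f("%d", s); // promoted to int; Clang may mark it 'signext' here
  //
  // getShadowExtension() would return ShadowExtension::Sign, and the
  // argument's shadow is sign-extended to 64 bits before being stored,
  // matching how the value occupies a full 64-bit GPR slot.
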
7361   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
7362     unsigned GpOffset = SystemZGpOffset;
7363     unsigned FpOffset = SystemZFpOffset;
7364     unsigned VrIndex = 0;
7365     unsigned OverflowOffset = SystemZOverflowOffset;
7366     const DataLayout &DL = F.getDataLayout();
7367     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
7368       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
7369       // SystemZABIInfo does not produce ByVal parameters.
7370       assert(!CB.paramHasAttr(ArgNo, Attribute::ByVal));
7371       Type *T = A->getType();
7372       ArgKind AK = classifyArgument(T);
7373       if (AK == ArgKind::Indirect) {
7374         T = MS.PtrTy;
7375         AK = ArgKind::GeneralPurpose;
7376       }
7377       if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
7378         AK = ArgKind::Memory;
7379       if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
7380         AK = ArgKind::Memory;
7381       if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
7382         AK = ArgKind::Memory;
7383       Value *ShadowBase = nullptr;
7384       Value *OriginBase = nullptr;
7385       ShadowExtension SE = ShadowExtension::None;
7386       switch (AK) {
7387       case ArgKind::GeneralPurpose: {
7388         // Always keep track of GpOffset, but store shadow only for varargs.
7389         uint64_t ArgSize = 8;
7390         if (GpOffset + ArgSize <= kParamTLSSize) {
7391           if (!IsFixed) {
7392             SE = getShadowExtension(CB, ArgNo);
7393             uint64_t GapSize = 0;
7394             if (SE == ShadowExtension::None) {
7395               uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
7396               assert(ArgAllocSize <= ArgSize);
7397               GapSize = ArgSize - ArgAllocSize;
7398             }
7399             ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
7400             if (MS.TrackOrigins)
7401               OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
7402           }
7403           GpOffset += ArgSize;
7404         } else {
7405           GpOffset = kParamTLSSize;
7406         }
7407         break;
7408       }
7409       case ArgKind::FloatingPoint: {
7410         // Always keep track of FpOffset, but store shadow only for varargs.
7411         uint64_t ArgSize = 8;
7412         if (FpOffset + ArgSize <= kParamTLSSize) {
7413           if (!IsFixed) {
7414             // PoP says: "A short floating-point datum requires only the
7415             // left-most 32 bit positions of a floating-point register".
7416             // Therefore, in contrast to AK_GeneralPurpose and AK_Memory,
7417             // don't extend shadow and don't mind the gap.
7418             ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
7419             if (MS.TrackOrigins)
7420               OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
7421           }
7422           FpOffset += ArgSize;
7423         } else {
7424           FpOffset = kParamTLSSize;
7425         }
7426         break;
7427       }
7428       case ArgKind::Vector: {
7429         // Keep track of VrIndex. No need to store shadow, since vector varargs
7430         // go through AK_Memory.
7431         assert(IsFixed);
7432         VrIndex++;
7433         break;
7434       }
7435       case ArgKind::Memory: {
7436         // Keep track of OverflowOffset and store shadow only for varargs.
7437         // Ignore fixed args, since we need to copy only the vararg portion of
7438         // the overflow area shadow.
7439         if (!IsFixed) {
7440           uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
7441           uint64_t ArgSize = alignTo(ArgAllocSize, 8);
7442           if (OverflowOffset + ArgSize <= kParamTLSSize) {
7443             SE = getShadowExtension(CB, ArgNo);
7444             uint64_t GapSize =
7445                 SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
            ShadowBase =
                getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase =
                  getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
            OverflowOffset += ArgSize;
          } else {
            OverflowOffset = kParamTLSSize;
          }
        }
        break;
      }
      case ArgKind::Indirect:
        llvm_unreachable("Indirect must be converted to GeneralPurpose");
      }
      if (ShadowBase == nullptr)
        continue;
      Value *Shadow = MSV.getShadow(A);
      if (SE != ShadowExtension::None)
        Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
                                      /*Signed*/ SE == ShadowExtension::Sign);
      ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
      IRB.CreateStore(Shadow, ShadowBase);
      if (MS.TrackOrigins) {
        Value *Origin = MSV.getOrigin(A);
        TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                        kMinOriginAlignment);
      }
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

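  // For orientation (an illustrative sketch of the s390x ELF ABI, not code
  // from this file): the va_list tag is roughly
  //   { long __gpr; long __fpr; void *__overflow_arg_area;
  //     void *__reg_save_area; }
  // which is why the helpers below read the two area pointers at fixed
  // offsets from VAListTag.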
  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
        MS.PtrTy);
    Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ true);
    // TODO(iii): copy only fragments filled by visitCallBase()
    // TODO(iii): support packed-stack && !use-soft-float
    // For use-soft-float functions, it is enough to copy just the GPRs.
    unsigned RegSaveAreaSize =
        IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                     RegSaveAreaSize);
    if (MS.TrackOrigins)
      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                       Alignment, RegSaveAreaSize);
  }

  // FIXME: This implementation limits OverflowOffset to kParamTLSSize, so we
  // don't know the real overflow size and can't clear shadow beyond
  // kParamTLSSize.
  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
        MS.PtrTy);
    Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                           SystemZOverflowOffset);
    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                     VAArgOverflowSize);
    if (MS.TrackOrigins) {
      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                      SystemZOverflowOffset);
      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
    }
  }

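  // Overall flow: visitCallBase() writes each vararg's shadow into the
  // vararg TLS; finalizeInstrumentation() snapshots that TLS at function
  // entry and, at every va_start, copies the snapshot into the shadow of the
  // register save area and the overflow argument area.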
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

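      // The TLS buffer holds at most kParamTLSSize bytes; clamp the copy so
      // we never read past it (the memset above leaves any tail clean).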
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      copyRegSaveArea(IRB, VAListTag);
      copyOverflowArea(IRB, VAListTag);
    }
  }
};

/// i386-specific implementation of VarArgHelper.
struct VarArgI386Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgI386Helper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned VAArgOffset = 0;
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);

            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
          VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
        }
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow for arguments smaller than IntptrSize to match
          // the placement of bits on a big-endian system.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
          VAArgOffset += ArgSize;
          VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
        }
      }
    }

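    // Illustrative example: for varargs (i32, double) on i386 (IntptrSize ==
    // 4), the i32 shadow is stored at offset 0 and the double shadow at
    // offset 4, so TotalVAArgSize below ends up as 12.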
    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // Reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
    // class member; here it holds the total size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
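      // On i386 (System V), va_list is effectively a single pointer to the
      // next stack argument, so the area pointer is read from offset 0 of
      // the va_list tag.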
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       CopySize);
    }
  }
};

/// Implementation of VarArgHelper that is used for ARM32, MIPS, RISCV,
/// LoongArch64.
struct VarArgGenericHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgGenericHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV, const unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      if (IsFixed)
        continue;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      if (DL.isBigEndian()) {
        // Adjust the shadow for arguments smaller than IntptrSize to match
        // the placement of bits on a big-endian system.
        if (ArgSize < IntptrSize)
          VAArgOffset += (IntptrSize - ArgSize);
      }
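      // E.g. on a 64-bit big-endian target (IntptrSize == 8), an i32 vararg
      // occupies the high-addressed half of its 8-byte slot, so the offset
      // above is advanced by 4 to line the shadow up with the data bytes.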
      Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // Reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
    // class member; here it holds the total size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
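      // For the targets using this helper, the va_list tag likewise starts
      // with a pointer into the argument save area, read here from offset 0.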
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       CopySize);
    }
  }
};

// ARM32, Loongarch64, MIPS and RISCV share the same calling conventions
// regarding VAArgs.
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;

/// A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};

} // end anonymous namespace

static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  // VarArg handling is implemented only for the targets checked below; on
  // any other target the no-op helper is used and false positives are
  // possible.
  Triple TargetTriple(Func.getParent()->getTargetTriple());

  if (TargetTriple.getArch() == Triple::x86)
    return new VarArgI386Helper(Func, Msan, Visitor);

  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);

  if (TargetTriple.isARM())
    return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isAArch64())
    return new VarArgAArch64Helper(Func, Msan, Visitor);

  if (TargetTriple.isSystemZ())
    return new VarArgSystemZHelper(Func, Msan, Visitor);

  // On PowerPC32 VAListTag is a struct
  // {char, char, i16 padding, char *, char *}
  if (TargetTriple.isPPC32())
    return new VarArgPowerPC32Helper(Func, Msan, Visitor);

  if (TargetTriple.isPPC64())
    return new VarArgPowerPC64Helper(Func, Msan, Visitor);

  if (TargetTriple.isRISCV32())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isRISCV64())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

  if (TargetTriple.isMIPS32())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);

  if (TargetTriple.isMIPS64())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

  if (TargetTriple.isLoongArch64())
    return new VarArgLoongArch64Helper(Func, Msan, Visitor,
                                       /*VAListTagSize=*/8);

  return new VarArgNoOpHelper(Func, Msan, Visitor);
}

bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  if (!CompileKernel && F.getName() == kMsanModuleCtorName)
    return false;

  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  MemorySanitizerVisitor Visitor(F, *this, TLI);

  // Clear out the Memory and Speculatable attributes: the added shadow
  // loads/stores and runtime calls invalidate them.
  AttributeMask B;
  B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
  F.removeFnAttrs(B);

  return Visitor.runOnFunction();
}