1 //===- AddressSanitizer.cpp - memory error detector -----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is part of AddressSanitizer, a basic correctness checker for
10 // memory accesses.
11 // Details of the algorithm:
12 //  https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
13 //
14 // FIXME: This sanitizer does not yet handle scalable vectors
15 //
16 //===----------------------------------------------------------------------===//
17 
18 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/DenseMap.h"
21 #include "llvm/ADT/DepthFirstIterator.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/ADT/StringExtras.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/Analysis/GlobalsModRef.h"
29 #include "llvm/Analysis/MemoryBuiltins.h"
30 #include "llvm/Analysis/StackSafetyAnalysis.h"
31 #include "llvm/Analysis/TargetLibraryInfo.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/BinaryFormat/MachO.h"
34 #include "llvm/Demangle/Demangle.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/Comdat.h"
39 #include "llvm/IR/Constant.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DIBuilder.h"
42 #include "llvm/IR/DataLayout.h"
43 #include "llvm/IR/DebugInfoMetadata.h"
44 #include "llvm/IR/DebugLoc.h"
45 #include "llvm/IR/DerivedTypes.h"
46 #include "llvm/IR/EHPersonalities.h"
47 #include "llvm/IR/Function.h"
48 #include "llvm/IR/GlobalAlias.h"
49 #include "llvm/IR/GlobalValue.h"
50 #include "llvm/IR/GlobalVariable.h"
51 #include "llvm/IR/IRBuilder.h"
52 #include "llvm/IR/InlineAsm.h"
53 #include "llvm/IR/InstVisitor.h"
54 #include "llvm/IR/InstrTypes.h"
55 #include "llvm/IR/Instruction.h"
56 #include "llvm/IR/Instructions.h"
57 #include "llvm/IR/IntrinsicInst.h"
58 #include "llvm/IR/Intrinsics.h"
59 #include "llvm/IR/LLVMContext.h"
60 #include "llvm/IR/MDBuilder.h"
61 #include "llvm/IR/Metadata.h"
62 #include "llvm/IR/Module.h"
63 #include "llvm/IR/Type.h"
64 #include "llvm/IR/Use.h"
65 #include "llvm/IR/Value.h"
66 #include "llvm/MC/MCSectionMachO.h"
67 #include "llvm/Support/Casting.h"
68 #include "llvm/Support/CommandLine.h"
69 #include "llvm/Support/Debug.h"
70 #include "llvm/Support/ErrorHandling.h"
71 #include "llvm/Support/MathExtras.h"
72 #include "llvm/Support/raw_ostream.h"
73 #include "llvm/TargetParser/Triple.h"
74 #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
75 #include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
76 #include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
77 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
78 #include "llvm/Transforms/Utils/Instrumentation.h"
79 #include "llvm/Transforms/Utils/Local.h"
80 #include "llvm/Transforms/Utils/ModuleUtils.h"
81 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
82 #include <algorithm>
83 #include <cassert>
84 #include <cstddef>
85 #include <cstdint>
86 #include <iomanip>
87 #include <limits>
88 #include <sstream>
89 #include <string>
90 #include <tuple>
91 
92 using namespace llvm;
93 
94 #define DEBUG_TYPE "asan"
95 
96 static const uint64_t kDefaultShadowScale = 3;
97 static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
98 static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
99 static const uint64_t kDynamicShadowSentinel =
100     std::numeric_limits<uint64_t>::max();
101 static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF;  // < 2G.
102 static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
103 static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
104 static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
105 static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
106 static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
107 static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
108 static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
109 static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
110 static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
111 static const uint64_t kRISCV64_ShadowOffset64 = kDynamicShadowSentinel;
112 static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
113 static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
114 static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
115 static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
116 static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
117 static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
118 static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
119 static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
120 static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
121 static const uint64_t kWebAssemblyShadowOffset = 0;
122 
123 // The shadow memory space is dynamically allocated.
124 static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;
125 
126 static const size_t kMinStackMallocSize = 1 << 6;   // 64B
127 static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
128 static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
129 static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
130 
131 const char kAsanModuleCtorName[] = "asan.module_ctor";
132 const char kAsanModuleDtorName[] = "asan.module_dtor";
133 static const uint64_t kAsanCtorAndDtorPriority = 1;
134 // On Emscripten, the system needs more than one priority for constructors.
135 static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
136 const char kAsanReportErrorTemplate[] = "__asan_report_";
137 const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
138 const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
139 const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
140 const char kAsanUnregisterImageGlobalsName[] =
141     "__asan_unregister_image_globals";
142 const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
143 const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
144 const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
145 const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
146 const char kAsanInitName[] = "__asan_init";
147 const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
148 const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
149 const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
150 const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
151 static const int kMaxAsanStackMallocSizeClass = 10;
152 const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
153 const char kAsanStackMallocAlwaysNameTemplate[] =
154     "__asan_stack_malloc_always_";
155 const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
156 const char kAsanGenPrefix[] = "___asan_gen_";
157 const char kODRGenPrefix[] = "__odr_asan_gen_";
158 const char kSanCovGenPrefix[] = "__sancov_gen_";
159 const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
160 const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
161 const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";
162 
163 // The ASan version script has an __asan_* wildcard. The triple underscore
164 // prevents a linker (gold) warning about attempting to export a local symbol.
165 const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";
166 
167 const char kAsanOptionDetectUseAfterReturn[] =
168     "__asan_option_detect_stack_use_after_return";
169 
170 const char kAsanShadowMemoryDynamicAddress[] =
171     "__asan_shadow_memory_dynamic_address";
172 
173 const char kAsanAllocaPoison[] = "__asan_alloca_poison";
174 const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";
175 
176 const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
177 const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
178 const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
179 const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";
180 
181 // Access sizes are powers of two: 1, 2, 4, 8, 16.
182 static const size_t kNumberOfAccessSizes = 5;
183 
184 static const uint64_t kAllocaRzSize = 32;
185 
186 // ASanAccessInfo implementation constants.
187 constexpr size_t kCompileKernelShift = 0;
188 constexpr size_t kCompileKernelMask = 0x1;
189 constexpr size_t kAccessSizeIndexShift = 1;
190 constexpr size_t kAccessSizeIndexMask = 0xf;
191 constexpr size_t kIsWriteShift = 5;
192 constexpr size_t kIsWriteMask = 0x1;
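// As a worked example (purely illustrative, using only the shifts and masks
// above): packing a 4-byte userspace write gives
//   AccessSizeIndex = log2(4) = 2
//   Packed = (1 << kIsWriteShift) + (0 << kCompileKernelShift)
//          + (2 << kAccessSizeIndexShift)
//          = 0x20 + 0x0 + 0x4 = 0x24
// and the ASanAccessInfo constructors further below unpack it the same way.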
193 
194 // Command-line flags.
195 
196 static cl::opt<bool> ClEnableKasan(
197     "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
198     cl::Hidden, cl::init(false));
199 
200 static cl::opt<bool> ClRecover(
201     "asan-recover",
202     cl::desc("Enable recovery mode (continue-after-error)."),
203     cl::Hidden, cl::init(false));
204 
205 static cl::opt<bool> ClInsertVersionCheck(
206     "asan-guard-against-version-mismatch",
207     cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
208     cl::init(true));
209 
210 // This flag may need to be replaced with -f[no-]asan-reads.
211 static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
212                                        cl::desc("instrument read instructions"),
213                                        cl::Hidden, cl::init(true));
214 
215 static cl::opt<bool> ClInstrumentWrites(
216     "asan-instrument-writes", cl::desc("instrument write instructions"),
217     cl::Hidden, cl::init(true));
218 
219 static cl::opt<bool>
220     ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
221                      cl::desc("Use Stack Safety analysis results"),
222                      cl::Optional);
223 
224 static cl::opt<bool> ClInstrumentAtomics(
225     "asan-instrument-atomics",
226     cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
227     cl::init(true));
228 
229 static cl::opt<bool>
230     ClInstrumentByval("asan-instrument-byval",
231                       cl::desc("instrument byval call arguments"), cl::Hidden,
232                       cl::init(true));
233 
234 static cl::opt<bool> ClAlwaysSlowPath(
235     "asan-always-slow-path",
236     cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
237     cl::init(false));
238 
239 static cl::opt<bool> ClForceDynamicShadow(
240     "asan-force-dynamic-shadow",
241     cl::desc("Load shadow address into a local variable for each function"),
242     cl::Hidden, cl::init(false));
243 
244 static cl::opt<bool>
245     ClWithIfunc("asan-with-ifunc",
246                 cl::desc("Access dynamic shadow through an ifunc global on "
247                          "platforms that support this"),
248                 cl::Hidden, cl::init(true));
249 
250 static cl::opt<bool> ClWithIfuncSuppressRemat(
251     "asan-with-ifunc-suppress-remat",
252     cl::desc("Suppress rematerialization of dynamic shadow address by passing "
253              "it through inline asm in prologue."),
254     cl::Hidden, cl::init(true));
255 
256 // This flag limits the number of instructions to be instrumented
257 // in any given BB. Normally, this should be set to unlimited (INT_MAX),
258 // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
259 // set it to 10000.
260 static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
261     "asan-max-ins-per-bb", cl::init(10000),
262     cl::desc("maximal number of instructions to instrument in any given BB"),
263     cl::Hidden);
264 
265 // This flag may need to be replaced with -f[no-]asan-stack.
266 static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
267                              cl::Hidden, cl::init(true));
268 static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
269     "asan-max-inline-poisoning-size",
270     cl::desc(
271         "Inline shadow poisoning for blocks up to the given size in bytes."),
272     cl::Hidden, cl::init(64));
273 
274 static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn(
275     "asan-use-after-return",
276     cl::desc("Sets the mode of detection for stack-use-after-return."),
277     cl::values(
278         clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
279                    "Never detect stack use after return."),
280         clEnumValN(
281             AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
282             "Detect stack use after return if "
283             "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
284         clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
285                    "Always detect stack use after return.")),
286     cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));
287 
288 static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
289                                         cl::desc("Create redzones for byval "
290                                                  "arguments (extra copy "
291                                                  "required)"), cl::Hidden,
292                                         cl::init(true));
293 
294 static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
295                                      cl::desc("Check stack-use-after-scope"),
296                                      cl::Hidden, cl::init(false));
297 
298 // This flag may need to be replaced with -f[no-]asan-globals.
299 static cl::opt<bool> ClGlobals("asan-globals",
300                                cl::desc("Handle global objects"), cl::Hidden,
301                                cl::init(true));
302 
303 static cl::opt<bool> ClInitializers("asan-initialization-order",
304                                     cl::desc("Handle C++ initializer order"),
305                                     cl::Hidden, cl::init(true));
306 
307 static cl::opt<bool> ClInvalidPointerPairs(
308     "asan-detect-invalid-pointer-pair",
309     cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
310     cl::init(false));
311 
312 static cl::opt<bool> ClInvalidPointerCmp(
313     "asan-detect-invalid-pointer-cmp",
314     cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
315     cl::init(false));
316 
317 static cl::opt<bool> ClInvalidPointerSub(
318     "asan-detect-invalid-pointer-sub",
319     cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
320     cl::init(false));
321 
322 static cl::opt<unsigned> ClRealignStack(
323     "asan-realign-stack",
324     cl::desc("Realign stack to the value of this flag (power of two)"),
325     cl::Hidden, cl::init(32));
326 
327 static cl::opt<int> ClInstrumentationWithCallsThreshold(
328     "asan-instrumentation-with-call-threshold",
329     cl::desc("If the function being instrumented contains more than "
330              "this number of memory accesses, use callbacks instead of "
331              "inline checks (-1 means never use callbacks)."),
332     cl::Hidden, cl::init(7000));
333 
334 static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
335     "asan-memory-access-callback-prefix",
336     cl::desc("Prefix for memory access callbacks"), cl::Hidden,
337     cl::init("__asan_"));
338 
339 static cl::opt<bool> ClKasanMemIntrinCallbackPrefix(
340     "asan-kernel-mem-intrinsic-prefix",
341     cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
342     cl::init(false));
343 
344 static cl::opt<bool>
345     ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
346                                cl::desc("instrument dynamic allocas"),
347                                cl::Hidden, cl::init(true));
348 
349 static cl::opt<bool> ClSkipPromotableAllocas(
350     "asan-skip-promotable-allocas",
351     cl::desc("Do not instrument promotable allocas"), cl::Hidden,
352     cl::init(true));
353 
354 static cl::opt<AsanCtorKind> ClConstructorKind(
355     "asan-constructor-kind",
356     cl::desc("Sets the ASan constructor kind"),
357     cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
358                clEnumValN(AsanCtorKind::Global, "global",
359                           "Use global constructors")),
360     cl::init(AsanCtorKind::Global), cl::Hidden);
361 // These flags allow changing the shadow mapping.
362 // The shadow mapping looks like
363 //    Shadow = (Mem >> scale) + offset
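// For example, a hypothetical invocation with -asan-mapping-scale=4 and
// -asan-mapping-offset=0x40000000 would map
//   Mem = 0x12345678  ->  Shadow = (0x12345678 >> 4) + 0x40000000 = 0x41234567.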
364 
365 static cl::opt<int> ClMappingScale("asan-mapping-scale",
366                                    cl::desc("scale of asan shadow mapping"),
367                                    cl::Hidden, cl::init(0));
368 
369 static cl::opt<uint64_t>
370     ClMappingOffset("asan-mapping-offset",
371                     cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
372                     cl::Hidden, cl::init(0));
373 
374 // Optimization flags. Not user visible, used mostly for testing
375 // and benchmarking the tool.
376 
377 static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
378                            cl::Hidden, cl::init(true));
379 
380 static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
381                                          cl::desc("Optimize callbacks"),
382                                          cl::Hidden, cl::init(false));
383 
384 static cl::opt<bool> ClOptSameTemp(
385     "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
386     cl::Hidden, cl::init(true));
387 
388 static cl::opt<bool> ClOptGlobals("asan-opt-globals",
389                                   cl::desc("Don't instrument scalar globals"),
390                                   cl::Hidden, cl::init(true));
391 
392 static cl::opt<bool> ClOptStack(
393     "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
394     cl::Hidden, cl::init(false));
395 
396 static cl::opt<bool> ClDynamicAllocaStack(
397     "asan-stack-dynamic-alloca",
398     cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
399     cl::init(true));
400 
401 static cl::opt<uint32_t> ClForceExperiment(
402     "asan-force-experiment",
403     cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
404     cl::init(0));
405 
406 static cl::opt<bool>
407     ClUsePrivateAlias("asan-use-private-alias",
408                       cl::desc("Use private aliases for global variables"),
409                       cl::Hidden, cl::init(true));
410 
411 static cl::opt<bool>
412     ClUseOdrIndicator("asan-use-odr-indicator",
413                       cl::desc("Use odr indicators to improve ODR reporting"),
414                       cl::Hidden, cl::init(true));
415 
416 static cl::opt<bool>
417     ClUseGlobalsGC("asan-globals-live-support",
418                    cl::desc("Use linker features to support dead "
419                             "code stripping of globals"),
420                    cl::Hidden, cl::init(true));
421 
422 // This is on by default even though there is a bug in gold:
423 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002
424 static cl::opt<bool>
425     ClWithComdat("asan-with-comdat",
426                  cl::desc("Place ASan constructors in comdat sections"),
427                  cl::Hidden, cl::init(true));
428 
429 static cl::opt<AsanDtorKind> ClOverrideDestructorKind(
430     "asan-destructor-kind",
431     cl::desc("Sets the ASan destructor kind. The default is to use the value "
432              "provided to the pass constructor"),
433     cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
434                clEnumValN(AsanDtorKind::Global, "global",
435                           "Use global destructors")),
436     cl::init(AsanDtorKind::Invalid), cl::Hidden);
437 
438 // Debug flags.
439 
440 static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
441                             cl::init(0));
442 
443 static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
444                                  cl::Hidden, cl::init(0));
445 
446 static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
447                                         cl::desc("Debug func"));
448 
449 static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
450                                cl::Hidden, cl::init(-1));
451 
452 static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
453                                cl::Hidden, cl::init(-1));
454 
455 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
456 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
457 STATISTIC(NumOptimizedAccessesToGlobalVar,
458           "Number of optimized accesses to global vars");
459 STATISTIC(NumOptimizedAccessesToStackVar,
460           "Number of optimized accesses to stack vars");
461 
462 namespace {
463 
464 /// This struct defines the shadow mapping using the rule:
465 ///   shadow = (mem >> Scale) ADD-or-OR Offset.
466 /// If InGlobal is true, then
467 ///   extern char __asan_shadow[];
468 ///   shadow = (mem >> Scale) + &__asan_shadow
469 struct ShadowMapping {
470   int Scale;
471   uint64_t Offset;
472   bool OrShadowOffset;
473   bool InGlobal;
474 };
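// A worked example of the first rule, assuming the 64-bit defaults defined
// above (Scale = kDefaultShadowScale = 3, Offset = kDefaultShadowOffset64 =
// 1ULL << 44):
//   Mem    = 0x555555554000
//   Shadow = (0x555555554000 >> 3) + 0x100000000000 = 0x1aaaaaaaa800
// with each shadow byte describing the state of 2^Scale = 8 application bytes.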
475 
476 } // end anonymous namespace
477 
478 static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
479                                       bool IsKasan) {
480   bool IsAndroid = TargetTriple.isAndroid();
481   bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
482                TargetTriple.isDriverKit();
483   bool IsMacOS = TargetTriple.isMacOSX();
484   bool IsFreeBSD = TargetTriple.isOSFreeBSD();
485   bool IsNetBSD = TargetTriple.isOSNetBSD();
486   bool IsPS = TargetTriple.isPS();
487   bool IsLinux = TargetTriple.isOSLinux();
488   bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
489                  TargetTriple.getArch() == Triple::ppc64le;
490   bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
491   bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
492   bool IsMIPSN32ABI = TargetTriple.isABIN32();
493   bool IsMIPS32 = TargetTriple.isMIPS32();
494   bool IsMIPS64 = TargetTriple.isMIPS64();
495   bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
496   bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
497                    TargetTriple.getArch() == Triple::aarch64_be;
498   bool IsLoongArch64 = TargetTriple.isLoongArch64();
499   bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
500   bool IsWindows = TargetTriple.isOSWindows();
501   bool IsFuchsia = TargetTriple.isOSFuchsia();
502   bool IsAMDGPU = TargetTriple.isAMDGPU();
503   bool IsHaiku = TargetTriple.isOSHaiku();
504   bool IsWasm = TargetTriple.isWasm();
505 
506   ShadowMapping Mapping;
507 
508   Mapping.Scale = kDefaultShadowScale;
509   if (ClMappingScale.getNumOccurrences() > 0) {
510     Mapping.Scale = ClMappingScale;
511   }
512 
513   if (LongSize == 32) {
514     if (IsAndroid)
515       Mapping.Offset = kDynamicShadowSentinel;
516     else if (IsMIPSN32ABI)
517       Mapping.Offset = kMIPS_ShadowOffsetN32;
518     else if (IsMIPS32)
519       Mapping.Offset = kMIPS32_ShadowOffset32;
520     else if (IsFreeBSD)
521       Mapping.Offset = kFreeBSD_ShadowOffset32;
522     else if (IsNetBSD)
523       Mapping.Offset = kNetBSD_ShadowOffset32;
524     else if (IsIOS)
525       Mapping.Offset = kDynamicShadowSentinel;
526     else if (IsWindows)
527       Mapping.Offset = kWindowsShadowOffset32;
528     else if (IsWasm)
529       Mapping.Offset = kWebAssemblyShadowOffset;
530     else
531       Mapping.Offset = kDefaultShadowOffset32;
532   } else {  // LongSize == 64
533     // Fuchsia is always PIE, which means that the beginning of the address
534     // space is always available.
535     if (IsFuchsia)
536       Mapping.Offset = 0;
537     else if (IsPPC64)
538       Mapping.Offset = kPPC64_ShadowOffset64;
539     else if (IsSystemZ)
540       Mapping.Offset = kSystemZ_ShadowOffset64;
541     else if (IsFreeBSD && IsAArch64)
542       Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
543     else if (IsFreeBSD && !IsMIPS64) {
544       if (IsKasan)
545         Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
546       else
547         Mapping.Offset = kFreeBSD_ShadowOffset64;
548     } else if (IsNetBSD) {
549       if (IsKasan)
550         Mapping.Offset = kNetBSDKasan_ShadowOffset64;
551       else
552         Mapping.Offset = kNetBSD_ShadowOffset64;
553     } else if (IsPS)
554       Mapping.Offset = kPS_ShadowOffset64;
555     else if (IsLinux && IsX86_64) {
556       if (IsKasan)
557         Mapping.Offset = kLinuxKasan_ShadowOffset64;
558       else
559         Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
560                           (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
561     } else if (IsWindows && IsX86_64) {
562       Mapping.Offset = kWindowsShadowOffset64;
563     } else if (IsMIPS64)
564       Mapping.Offset = kMIPS64_ShadowOffset64;
565     else if (IsIOS)
566       Mapping.Offset = kDynamicShadowSentinel;
567     else if (IsMacOS && IsAArch64)
568       Mapping.Offset = kDynamicShadowSentinel;
569     else if (IsAArch64)
570       Mapping.Offset = kAArch64_ShadowOffset64;
571     else if (IsLoongArch64)
572       Mapping.Offset = kLoongArch64_ShadowOffset64;
573     else if (IsRISCV64)
574       Mapping.Offset = kRISCV64_ShadowOffset64;
575     else if (IsAMDGPU)
576       Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
577                         (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
578     else if (IsHaiku && IsX86_64)
579       Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
580                         (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
581     else
582       Mapping.Offset = kDefaultShadowOffset64;
583   }
584 
585   if (ClForceDynamicShadow) {
586     Mapping.Offset = kDynamicShadowSentinel;
587   }
588 
589   if (ClMappingOffset.getNumOccurrences() > 0) {
590     Mapping.Offset = ClMappingOffset;
591   }
592 
593   // OR-ing the shadow offset is more efficient (at least on x86) if the offset
594   // is a power of two, but on ppc64 and loongarch64 we have to use add since
595   // the shadow offset is not necessarily 1/8-th of the address space.  On
596   // SystemZ, we could OR the constant in a single instruction, but it's more
597   // efficient to load it once and use indexed addressing.
598   Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
599                            !IsRISCV64 && !IsLoongArch64 &&
600                            !(Mapping.Offset & (Mapping.Offset - 1)) &&
601                            Mapping.Offset != kDynamicShadowSentinel;
602   bool IsAndroidWithIfuncSupport =
603       IsAndroid && !TargetTriple.isAndroidVersionLT(21);
604   Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;
605 
606   return Mapping;
607 }
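// As a sanity check of the Linux/x86-64 userspace branch above, with the
// default Scale of 3:
//   kSmallX86_64ShadowOffsetAlignMask << 3 = 0xFFFFFFFFFFFF8000
//   0x7FFFFFFF & 0xFFFFFFFFFFFF8000        = 0x7FFF8000
// which is the well-known 0x7fff8000 shadow offset ASan uses on that target.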
608 
609 namespace llvm {
610 void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
611                                bool IsKasan, uint64_t *ShadowBase,
612                                int *MappingScale, bool *OrShadowOffset) {
613   auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
614   *ShadowBase = Mapping.Offset;
615   *MappingScale = Mapping.Scale;
616   *OrShadowOffset = Mapping.OrShadowOffset;
617 }
618 
619 void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem) {
620   // Sanitizer checks read from shadow, which invalidates memory(argmem: *).
621   //
622   // This is not only true for sanitized functions, because AttrInfer can
623   // infer those attributes on libc functions, which is not true if those
624   // are instrumented (Android) or intercepted.
625   //
626   // We might want to model ASan shadow memory more opaquely to get rid of
627   // this problem altogether, by hiding the shadow memory write in an
628   // intrinsic, essentially like in the AArch64StackTagging pass. But that's
629   // for another day.
630 
631   // The API is weird. `onlyReadsMemory` actually means "does not write", and
632   // `onlyWritesMemory` actually means "does not read". So we reconstruct
633   // "accesses memory" && "does not read" <=> "writes".
634   bool Changed = false;
635   if (!F.doesNotAccessMemory()) {
636     bool WritesMemory = !F.onlyReadsMemory();
637     bool ReadsMemory = !F.onlyWritesMemory();
638     if ((WritesMemory && !ReadsMemory) || F.onlyAccessesArgMemory()) {
639       F.removeFnAttr(Attribute::Memory);
640       Changed = true;
641     }
642   }
643   if (ReadsArgMem) {
644     for (Argument &A : F.args()) {
645       if (A.hasAttribute(Attribute::WriteOnly)) {
646         A.removeAttr(Attribute::WriteOnly);
647         Changed = true;
648       }
649     }
650   }
651   if (Changed) {
652     // nobuiltin makes sure later passes don't restore assumptions about
653     // the function.
654     F.addFnAttr(Attribute::NoBuiltin);
655   }
656 }
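// A minimal sketch of the effect of removeASanIncompatibleFnAttributes on
// illustrative IR (assuming ReadsArgMem is true; the function @f is invented
// for the example):
//   before: define void @f(ptr writeonly %p) memory(argmem: write) { ... }
//   after : define void @f(ptr %p) nobuiltin { ... }
// The memory(...) attribute is dropped because instrumented code also reads
// shadow, writeonly is dropped because ReadsArgMem says argument memory may
// be read, and nobuiltin keeps later passes from re-inferring either.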
657 
658 ASanAccessInfo::ASanAccessInfo(int32_t Packed)
659     : Packed(Packed),
660       AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask),
661       IsWrite((Packed >> kIsWriteShift) & kIsWriteMask),
662       CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {}
663 
664 ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel,
665                                uint8_t AccessSizeIndex)
666     : Packed((IsWrite << kIsWriteShift) +
667              (CompileKernel << kCompileKernelShift) +
668              (AccessSizeIndex << kAccessSizeIndexShift)),
669       AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
670       CompileKernel(CompileKernel) {}
671 
672 } // namespace llvm
673 
674 static uint64_t getRedzoneSizeForScale(int MappingScale) {
675   // Redzone used for stack and globals is at least 32 bytes.
676   // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
677   return std::max(32U, 1U << MappingScale);
678 }
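// E.g. the default scale of 3 yields max(32, 1 << 3) = 32 bytes, while a
// scale of 7 yields max(32, 1 << 7) = 128 bytes, matching the comment above.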
679 
680 static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
681   if (TargetTriple.isOSEmscripten()) {
682     return kAsanEmscriptenCtorAndDtorPriority;
683   } else {
684     return kAsanCtorAndDtorPriority;
685   }
686 }
687 
688 static Twine genName(StringRef suffix) {
689   return Twine(kAsanGenPrefix) + suffix;
690 }
691 
692 namespace {
693 /// Helper RAII class to post-process inserted asan runtime calls during a
694 /// pass on a single Function. Upon end of scope, detects and applies the
695 /// required funclet OpBundle.
696 class RuntimeCallInserter {
697   Function *OwnerFn = nullptr;
698   bool TrackInsertedCalls = false;
699   SmallVector<CallInst *> InsertedCalls;
700 
701 public:
702   RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
703     if (Fn.hasPersonalityFn()) {
704       auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
705       if (isScopedEHPersonality(Personality))
706         TrackInsertedCalls = true;
707     }
708   }
709 
710   ~RuntimeCallInserter() {
711     if (InsertedCalls.empty())
712       return;
713     assert(TrackInsertedCalls && "Calls were wrongly tracked");
714 
715     DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
716     for (CallInst *CI : InsertedCalls) {
717       BasicBlock *BB = CI->getParent();
718       assert(BB && "Instruction doesn't belong to a BasicBlock");
719       assert(BB->getParent() == OwnerFn &&
720              "Instruction doesn't belong to the expected Function!");
721 
722       ColorVector &Colors = BlockColors[BB];
723       // funclet opbundles are only valid in monochromatic BBs.
724       // Note that unreachable BBs are seen as colorless by colorEHFunclets()
725       // and will be DCE'ed later.
726       if (Colors.empty())
727         continue;
728       if (Colors.size() != 1) {
729         OwnerFn->getContext().emitError(
730             "Instruction's BasicBlock is not monochromatic");
731         continue;
732       }
733 
734       BasicBlock *Color = Colors.front();
735       BasicBlock::iterator EHPadIt = Color->getFirstNonPHIIt();
736 
737       if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
738         // Replace CI with a clone with an added funclet OperandBundle
739         OperandBundleDef OB("funclet", &*EHPadIt);
740         auto *NewCall = CallBase::addOperandBundle(CI, LLVMContext::OB_funclet,
741                                                    OB, CI->getIterator());
742         NewCall->copyMetadata(*CI);
743         CI->replaceAllUsesWith(NewCall);
744         CI->eraseFromParent();
745       }
746     }
747   }
748 
749   CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
750                               ArrayRef<Value *> Args = {},
751                               const Twine &Name = "") {
752     assert(IRB.GetInsertBlock()->getParent() == OwnerFn);
753 
754     CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
755     if (TrackInsertedCalls)
756       InsertedCalls.push_back(Inst);
757     return Inst;
758   }
759 };
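// Typical usage, sketched (insertion point and callee names are placeholders):
// construct one inserter per instrumented function and route every runtime
// call through it, so calls that land in EH funclets pick up the required
// "funclet" operand bundle when the inserter is destroyed:
//
//   RuntimeCallInserter RTCI(F);
//   IRBuilder<> IRB(InsertBefore);
//   RTCI.createRuntimeCall(IRB, SomeAsanRuntimeFn, {Arg});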
760 
761 /// AddressSanitizer: instrument the code in module to find memory bugs.
762 struct AddressSanitizer {
763   AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
764                    int InstrumentationWithCallsThreshold,
765                    uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
766                    bool Recover = false, bool UseAfterScope = false,
767                    AsanDetectStackUseAfterReturnMode UseAfterReturn =
768                        AsanDetectStackUseAfterReturnMode::Runtime)
769       : M(M),
770         CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
771                                                             : CompileKernel),
772         Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
773         UseAfterScope(UseAfterScope || ClUseAfterScope),
774         UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
775                                                             : UseAfterReturn),
776         SSGI(SSGI),
777         InstrumentationWithCallsThreshold(
778             ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
779                 ? ClInstrumentationWithCallsThreshold
780                 : InstrumentationWithCallsThreshold),
781         MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
782                                    ? ClMaxInlinePoisoningSize
783                                    : MaxInlinePoisoningSize) {
784     C = &(M.getContext());
785     DL = &M.getDataLayout();
786     LongSize = M.getDataLayout().getPointerSizeInBits();
787     IntptrTy = Type::getIntNTy(*C, LongSize);
788     PtrTy = PointerType::getUnqual(*C);
789     Int32Ty = Type::getInt32Ty(*C);
790     TargetTriple = M.getTargetTriple();
791 
792     Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
793 
794     assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
795   }
796 
797   TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
798     return *AI.getAllocationSize(AI.getDataLayout());
799   }
800 
801   /// Check if we want (and can) handle this alloca.
802   bool isInterestingAlloca(const AllocaInst &AI);
803 
804   bool ignoreAccess(Instruction *Inst, Value *Ptr);
805   void getInterestingMemoryOperands(
806       Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
807 
808   void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
809                      InterestingMemoryOperand &O, bool UseCalls,
810                      const DataLayout &DL, RuntimeCallInserter &RTCI);
811   void instrumentPointerComparisonOrSubtraction(Instruction *I,
812                                                 RuntimeCallInserter &RTCI);
813   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
814                          Value *Addr, MaybeAlign Alignment,
815                          uint32_t TypeStoreSize, bool IsWrite,
816                          Value *SizeArgument, bool UseCalls, uint32_t Exp,
817                          RuntimeCallInserter &RTCI);
818   Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
819                                        Instruction *InsertBefore, Value *Addr,
820                                        uint32_t TypeStoreSize, bool IsWrite,
821                                        Value *SizeArgument);
822   Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
823                                     bool Recover);
824   void instrumentUnusualSizeOrAlignment(Instruction *I,
825                                         Instruction *InsertBefore, Value *Addr,
826                                         TypeSize TypeStoreSize, bool IsWrite,
827                                         Value *SizeArgument, bool UseCalls,
828                                         uint32_t Exp,
829                                         RuntimeCallInserter &RTCI);
830   void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
831                                    Type *IntptrTy, Value *Mask, Value *EVL,
832                                    Value *Stride, Instruction *I, Value *Addr,
833                                    MaybeAlign Alignment, unsigned Granularity,
834                                    Type *OpType, bool IsWrite,
835                                    Value *SizeArgument, bool UseCalls,
836                                    uint32_t Exp, RuntimeCallInserter &RTCI);
837   Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
838                            Value *ShadowValue, uint32_t TypeStoreSize);
839   Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
840                                  bool IsWrite, size_t AccessSizeIndex,
841                                  Value *SizeArgument, uint32_t Exp,
842                                  RuntimeCallInserter &RTCI);
843   void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
844   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
845   bool suppressInstrumentationSiteForDebug(int &Instrumented);
846   bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
847   bool maybeInsertAsanInitAtFunctionEntry(Function &F);
848   bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
849   void markEscapedLocalAllocas(Function &F);
850 
851 private:
852   friend struct FunctionStackPoisoner;
853 
854   void initializeCallbacks(const TargetLibraryInfo *TLI);
855 
856   bool LooksLikeCodeInBug11395(Instruction *I);
857   bool GlobalIsLinkerInitialized(GlobalVariable *G);
858   bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
859                     TypeSize TypeStoreSize) const;
860 
861   /// Helper to clean up per-function state.
862   struct FunctionStateRAII {
863     AddressSanitizer *Pass;
864 
865     FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
866       assert(Pass->ProcessedAllocas.empty() &&
867              "last pass forgot to clear cache");
868       assert(!Pass->LocalDynamicShadow);
869     }
870 
871     ~FunctionStateRAII() {
872       Pass->LocalDynamicShadow = nullptr;
873       Pass->ProcessedAllocas.clear();
874     }
875   };
876 
877   Module &M;
878   LLVMContext *C;
879   const DataLayout *DL;
880   Triple TargetTriple;
881   int LongSize;
882   bool CompileKernel;
883   bool Recover;
884   bool UseAfterScope;
885   AsanDetectStackUseAfterReturnMode UseAfterReturn;
886   Type *IntptrTy;
887   Type *Int32Ty;
888   PointerType *PtrTy;
889   ShadowMapping Mapping;
890   FunctionCallee AsanHandleNoReturnFunc;
891   FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
892   Constant *AsanShadowGlobal;
893 
894   // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
895   FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
896   FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
897 
898   // These arrays are indexed by AccessIsWrite and Experiment.
899   FunctionCallee AsanErrorCallbackSized[2][2];
900   FunctionCallee AsanMemoryAccessCallbackSized[2][2];
901 
902   FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
903   Value *LocalDynamicShadow = nullptr;
904   const StackSafetyGlobalInfo *SSGI;
905   DenseMap<const AllocaInst *, bool> ProcessedAllocas;
906 
907   FunctionCallee AMDGPUAddressShared;
908   FunctionCallee AMDGPUAddressPrivate;
909   int InstrumentationWithCallsThreshold;
910   uint32_t MaxInlinePoisoningSize;
911 };
912 
913 class ModuleAddressSanitizer {
914 public:
915   ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
916                          bool CompileKernel = false, bool Recover = false,
917                          bool UseGlobalsGC = true, bool UseOdrIndicator = true,
918                          AsanDtorKind DestructorKind = AsanDtorKind::Global,
919                          AsanCtorKind ConstructorKind = AsanCtorKind::Global)
920       : M(M),
921         CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
922                                                             : CompileKernel),
923         InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
924                                ? ClInsertVersionCheck
925                                : InsertVersionCheck),
926         Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
927         UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
928         // Enable aliases as they should have no downside with ODR indicators.
929         UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
930                             ? ClUsePrivateAlias
931                             : UseOdrIndicator),
932         UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
933                             ? ClUseOdrIndicator
934                             : UseOdrIndicator),
935         // Not a typo: ClWithComdat is almost completely pointless without
936         // ClUseGlobalsGC (because then it only works on modules without
937         // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
938         // and both suffer from gold PR19002 for which UseGlobalsGC constructor
939         // argument is designed as workaround. Therefore, disable both
940         // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to
941         // do globals-gc.
942         UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
943         DestructorKind(DestructorKind),
944         ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
945                             ? ClConstructorKind
946                             : ConstructorKind) {
947     C = &(M.getContext());
948     int LongSize = M.getDataLayout().getPointerSizeInBits();
949     IntptrTy = Type::getIntNTy(*C, LongSize);
950     PtrTy = PointerType::getUnqual(*C);
951     TargetTriple = M.getTargetTriple();
952     Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
953 
954     if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
955       this->DestructorKind = ClOverrideDestructorKind;
956     assert(this->DestructorKind != AsanDtorKind::Invalid);
957   }
958 
959   bool instrumentModule();
960 
961 private:
962   void initializeCallbacks();
963 
964   void instrumentGlobals(IRBuilder<> &IRB, bool *CtorComdat);
965   void InstrumentGlobalsCOFF(IRBuilder<> &IRB,
966                              ArrayRef<GlobalVariable *> ExtendedGlobals,
967                              ArrayRef<Constant *> MetadataInitializers);
968   void instrumentGlobalsELF(IRBuilder<> &IRB,
969                             ArrayRef<GlobalVariable *> ExtendedGlobals,
970                             ArrayRef<Constant *> MetadataInitializers,
971                             const std::string &UniqueModuleId);
972   void InstrumentGlobalsMachO(IRBuilder<> &IRB,
973                               ArrayRef<GlobalVariable *> ExtendedGlobals,
974                               ArrayRef<Constant *> MetadataInitializers);
975   void
976   InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB,
977                                      ArrayRef<GlobalVariable *> ExtendedGlobals,
978                                      ArrayRef<Constant *> MetadataInitializers);
979 
980   GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
981                                        StringRef OriginalName);
982   void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
983                                   StringRef InternalSuffix);
984   Instruction *CreateAsanModuleDtor();
985 
986   const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
987   bool shouldInstrumentGlobal(GlobalVariable *G) const;
988   bool ShouldUseMachOGlobalsSection() const;
989   StringRef getGlobalMetadataSection() const;
990   void poisonOneInitializer(Function &GlobalInit);
991   void createInitializerPoisonCalls();
992   uint64_t getMinRedzoneSizeForGlobal() const {
993     return getRedzoneSizeForScale(Mapping.Scale);
994   }
995   uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
996   int GetAsanVersion() const;
997   GlobalVariable *getOrCreateModuleName();
998 
999   Module &M;
1000   bool CompileKernel;
1001   bool InsertVersionCheck;
1002   bool Recover;
1003   bool UseGlobalsGC;
1004   bool UsePrivateAlias;
1005   bool UseOdrIndicator;
1006   bool UseCtorComdat;
1007   AsanDtorKind DestructorKind;
1008   AsanCtorKind ConstructorKind;
1009   Type *IntptrTy;
1010   PointerType *PtrTy;
1011   LLVMContext *C;
1012   Triple TargetTriple;
1013   ShadowMapping Mapping;
1014   FunctionCallee AsanPoisonGlobals;
1015   FunctionCallee AsanUnpoisonGlobals;
1016   FunctionCallee AsanRegisterGlobals;
1017   FunctionCallee AsanUnregisterGlobals;
1018   FunctionCallee AsanRegisterImageGlobals;
1019   FunctionCallee AsanUnregisterImageGlobals;
1020   FunctionCallee AsanRegisterElfGlobals;
1021   FunctionCallee AsanUnregisterElfGlobals;
1022 
1023   Function *AsanCtorFunction = nullptr;
1024   Function *AsanDtorFunction = nullptr;
1025   GlobalVariable *ModuleName = nullptr;
1026 };
1027 
1028 // Stack poisoning does not play well with exception handling.
1029 // When an exception is thrown, we essentially bypass the code
1030 // that unpoisons the stack. This is why the run-time library has
1031 // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
1032 // stack in the interceptor. This, however, does not work inside the
1033 // actual function which catches the exception, most likely because the
1034 // compiler hoists the load of the shadow value somewhere too high.
1035 // This causes asan to report a non-existing bug on 453.povray.
1036 // It sounds like an LLVM bug.
1037 struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
1038   Function &F;
1039   AddressSanitizer &ASan;
1040   RuntimeCallInserter &RTCI;
1041   DIBuilder DIB;
1042   LLVMContext *C;
1043   Type *IntptrTy;
1044   Type *IntptrPtrTy;
1045   ShadowMapping Mapping;
1046 
1047   SmallVector<AllocaInst *, 16> AllocaVec;
1048   SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
1049   SmallVector<Instruction *, 8> RetVec;
1050 
1051   FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
1052       AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
1053   FunctionCallee AsanSetShadowFunc[0x100] = {};
1054   FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
1055   FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;
1056 
1057   // Location and arguments of a poisoning/unpoisoning call for an alloca.
1058   struct AllocaPoisonCall {
1059     IntrinsicInst *InsBefore;
1060     AllocaInst *AI;
1061     uint64_t Size;
1062     bool DoPoison;
1063   };
1064   SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
1065   SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
1066   bool HasUntracedLifetimeIntrinsic = false;
1067 
1068   SmallVector<AllocaInst *, 1> DynamicAllocaVec;
1069   SmallVector<IntrinsicInst *, 1> StackRestoreVec;
1070   AllocaInst *DynamicAllocaLayout = nullptr;
1071   IntrinsicInst *LocalEscapeCall = nullptr;
1072 
1073   bool HasInlineAsm = false;
1074   bool HasReturnsTwiceCall = false;
1075   bool PoisonStack;
1076 
1077   FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
1078                         RuntimeCallInserter &RTCI)
1079       : F(F), ASan(ASan), RTCI(RTCI),
1080         DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
1081         IntptrTy(ASan.IntptrTy),
1082         IntptrPtrTy(PointerType::get(IntptrTy->getContext(), 0)),
1083         Mapping(ASan.Mapping),
1084         PoisonStack(ClStack && !F.getParent()->getTargetTriple().isAMDGPU()) {}
1085 
1086   bool runOnFunction() {
1087     if (!PoisonStack)
1088       return false;
1089 
1090     if (ClRedzoneByvalArgs)
1091       copyArgsPassedByValToAllocas();
1092 
1093     // Collect alloca, ret, lifetime instructions etc.
1094     for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
1095 
1096     if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
1097 
1098     initializeCallbacks(*F.getParent());
1099 
1100     if (HasUntracedLifetimeIntrinsic) {
1101       // If there are lifetime intrinsics which couldn't be traced back to an
1102       // alloca, we may not know exactly when a variable enters scope, and
1103       // therefore should "fail safe" by not poisoning them.
1104       StaticAllocaPoisonCallVec.clear();
1105       DynamicAllocaPoisonCallVec.clear();
1106     }
1107 
1108     processDynamicAllocas();
1109     processStaticAllocas();
1110 
1111     if (ClDebugStack) {
1112       LLVM_DEBUG(dbgs() << F);
1113     }
1114     return true;
1115   }
1116 
1117   // Arguments marked with the "byval" attribute are implicitly copied without
1118   // using an alloca instruction.  To produce redzones for those arguments, we
1119   // copy them a second time into memory allocated with an alloca instruction.
1120   void copyArgsPassedByValToAllocas();
1121 
1122   // Finds all Alloca instructions and puts
1123   // poisoned red zones around all of them.
1124   // Then unpoisons everything before the function returns.
1125   void processStaticAllocas();
1126   void processDynamicAllocas();
1127 
1128   void createDynamicAllocasInitStorage();
1129 
1130   // ----------------------- Visitors.
1131   /// Collect all Ret instructions, or the musttail call instruction if it
1132   /// precedes the return instruction.
1133   void visitReturnInst(ReturnInst &RI) {
1134     if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
1135       RetVec.push_back(CI);
1136     else
1137       RetVec.push_back(&RI);
1138   }
1139 
1140   /// Collect all Resume instructions.
1141   void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }
1142 
1143   /// Collect all CleanupReturnInst instructions.
1144   void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }
1145 
1146   void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
1147                                         Value *SavedStack) {
1148     IRBuilder<> IRB(InstBefore);
1149     Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
1150     // When we insert __asan_allocas_unpoison before @llvm.stackrestore, we
1151     // need to adjust extracted SP to compute the address of the most recent
1152     // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
1153     // this purpose.
1154     if (!isa<ReturnInst>(InstBefore)) {
1155       Value *DynamicAreaOffset = IRB.CreateIntrinsic(
1156           Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});
1157 
1158       DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
1159                                      DynamicAreaOffset);
1160     }
1161 
1162     RTCI.createRuntimeCall(
1163         IRB, AsanAllocasUnpoisonFunc,
1164         {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1165   }
1166 
1167   // Unpoison dynamic allocas redzones.
1168   void unpoisonDynamicAllocas() {
1169     for (Instruction *Ret : RetVec)
1170       unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
1171 
1172     for (Instruction *StackRestoreInst : StackRestoreVec)
1173       unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1174                                        StackRestoreInst->getOperand(0));
1175   }
1176 
1177   // Deploy and poison redzones around a dynamic alloca call. To do this, we
1178   // replace the call with another one with changed parameters and
1179   // replace all of its uses with the new address, so that
1180   //   addr = alloca type, old_size, align
1181   // is replaced by
1182   //   new_size = (old_size + additional_size) * sizeof(type)
1183   //   tmp = alloca i8, new_size, max(align, 32)
1184   //   addr = tmp + 32 (first 32 bytes are for the left redzone).
1185   // additional_size is added so that the new allocation contains not only the
1186   // requested memory, but also the left, partial and right redzones.
1187   void handleDynamicAllocaCall(AllocaInst *AI);
1188 
1189   /// Collect Alloca instructions we want (and can) handle.
1190   void visitAllocaInst(AllocaInst &AI) {
1191     // FIXME: Handle scalable vectors instead of ignoring them.
1192     const Type *AllocaType = AI.getAllocatedType();
1193     const auto *STy = dyn_cast<StructType>(AllocaType);
1194     if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
1195         (STy && STy->containsHomogeneousScalableVectorTypes())) {
1196       if (AI.isStaticAlloca()) {
1197         // Skip over allocas that are present *before* the first instrumented
1198         // alloca; we don't want to move those around.
1199         if (AllocaVec.empty())
1200           return;
1201 
1202         StaticAllocasToMoveUp.push_back(&AI);
1203       }
1204       return;
1205     }
1206 
1207     if (!AI.isStaticAlloca())
1208       DynamicAllocaVec.push_back(&AI);
1209     else
1210       AllocaVec.push_back(&AI);
1211   }
1212 
1213   /// Collect lifetime intrinsic calls to check for use-after-scope
1214   /// errors.
1215   void visitIntrinsicInst(IntrinsicInst &II) {
1216     Intrinsic::ID ID = II.getIntrinsicID();
1217     if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
1218     if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
1219     if (!ASan.UseAfterScope)
1220       return;
1221     if (!II.isLifetimeStartOrEnd())
1222       return;
1223     // Found lifetime intrinsic, add ASan instrumentation if necessary.
1224     auto *Size = cast<ConstantInt>(II.getArgOperand(0));
1225     // If size argument is undefined, don't do anything.
1226     if (Size->isMinusOne()) return;
1227     // Check that size doesn't saturate uint64_t and can
1228     // be stored in IntptrTy.
1229     const uint64_t SizeValue = Size->getValue().getLimitedValue();
1230     if (SizeValue == ~0ULL ||
1231         !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
1232       return;
1233     // Find alloca instruction that corresponds to llvm.lifetime argument.
1234     // Currently we can only handle lifetime markers pointing to the
1235     // beginning of the alloca.
1236     AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true);
1237     if (!AI) {
1238       HasUntracedLifetimeIntrinsic = true;
1239       return;
1240     }
1241     // We're interested only in allocas we can handle.
1242     if (!ASan.isInterestingAlloca(*AI))
1243       return;
1244     bool DoPoison = (ID == Intrinsic::lifetime_end);
1245     AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
1246     if (AI->isStaticAlloca())
1247       StaticAllocaPoisonCallVec.push_back(APC);
1248     else if (ClInstrumentDynamicAllocas)
1249       DynamicAllocaPoisonCallVec.push_back(APC);
1250   }

  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void AddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel;";
  if (Options.UseAfterScope)
    OS << "use-after-scope";
  OS << '>';
}
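
// For example, with only UseAfterScope set this renders the pass as
// "asan<use-after-scope>" in -print-pipeline-passes output (a sketch; the
// exact string is whatever the branches above emit for the given Options).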

AddressSanitizerPass::AddressSanitizerPass(
    const AddressSanitizerOptions &Options, bool UseGlobalGC,
    bool UseOdrIndicator, AsanDtorKind DestructorKind,
    AsanCtorKind ConstructorKind)
    : Options(Options), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
      ConstructorKind(ConstructorKind) {}

PreservedAnalyses AddressSanitizerPass::run(Module &M,
                                            ModuleAnalysisManager &MAM) {
  // Return early if the nosanitize_address module flag is present for the
  // module. This implies that the ASan pass has already been run on it.
  if (checkIfAlreadyInstrumented(M, "nosanitize_address"))
    return PreservedAnalyses::all();

  ModuleAddressSanitizer ModuleSanitizer(
      M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
      UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
  bool Modified = false;
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  const StackSafetyGlobalInfo *const SSGI =
      ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
  for (Function &F : M) {
    if (F.empty())
      continue;
    if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
      continue;
    if (!ClDebugFunc.empty() && ClDebugFunc == F.getName())
      continue;
    if (F.getName().starts_with("__asan_"))
      continue;
    if (F.isPresplitCoroutine())
      continue;
    AddressSanitizer FunctionSanitizer(
        M, SSGI, Options.InstrumentationWithCallsThreshold,
        Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
        Options.UseAfterScope, Options.UseAfterReturn);
    const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
  }
  Modified |= ModuleSanitizer.instrumentModule();
  if (!Modified)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}

static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
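
// For example (TypeSize is in bits): 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3,
// 128 -> 4. The result indexes the per-size callback arrays, such as
// AsanErrorCallback used in generateCrashCode below.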

/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().starts_with("llvm.") ||
      // Do not instrument gcov counter arrays.
      G->getName().starts_with("__llvm_gcov_ctr") ||
      // Do not instrument rtti proxy symbols for function sanitizer.
      G->getName().starts_with("__llvm_rtti_proxy"))
    return true;

  // Do not instrument asan globals.
  if (G->getName().starts_with(kAsanGenPrefix) ||
      G->getName().starts_with(kSanCovGenPrefix) ||
      G->getName().starts_with(kODRGenPrefix))
    return true;

  return false;
}

static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  // Address spaces 3 (local/LDS) and 5 (private/scratch) are not supported.
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}
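
// As a worked example, with the typical x86-64 Linux mapping (Scale = 3,
// Offset = 0x7fff8000, OrShadowOffset = false) this emits the equivalent of
//   Shadow = (Addr >> 3) + 0x7fff8000;
// so every 8 bytes of application memory share one shadow byte. (A sketch;
// the actual Scale/Offset come from Mapping and vary per platform.)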

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
                                              RuntimeCallInserter &RTCI) {
  InstrumentationIRBuilder IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, AsanMemset,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}
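
// For example (a sketch of the rewrite; the interceptor performs its own
// shadow checks inside the runtime):
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %n, i1 false)
// becomes
//   call ptr @__asan_memcpy(ptr %dst, ptr %src, i64 %n)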

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto [It, Inserted] = ProcessedAllocas.try_emplace(&AI);

  if (!Inserted)
    return It->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them either.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError() &&
       // safe allocas are not interesting
       !(SSGI && SSGI->isSafe(AI)));

  It->second = IsInteresting;
  return IsInteresting;
}

bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
  // Instrument accesses from different address spaces only for AMDGPU.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0 &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses (such as being passed
  // to an instrumentation function), and it makes no sense to track them
  // as memory.
  if (Ptr->isSwiftError())
    return true;

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
      return true;

  if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
      findAllocaForValue(Ptr))
    return true;

  return false;
}

void AddressSanitizer::getInterestingMemoryOperands(
    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    switch (CI->getIntrinsicID()) {
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
    case Intrinsic::masked_gather:
    case Intrinsic::masked_scatter: {
      bool IsWrite = CI->getType()->isVoidTy();
      // Masked store has an initial operand for the value.
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;

      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = Align(1);
      // Otherwise there are no alignment guarantees. We probably got Undef.
      if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        Alignment = Op->getMaybeAlignValue();
      Value *Mask = CI->getOperand(2 + OpOffset);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
      break;
    }
    case Intrinsic::masked_expandload:
    case Intrinsic::masked_compressstore: {
      bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();

      IRBuilder IB(I);
      Value *Mask = CI->getOperand(1 + OpOffset);
      // Use the popcount of Mask as the effective vector length.
      Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
      Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
      Value *EVL = IB.CreateAddReduce(ExtMask);
      Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
                               EVL);
      break;
    }
    case Intrinsic::vp_load:
    case Intrinsic::vp_store:
    case Intrinsic::experimental_vp_strided_load:
    case Intrinsic::experimental_vp_strided_store: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = CI->getType()->isVoidTy();
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
      Value *Stride = nullptr;
      if (IID == Intrinsic::experimental_vp_strided_store ||
          IID == Intrinsic::experimental_vp_strided_load) {
        Stride = VPI->getOperand(PtrOpNo + 1);
        // Use the pointer alignment as the element alignment if the stride is
        // a multiple of the pointer alignment. Otherwise, the element
        // alignment should be Align(1).
        unsigned PointerAlign = Alignment.valueOrOne().value();
        if (!isa<ConstantInt>(Stride) ||
            cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
          Alignment = Align(1);
      }
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(), VPI->getVectorLengthParam(),
                               Stride);
      break;
    }
    case Intrinsic::vp_gather:
    case Intrinsic::vp_scatter: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = IID == Intrinsic::vp_scatter;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getPointerAlignment();
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(),
                               VPI->getVectorLengthParam());
      break;
    }
    default:
      for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
        if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
            ignoreAccess(I, CI->getArgOperand(ArgNo)))
          continue;
        Type *Ty = CI->getParamByValType(ArgNo);
        Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
      }
    }
  }
}

static bool isPointerOperand(Value *V) {
  return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
}

// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
static bool isInterestingPointerComparison(Instruction *I) {
  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
    if (!Cmp->isRelational())
      return false;
  } else {
    return false;
  }
  return isPointerOperand(I->getOperand(0)) &&
         isPointerOperand(I->getOperand(1));
}

// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
static bool isInterestingPointerSubtraction(Instruction *I) {
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    if (BO->getOpcode() != Instruction::Sub)
      return false;
  } else {
    return false;
  }
  return isPointerOperand(I->getOperand(0)) &&
         isPointerOperand(I->getOperand(1));
}

bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
  // If a global variable does not have dynamic initialization we don't
  // have to instrument it. However, if a global has no initializer at all,
  // we assume it is dynamically initialized (in another TU).
  if (!G->hasInitializer())
    return false;

  if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
    return false;

  return true;
}

void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
    Instruction *I, RuntimeCallInserter &RTCI) {
  IRBuilder<> IRB(I);
  FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
  Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
  for (Value *&i : Param) {
    if (i->getType()->isPointerTy())
      i = IRB.CreatePointerCast(i, IntptrTy);
  }
  RTCI.createRuntimeCall(IRB, F, Param);
}
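
// For example, for
//   %cmp = icmp ult ptr %p, %q
// this inserts, right before the compare (a sketch; AsanPtrCmpFunction is
// bound to the runtime's pointer-comparison hook, and both operands are
// first cast to the target's intptr type):
//   call void @__sanitizer_ptr_cmp(i64 %p.int, i64 %q.int)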

static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
                                Instruction *InsertBefore, Value *Addr,
                                MaybeAlign Alignment, unsigned Granularity,
                                TypeSize TypeStoreSize, bool IsWrite,
                                Value *SizeArgument, bool UseCalls,
                                uint32_t Exp, RuntimeCallInserter &RTCI) {
  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
  // if the data is properly aligned.
  if (!TypeStoreSize.isScalable()) {
    const auto FixedSize = TypeStoreSize.getFixedValue();
    switch (FixedSize) {
    case 8:
    case 16:
    case 32:
    case 64:
    case 128:
      if (!Alignment || *Alignment >= Granularity ||
          *Alignment >= FixedSize / 8)
        return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
                                       FixedSize, IsWrite, nullptr, UseCalls,
                                       Exp, RTCI);
    }
  }
  Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
                                         IsWrite, nullptr, UseCalls, Exp, RTCI);
}

void AddressSanitizer::instrumentMaskedLoadOrStore(
    AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
    Value *EVL, Value *Stride, Instruction *I, Value *Addr,
    MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
    Value *SizeArgument, bool UseCalls, uint32_t Exp,
    RuntimeCallInserter &RTCI) {
  auto *VTy = cast<VectorType>(OpType);
  TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
  auto Zero = ConstantInt::get(IntptrTy, 0);

  IRBuilder IB(I);
  Instruction *LoopInsertBefore = I;
  if (EVL) {
    // The end argument of SplitBlockAndInsertForEachLane is assumed to be
    // greater than zero, so we must check whether EVL is zero here.
    Type *EVLType = EVL->getType();
    Value *IsEVLZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
    LoopInsertBefore = SplitBlockAndInsertIfThen(IsEVLZero, I, false);
    IB.SetInsertPoint(LoopInsertBefore);
    // Cast EVL to IntptrTy.
    EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
    // To avoid undefined behavior when extracting with an out-of-range index,
    // use the minimum of EVL and the element count as the trip count.
    Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
    EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
  } else {
    EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
  }

  // Cast Stride to IntptrTy.
  if (Stride)
    Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);

  SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore->getIterator(),
                                 [&](IRBuilderBase &IRB, Value *Index) {
    Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
    if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
      if (MaskElemC->isZero())
        // No check
        return;
      // Unconditional check
    } else {
      // Conditional check
      Instruction *ThenTerm = SplitBlockAndInsertIfThen(
          MaskElem, &*IRB.GetInsertPoint(), false);
      IRB.SetInsertPoint(ThenTerm);
    }

    Value *InstrumentedAddress;
    if (isa<VectorType>(Addr->getType())) {
      assert(
          cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
          "Expected vector of pointer.");
      InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
    } else if (Stride) {
      Index = IRB.CreateMul(Index, Stride);
      InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
    } else {
      InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
    }
    doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
                        Alignment, Granularity, ElemTypeSize, IsWrite,
                        SizeArgument, UseCalls, Exp, RTCI);
  });
}
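
// Conceptually, for a masked vector access the code above expands into a
// per-lane loop (a pseudo-C sketch, assuming no EVL and no stride):
//   for (uintptr_t i = 0; i < NumElements; i++)
//     if (Mask[i])                      // folded away for constant masks
//       doInstrumentAddress(..., &Addr[i], ElemTypeSize, ...);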

void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                                     InterestingMemoryOperand &O, bool UseCalls,
                                     const DataLayout &DL,
                                     RuntimeCallInserter &RTCI) {
  Value *Addr = O.getPtr();

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that
  // remove instrumentation (assess false negatives). Instead of completely
  // removing some instrumentation, set Exp to a non-zero value (a mask of
  // the optimization experiments that want to remove instrumentation of this
  // instruction). If Exp is non-zero, this pass will emit special calls into
  // the runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1).
  // These calls make the runtime terminate the program in a special way (with
  // a different exit status). Then you run the new compiler on a buggy corpus,
  // collect the special terminations (ideally, you don't see them at all --
  // no false negatives) and make the decision on the optimization.
  uint32_t Exp = ClForceExperiment;

  if (ClOpt && ClOptGlobals) {
    // If initialization order checking is disabled, a simple access to a
    // dynamically initialized global is always valid.
    GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
    if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
        isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
      NumOptimizedAccessesToGlobalVar++;
      return;
    }
  }

  if (ClOpt && ClOptStack) {
    // A direct inbounds access to a stack variable is always valid.
    if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
        isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
      NumOptimizedAccessesToStackVar++;
      return;
    }
  }

  if (O.IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  unsigned Granularity = 1 << Mapping.Scale;
  if (O.MaybeMask) {
    instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
                                O.MaybeStride, O.getInsn(), Addr, O.Alignment,
                                Granularity, O.OpType, O.IsWrite, nullptr,
                                UseCalls, Exp, RTCI);
  } else {
    doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
                        Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
                        UseCalls, Exp, RTCI);
  }
}

Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
                                                 Value *Addr, bool IsWrite,
                                                 size_t AccessSizeIndex,
                                                 Value *SizeArgument,
                                                 uint32_t Exp,
                                                 RuntimeCallInserter &RTCI) {
  InstrumentationIRBuilder IRB(InsertBefore);
  Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
  CallInst *Call = nullptr;
  if (SizeArgument) {
    if (Exp == 0)
      Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
                                    {Addr, SizeArgument});
    else
      Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
                                    {Addr, SizeArgument, ExpVal});
  } else {
    if (Exp == 0)
      Call = RTCI.createRuntimeCall(
          IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
    else
      Call = RTCI.createRuntimeCall(
          IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
  }

  Call->setCannotMerge();
  return Call;
}

Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                                           Value *ShadowValue,
                                           uint32_t TypeStoreSize) {
  size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
  // Addr & (Granularity - 1)
  Value *LastAccessedByte =
      IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
  // (Addr & (Granularity - 1)) + size - 1
  if (TypeStoreSize / 8 > 1)
    LastAccessedByte = IRB.CreateAdd(
        LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
  // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
  LastAccessedByte =
      IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
  // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
  return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}
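
// Worked example with Granularity = 8: for a 2-byte access at an address
// with (Addr & 7) == 5, the shadow byte k encodes how many of the 8 bytes
// in the granule are addressable (0 means all 8). The slow path flags the
// access iff 5 + 2 - 1 = 6 >= k, so k = 4 (only 4 valid bytes) traps, while
// k = 0 never reaches this check because the fast path already passed.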

Instruction *AddressSanitizer::instrumentAMDGPUAddress(
    Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
    uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
  // Do not instrument unsupported addrspaces.
  if (isUnsupportedAMDGPUAddrspace(Addr))
    return nullptr;
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  // Follow host instrumentation for global and constant addresses.
  if (PtrTy->getPointerAddressSpace() != 0)
    return InsertBefore;
  // Instrument generic addresses in supported address spaces.
  IRBuilder<> IRB(InsertBefore);
  Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
  Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
  Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
  Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
  Value *AddrSpaceZeroLanding =
      SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
  InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
  return InsertBefore;
}

Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
                                                    Value *Cond, bool Recover) {
  Module &M = *IRB.GetInsertBlock()->getModule();
  Value *ReportCond = Cond;
  if (!Recover) {
    auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
                                        IRB.getInt1Ty());
    ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
  }

  auto *Trm =
      SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
                                MDBuilder(*C).createUnlikelyBranchWeights());
  Trm->getParent()->setName("asan.report");

  if (Recover)
    return Trm;

  Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
  IRB.SetInsertPoint(Trm);
  return IRB.CreateCall(
      M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
}

void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                         Instruction *InsertBefore, Value *Addr,
                                         MaybeAlign Alignment,
                                         uint32_t TypeStoreSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls,
                                         uint32_t Exp,
                                         RuntimeCallInserter &RTCI) {
  if (TargetTriple.isAMDGPU()) {
    InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
                                           TypeStoreSize, IsWrite, SizeArgument);
    if (!InsertBefore)
      return;
  }

  InstrumentationIRBuilder IRB(InsertBefore);
  size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);

  if (UseCalls && ClOptimizeCallbacks) {
    const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
    IRB.CreateIntrinsic(Intrinsic::asan_check_memaccess, {},
                        {IRB.CreatePointerCast(Addr, PtrTy),
                         ConstantInt::get(Int32Ty, AccessInfo.Packed)});
    return;
  }

  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  if (UseCalls) {
    if (Exp == 0)
      RTCI.createRuntimeCall(
          IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
    else
      RTCI.createRuntimeCall(
          IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
          {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
    return;
  }

  Type *ShadowTy =
      IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
  Type *ShadowPtrTy = PointerType::get(*C, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  const uint64_t ShadowAlign =
      std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
  Value *ShadowValue = IRB.CreateAlignedLoad(
      ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));

  Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
  size_t Granularity = 1ULL << Mapping.Scale;
  Instruction *CrashTerm = nullptr;

  bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));

  if (TargetTriple.isAMDGCN()) {
    if (GenSlowPath) {
      auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
      Cmp = IRB.CreateAnd(Cmp, Cmp2);
    }
    CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
  } else if (GenSlowPath) {
    // We use branch weights for the slow path check, to indicate that the slow
    // path is rarely taken. This seems to be the case for SPEC benchmarks.
    Instruction *CheckTerm = SplitBlockAndInsertIfThen(
        Cmp, InsertBefore, false, MDBuilder(*C).createUnlikelyBranchWeights());
    assert(cast<BranchInst>(CheckTerm)->isUnconditional());
    BasicBlock *NextBB = CheckTerm->getSuccessor(0);
    IRB.SetInsertPoint(CheckTerm);
    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
    if (Recover) {
      CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
    } else {
      BasicBlock *CrashBlock =
        BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
      CrashTerm = new UnreachableInst(*C, CrashBlock);
      BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
      ReplaceInstWithInst(CheckTerm, NewTerm);
    }
  } else {
    CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
  }

  Instruction *Crash = generateCrashCode(
      CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
  if (OrigIns->getDebugLoc())
    Crash->setDebugLoc(OrigIns->getDebugLoc());
}
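
// Taken together, the inline fast/slow path generated above behaves like the
// following pseudo-C (a sketch for a non-AMDGPU target with Granularity = 8,
// UseCalls = false, Recover = false):
//   int8_t k = *(int8_t *)MemToShadow(Addr);
//   if (k != 0) {                                 // fast path: shadow clean
//     if (AccessSizeInBytes >= 8 ||
//         ((Addr & 7) + AccessSizeInBytes - 1) >= k)   // createSlowPathCmp
//       __asan_report_xxx(Addr);                  // generateCrashCode
//   }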

// Instrument an access with unusual size or unusual alignment.
// We cannot do this with a single check, so we do a 1-byte check for the
// first and the last bytes. We call __asan_report_*_n(addr, real_size) to
// be able to report the actual access size.
void AddressSanitizer::instrumentUnusualSizeOrAlignment(
    Instruction *I, Instruction *InsertBefore, Value *Addr,
    TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
    uint32_t Exp, RuntimeCallInserter &RTCI) {
  InstrumentationIRBuilder IRB(InsertBefore);
  Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
  Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));

  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  if (UseCalls) {
    if (Exp == 0)
      RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
                             {AddrLong, Size});
    else
      RTCI.createRuntimeCall(
          IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
          {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
  } else {
    Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
    Value *LastByte = IRB.CreateIntToPtr(
        IRB.CreateAdd(AddrLong, SizeMinusOne),
        Addr->getType());
    instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
                      RTCI);
    instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
                      Exp, RTCI);
  }
}
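
// For example, an 18-byte access at Addr is checked as two 1-byte probes at
// Addr and Addr + 17, and on failure the runtime receives the real size,
// e.g. __asan_report_load_n(Addr, 18) rather than a fixed-size report.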

void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit) {
  // Set up the arguments to our poison/unpoison functions.
  IRBuilder<> IRB(&GlobalInit.front(),
                  GlobalInit.front().getFirstInsertionPt());

  // Add a call to poison all external globals before the given function starts.
  Value *ModuleNameAddr =
      ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy);
  IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);

  // Add calls to unpoison all globals before each return instruction.
  for (auto &BB : GlobalInit)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
      CallInst::Create(AsanUnpoisonGlobals, "", RI->getIterator());
}
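
// The net effect on a module initializer is roughly (a sketch; the actual
// callee names are whatever kAsanPoisonGlobalsName/kAsanUnpoisonGlobalsName
// resolve to in the runtime):
//   void _GLOBAL__sub_I_foo.cpp() {
//     __asan_before_dynamic_init(<module name>);  // poison external globals
//     ... original initialization code ...
//     __asan_after_dynamic_init();                // before every return
//   }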

void ModuleAddressSanitizer::createInitializerPoisonCalls() {
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
  if (!GV)
    return;

  ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
  if (!CA)
    return;

  for (Use &OP : CA->operands()) {
    if (isa<ConstantAggregateZero>(OP)) continue;
    ConstantStruct *CS = cast<ConstantStruct>(OP);

    // Must have a function or null ptr.
    if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
      if (F->getName() == kAsanModuleCtorName) continue;
      auto *Priority = cast<ConstantInt>(CS->getOperand(0));
      // Don't instrument CTORs that will run before asan.module_ctor.
      if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
        continue;
      poisonOneInitializer(*F);
    }
  }
}

const GlobalVariable *
ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
  // In case this function should be expanded to include rules that do not just
  // apply when CompileKernel is true, either guard all existing rules with an
  // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
  // should also apply to user space.
  assert(CompileKernel && "Only expecting to be called when compiling kernel");

  const Constant *C = GA.getAliasee();

  // When compiling the kernel, globals that are aliased by symbols prefixed
  // by "__" are special and cannot be padded with a redzone.
  if (GA.getName().starts_with("__"))
    return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());

  return nullptr;
}

bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
  Type *Ty = G->getValueType();
  LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");

  if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
    return false;
  if (!Ty->isSized()) return false;
  if (!G->hasInitializer()) return false;
  // Globals in address spaces 1 and 4 are supported for AMDGPU.
  if (G->getAddressSpace() &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
    return false;
  if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
  // Two problems with thread-locals:
  //   - The address of the main thread's copy can't be computed at link-time.
  //   - Need to poison all copies, not just the main thread's one.
  if (G->isThreadLocal()) return false;
  // For now, just ignore this global if the alignment is large.
  if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;

  // For non-COFF targets, only instrument globals known to be defined by this
  // TU.
  // FIXME: We can instrument comdat globals on ELF if we are using the
  // GC-friendly metadata scheme.
  if (!TargetTriple.isOSBinFormatCOFF()) {
    if (!G->hasExactDefinition() || G->hasComdat())
      return false;
  } else {
    // On COFF, don't instrument non-ODR linkages.
    if (G->isInterposable())
      return false;
    // If the global has AvailableExternally linkage, then it is not in this
    // module, which means it does not need to be instrumented.
    if (G->hasAvailableExternallyLinkage())
      return false;
  }

  // If a comdat is present, it must have a selection kind that implies ODR
  // semantics: no duplicates, any, or exact match.
  if (Comdat *C = G->getComdat()) {
    switch (C->getSelectionKind()) {
    case Comdat::Any:
    case Comdat::ExactMatch:
    case Comdat::NoDeduplicate:
      break;
    case Comdat::Largest:
    case Comdat::SameSize:
      return false;
    }
  }

  if (G->hasSection()) {
    // The kernel uses explicit sections for mostly special global variables
    // that we should not instrument. E.g. the kernel may rely on their layout
    // without redzones, or remove them at link time ("discard.*"), etc.
    if (CompileKernel)
      return false;

    StringRef Section = G->getSection();

    // Globals from llvm.metadata aren't emitted, do not instrument them.
    if (Section == "llvm.metadata") return false;
    // Do not instrument globals from special LLVM sections.
    if (Section.contains("__llvm") || Section.contains("__LLVM"))
      return false;

    // Do not instrument function pointers to initialization and termination
    // routines: the dynamic linker will not properly handle redzones.
    if (Section.starts_with(".preinit_array") ||
        Section.starts_with(".init_array") ||
        Section.starts_with(".fini_array")) {
      return false;
    }

    // Do not instrument user-defined sections (with names resembling
    // valid C identifiers).
    if (TargetTriple.isOSBinFormatELF()) {
      if (llvm::all_of(Section,
                       [](char c) { return llvm::isAlnum(c) || c == '_'; }))
        return false;
    }

    // On COFF, if the section name contains '$', it is highly likely that the
    // user is using section sorting to create an array of globals similar to
    // the way initialization callbacks are registered in .init_array and
    // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
    // to such globals is counterproductive, because the intent is that they
    // will form an array, and out-of-bounds accesses are expected.
    // See https://github.com/google/sanitizers/issues/305
    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
    if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
      LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
                        << *G << "\n");
      return false;
    }

    if (TargetTriple.isOSBinFormatMachO()) {
      StringRef ParsedSegment, ParsedSection;
      unsigned TAA = 0, StubSize = 0;
      bool TAAParsed;
      cantFail(MCSectionMachO::ParseSectionSpecifier(
          Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));

      // Ignore the globals from the __OBJC section. The ObjC runtime assumes
      // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
      // them.
      if (ParsedSegment == "__OBJC" ||
          (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
        LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
        return false;
      }
      // See https://github.com/google/sanitizers/issues/32
      // Constant CFString instances are compiled in the following way:
      //  -- the string buffer is emitted into
      //     __TEXT,__cstring,cstring_literals
      //  -- the constant NSConstantString structure referencing that buffer
      //     is placed into __DATA,__cfstring
      // Therefore there's no point in placing redzones into __DATA,__cfstring.
      // Moreover, it causes the linker to crash on OS X 10.7
      if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
        LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
        return false;
      }
      // The linker merges the contents of cstring_literals and removes the
      // trailing zeroes.
      if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
        LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
        return false;
      }
    }
  }

  if (CompileKernel) {
    // Globals that are prefixed by "__" are special and cannot be padded with
    // a redzone.
    if (G->getName().starts_with("__"))
      return false;
  }

  return true;
}

// On Mach-O platforms, we emit global metadata in a separate section of the
// binary in order to allow the linker to properly dead strip. This is only
// supported on recent versions of ld64.
bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
  if (!TargetTriple.isOSBinFormatMachO())
    return false;

  if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
    return true;
  if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
    return true;
  if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
    return true;
  if (TargetTriple.isDriverKit())
    return true;
  if (TargetTriple.isXROS())
    return true;

  return false;
}

StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
  switch (TargetTriple.getObjectFormat()) {
  case Triple::COFF:  return ".ASAN$GL";
  case Triple::ELF:   return "asan_globals";
  case Triple::MachO: return "__DATA,__asan_globals,regular";
  case Triple::Wasm:
  case Triple::GOFF:
  case Triple::SPIRV:
  case Triple::XCOFF:
  case Triple::DXContainer:
    report_fatal_error(
        "ModuleAddressSanitizer not implemented for object file format");
  case Triple::UnknownObjectFormat:
    break;
  }
  llvm_unreachable("unsupported object format");
}

void ModuleAddressSanitizer::initializeCallbacks() {
  IRBuilder<> IRB(*C);

  // Declare our poisoning and unpoisoning functions.
  AsanPoisonGlobals =
      M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
  AsanUnpoisonGlobals =
      M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());

  // Declare functions that register/unregister globals.
  AsanRegisterGlobals = M.getOrInsertFunction(
      kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanUnregisterGlobals = M.getOrInsertFunction(
      kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);

  // Declare the functions that find globals in a shared object and then invoke
  // the (un)register function on them.
  AsanRegisterImageGlobals = M.getOrInsertFunction(
      kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
  AsanUnregisterImageGlobals = M.getOrInsertFunction(
      kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);

  AsanRegisterElfGlobals =
      M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, IntptrTy);
  AsanUnregisterElfGlobals =
      M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, IntptrTy);
}

// Put the metadata and the instrumented global in the same group. This ensures
// that the metadata is discarded if the instrumented global is discarded.
void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
    GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
  Module &M = *G->getParent();
  Comdat *C = G->getComdat();
  if (!C) {
    if (!G->hasName()) {
      // If G is unnamed, it must be internal. Give it an artificial name
      // so we can put it in a comdat.
      assert(G->hasLocalLinkage());
      G->setName(genName("anon_global"));
    }

    if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
      std::string Name = std::string(G->getName());
      Name += InternalSuffix;
      C = M.getOrInsertComdat(Name);
    } else {
      C = M.getOrInsertComdat(G->getName());
    }

    // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
    // linkage to internal linkage so that a symbol table entry is emitted.
    // This is necessary in order to create the comdat group.
    if (TargetTriple.isOSBinFormatCOFF()) {
      C->setSelectionKind(Comdat::NoDeduplicate);
      if (G->hasPrivateLinkage())
        G->setLinkage(GlobalValue::InternalLinkage);
    }
    G->setComdat(C);
  }

  assert(G->hasComdat());
  Metadata->setComdat(G->getComdat());
}

// Create a separate metadata global and put it in the appropriate ASan
// global registration section.
GlobalVariable *
ModuleAddressSanitizer::CreateMetadataGlobal(Constant *Initializer,
                                             StringRef OriginalName) {
  auto Linkage = TargetTriple.isOSBinFormatMachO()
                     ? GlobalVariable::InternalLinkage
                     : GlobalVariable::PrivateLinkage;
  GlobalVariable *Metadata = new GlobalVariable(
      M, Initializer->getType(), false, Linkage, Initializer,
      Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
  Metadata->setSection(getGlobalMetadataSection());
  // Place metadata in a large section for x86-64 ELF binaries to mitigate
  // relocation pressure.
  setGlobalVariableLargeSection(TargetTriple, *Metadata);
  return Metadata;
}

Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
  AsanDtorFunction = Function::createWithDefaultAttr(
      FunctionType::get(Type::getVoidTy(*C), false),
      GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
  AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
  // Ensure the Dtor cannot be discarded, even if in a comdat.
  appendToUsed(M, {AsanDtorFunction});
  BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);

  return ReturnInst::Create(*C, AsanDtorBB);
}

void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
    IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());
  auto &DL = M.getDataLayout();

  SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
    Constant *Initializer = MetadataInitializers[i];
    GlobalVariable *G = ExtendedGlobals[i];
    GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
    MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
    Metadata->setMetadata(LLVMContext::MD_associated, MD);
    MetadataGlobals[i] = Metadata;

    // The MSVC linker always inserts padding when linking incrementally. We
    // cope with that by aligning each struct to its size, which must be a power
    // of two.
    unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
    assert(isPowerOf2_32(SizeOfGlobalStruct) &&
           "global metadata will not be padded appropriately");
    Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));

    SetComdatForGlobalMetadata(G, Metadata, "");
  }

  // Update llvm.compiler.used, adding the new metadata globals. This is
  // needed so that during LTO these variables stay alive.
  if (!MetadataGlobals.empty())
    appendToCompilerUsed(M, MetadataGlobals);
}
2388 
instrumentGlobalsELF(IRBuilder<> & IRB,ArrayRef<GlobalVariable * > ExtendedGlobals,ArrayRef<Constant * > MetadataInitializers,const std::string & UniqueModuleId)2389 void ModuleAddressSanitizer::instrumentGlobalsELF(
2390     IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2391     ArrayRef<Constant *> MetadataInitializers,
2392     const std::string &UniqueModuleId) {
2393   assert(ExtendedGlobals.size() == MetadataInitializers.size());
2394 
2395   // Putting globals in a comdat changes the semantic and potentially cause
2396   // false negative odr violations at link time. If odr indicators are used, we
2397   // keep the comdat sections, as link time odr violations will be dectected on
2398   // the odr indicator symbols.
2399   bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2400 
2401   SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2402   for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2403     GlobalVariable *G = ExtendedGlobals[i];
2404     GlobalVariable *Metadata =
2405         CreateMetadataGlobal(MetadataInitializers[i], G->getName());
2406     MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2407     Metadata->setMetadata(LLVMContext::MD_associated, MD);
2408     MetadataGlobals[i] = Metadata;
2409 
2410     if (UseComdatForGlobalsGC)
2411       SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2412   }
2413 
2414   // Update llvm.compiler.used, adding the new metadata globals. This is
2415   // needed so that during LTO these variables stay alive.
2416   if (!MetadataGlobals.empty())
2417     appendToCompilerUsed(M, MetadataGlobals);
2418 
2419   // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2420   // to look up the loaded image that contains it. Second, we can store in it
2421   // whether registration has already occurred, to prevent duplicate
2422   // registration.
2423   //
2424   // Common linkage ensures that there is only one global per shared library.
2425   GlobalVariable *RegisteredFlag = new GlobalVariable(
2426       M, IntptrTy, false, GlobalVariable::CommonLinkage,
2427       ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2428   RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2429 
2430   // Create start and stop symbols.
2431   GlobalVariable *StartELFMetadata = new GlobalVariable(
2432       M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2433       "__start_" + getGlobalMetadataSection());
2434   StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2435   GlobalVariable *StopELFMetadata = new GlobalVariable(
2436       M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2437       "__stop_" + getGlobalMetadataSection());
2438   StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2439 
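  // For example, with an ELF metadata section named "asan_globals" (shown
  // here for illustration; the actual name comes from
  // getGlobalMetadataSection()), the linker synthesizes __start_asan_globals
  // and __stop_asan_globals, and the runtime walks that [start, stop) range
  // to find every descriptor emitted by any TU in the linkage unit.
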
  // Create a call to register the globals with the runtime.
  if (ConstructorKind == AsanCtorKind::Global)
    IRB.CreateCall(AsanRegisterElfGlobals,
                 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
                  IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
                  IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
  if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
    IRBuilder<> IrbDtor(CreateAsanModuleDtor());
    IrbDtor.CreateCall(AsanUnregisterElfGlobals,
                       {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
                        IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
                        IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
  }
}

void ModuleAddressSanitizer::InstrumentGlobalsMachO(
    IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());

  // On recent Mach-O platforms, use a structure which binds the liveness of
  // the global variable to the metadata struct. Keep a list of the "Liveness"
  // GVs created, to be added to llvm.compiler.used.
  StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
  SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());

  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
    Constant *Initializer = MetadataInitializers[i];
    GlobalVariable *G = ExtendedGlobals[i];
    GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());

    // On recent Mach-O platforms, we emit the global metadata in a way that
    // allows the linker to properly strip dead globals.
    auto LivenessBinder =
        ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
                            ConstantExpr::getPointerCast(Metadata, IntptrTy));
    GlobalVariable *Liveness = new GlobalVariable(
        M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
        Twine("__asan_binder_") + G->getName());
    Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
    LivenessGlobals[i] = Liveness;
  }
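
  // Conceptually each binder is the pair { &global, &metadata }. The
  // live_support section attribute tells the Mach-O linker to keep a binder
  // alive only while the data it references is alive, so dead-stripping a
  // global also strips its registration metadata.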

  // Update llvm.compiler.used, adding the new liveness globals. This is
  // needed so that during LTO these variables stay alive. The alternative
  // would be to have the linker handle the LTO symbols, but libLTO's current
  // API does not expose access to the section for each symbol.
  if (!LivenessGlobals.empty())
    appendToCompilerUsed(M, LivenessGlobals);

  // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
  // to look up the loaded image that contains it. Second, we can store in it
  // whether registration has already occurred, to prevent duplicate
  // registration.
  //
  // Common linkage ensures that there is only one global per shared library.
  GlobalVariable *RegisteredFlag = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::CommonLinkage,
      ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
  RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);

  if (ConstructorKind == AsanCtorKind::Global)
    IRB.CreateCall(AsanRegisterImageGlobals,
                 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
  if (DestructorKind != AsanDtorKind::None) {
    IRBuilder<> IrbDtor(CreateAsanModuleDtor());
    IrbDtor.CreateCall(AsanUnregisterImageGlobals,
                       {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
  }
}

void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
    IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());
  unsigned N = ExtendedGlobals.size();
  assert(N > 0);

  // On platforms that don't have a custom metadata section, we emit an array
  // of global metadata structures.
  ArrayType *ArrayOfGlobalStructTy =
      ArrayType::get(MetadataInitializers[0]->getType(), N);
  auto AllGlobals = new GlobalVariable(
      M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
      ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
  if (Mapping.Scale > 3)
    AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));

  if (ConstructorKind == AsanCtorKind::Global)
    IRB.CreateCall(AsanRegisterGlobals,
                 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
                  ConstantInt::get(IntptrTy, N)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
  if (DestructorKind != AsanDtorKind::None) {
    IRBuilder<> IrbDtor(CreateAsanModuleDtor());
    IrbDtor.CreateCall(AsanUnregisterGlobals,
                       {IRB.CreatePointerCast(AllGlobals, IntptrTy),
                        ConstantInt::get(IntptrTy, N)});
  }
}
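
// In pseudo-C, the registration this emits is roughly (a sketch, assuming
// the descriptor struct documented in instrumentGlobals below):
//   static struct Descriptor all_globals[N] = { ... };
//   __asan_register_globals((uptr)&all_globals[0], N);   // in the module ctor
//   __asan_unregister_globals((uptr)&all_globals[0], N); // in the module dtor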

// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
// Sets *CtorComdat to true if the global registration code emitted into the
// asan constructor is comdat-compatible.
void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB,
                                               bool *CtorComdat) {
  // Build set of globals that are aliased by some GA, where
  // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
  SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
  if (CompileKernel) {
    for (auto &GA : M.aliases()) {
      if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
        AliasedGlobalExclusions.insert(GV);
    }
  }

  SmallVector<GlobalVariable *, 16> GlobalsToChange;
  for (auto &G : M.globals()) {
    if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
      GlobalsToChange.push_back(&G);
  }

  size_t n = GlobalsToChange.size();
  auto &DL = M.getDataLayout();

  // A global is described by a structure
  //   size_t beg;
  //   size_t size;
  //   size_t size_with_redzone;
  //   const char *name;
  //   const char *module_name;
  //   size_t has_dynamic_init;
  //   size_t padding_for_windows_msvc_incremental_link;
  //   size_t odr_indicator;
  // We initialize an array of such structures and pass it to a run-time call.
  StructType *GlobalStructTy =
      StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
                      IntptrTy, IntptrTy, IntptrTy);
  SmallVector<GlobalVariable *, 16> NewGlobals(n);
  SmallVector<Constant *, 16> Initializers(n);

  for (size_t i = 0; i < n; i++) {
    GlobalVariable *G = GlobalsToChange[i];

    GlobalValue::SanitizerMetadata MD;
    if (G->hasSanitizerMetadata())
      MD = G->getSanitizerMetadata();

    // The runtime library tries demangling symbol names in the descriptor but
    // functionality like __cxa_demangle may be unavailable (e.g.
    // -static-libstdc++). So we demangle the symbol names here.
    std::string NameForGlobal = G->getName().str();
    GlobalVariable *Name =
        createPrivateGlobalForString(M, llvm::demangle(NameForGlobal),
                                     /*AllowMerging*/ true, genName("global"));

    Type *Ty = G->getValueType();
    const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
    const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
    Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);

    StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
    Constant *NewInitializer = ConstantStruct::get(
        NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));

    // Create a new global variable with enough space for a redzone.
    GlobalValue::LinkageTypes Linkage = G->getLinkage();
    if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
      Linkage = GlobalValue::InternalLinkage;
    GlobalVariable *NewGlobal = new GlobalVariable(
        M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
        G->getThreadLocalMode(), G->getAddressSpace());
    NewGlobal->copyAttributesFrom(G);
    NewGlobal->setComdat(G->getComdat());
    NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
    // Don't fold globals with redzones. The ODR violation detector and redzone
    // poisoning implicitly create a dependence on the global's address, so it
    // is no longer valid for it to be marked unnamed_addr.
    NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);

    // Move null-terminated C strings to "__asan_cstring" section on Darwin.
    if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
        G->isConstant()) {
      auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
      if (Seq && Seq->isCString())
        NewGlobal->setSection("__TEXT,__asan_cstring,regular");
    }

    // Transfer the debug info and type metadata.  The payload starts at offset
    // zero so we can copy the metadata over as is.
    NewGlobal->copyMetadata(G, 0);

    Value *Indices2[2];
    Indices2[0] = IRB.getInt32(0);
    Indices2[1] = IRB.getInt32(0);

    G->replaceAllUsesWith(
        ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
    NewGlobal->takeName(G);
    G->eraseFromParent();
    NewGlobals[i] = NewGlobal;

    Constant *ODRIndicator = ConstantPointerNull::get(PtrTy);
    GlobalValue *InstrumentedGlobal = NewGlobal;

    bool CanUsePrivateAliases =
        TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
        TargetTriple.isOSBinFormatWasm();
    if (CanUsePrivateAliases && UsePrivateAlias) {
      // Create local alias for NewGlobal to avoid crash on ODR between
      // instrumented and non-instrumented libraries.
      InstrumentedGlobal =
          GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
    }

    // ODR should not happen for local linkage.
    if (NewGlobal->hasLocalLinkage()) {
      ODRIndicator =
          ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), PtrTy);
    } else if (UseOdrIndicator) {
      // With local aliases, we need to provide another externally visible
      // symbol __odr_asan_XXX to detect ODR violation.
      auto *ODRIndicatorSym =
          new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
                             Constant::getNullValue(IRB.getInt8Ty()),
                             kODRGenPrefix + NameForGlobal, nullptr,
                             NewGlobal->getThreadLocalMode());

      // Set meaningful attributes for indicator symbol.
      ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
      ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
      ODRIndicatorSym->setAlignment(Align(1));
      ODRIndicator = ODRIndicatorSym;
    }

    Constant *Initializer = ConstantStruct::get(
        GlobalStructTy,
        ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
        ConstantInt::get(IntptrTy, SizeInBytes),
        ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
        ConstantExpr::getPointerCast(Name, IntptrTy),
        ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy),
        ConstantInt::get(IntptrTy, MD.IsDynInit),
        Constant::getNullValue(IntptrTy),
        ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));

    LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");

    Initializers[i] = Initializer;
  }

  // Add instrumented globals to the llvm.compiler.used list to prevent LTO
  // from ConstantMerge'ing them.
  SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
  for (size_t i = 0; i < n; i++) {
    GlobalVariable *G = NewGlobals[i];
    if (G->getName().empty()) continue;
    GlobalsToAddToUsedList.push_back(G);
  }
  appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));

  if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
    // Use COMDAT and register globals even if n == 0 to ensure that (a) the
    // linkage unit will only have one module constructor, and (b) the register
    // function will be called. The module destructor is not created when n ==
    // 0.
    *CtorComdat = true;
    instrumentGlobalsELF(IRB, NewGlobals, Initializers, getUniqueModuleId(&M));
  } else if (n == 0) {
    // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
    // all compile units will have identical module constructor/destructor.
    *CtorComdat = TargetTriple.isOSBinFormatELF();
  } else {
    *CtorComdat = false;
    if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
      InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
    } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
      InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
    } else {
      InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
    }
  }

  // Create calls for poisoning before initializers run and unpoisoning after.
  if (ClInitializers)
    createInitializerPoisonCalls();

  LLVM_DEBUG(dbgs() << M);
}

uint64_t
ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
  constexpr uint64_t kMaxRZ = 1 << 18;
  const uint64_t MinRZ = getMinRedzoneSizeForGlobal();

  uint64_t RZ = 0;
  if (SizeInBytes <= MinRZ / 2) {
    // Reduce the redzone size for small objects, e.g. int, char[1]. MinRZ is
    // at least 32 bytes, so optimize when SizeInBytes is at most half of
    // MinRZ.
    RZ = MinRZ - SizeInBytes;
  } else {
    // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
    RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);

    // Round up to multiple of MinRZ.
    if (SizeInBytes % MinRZ)
      RZ += MinRZ - (SizeInBytes % MinRZ);
  }

  assert((RZ + SizeInBytes) % MinRZ == 0);

  return RZ;
}
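
// Worked examples, assuming getMinRedzoneSizeForGlobal() returns 32:
//   SizeInBytes == 4    -> RZ == 28  (object plus redzone fill one 32-byte slot)
//   SizeInBytes == 100  -> RZ == 60  (32 plus 28 bytes of round-up padding)
//   SizeInBytes == 1024 -> RZ == 256 (about a quarter of the object size)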

int ModuleAddressSanitizer::GetAsanVersion() const {
  int LongSize = M.getDataLayout().getPointerSizeInBits();
  bool isAndroid = M.getTargetTriple().isAndroid();
  int Version = 8;
  // 32-bit Android is one version ahead because of the switch to dynamic
  // shadow.
  Version += (LongSize == 32 && isAndroid);
  return Version;
}

GlobalVariable *ModuleAddressSanitizer::getOrCreateModuleName() {
  if (!ModuleName) {
    // We shouldn't merge identical module names, as this string serves as a
    // unique module ID at runtime.
    ModuleName =
        createPrivateGlobalForString(M, M.getModuleIdentifier(),
                                     /*AllowMerging*/ false, genName("module"));
  }
  return ModuleName;
}

bool ModuleAddressSanitizer::instrumentModule() {
  initializeCallbacks();

  for (Function &F : M)
    removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/false);

  // Create a module constructor. A destructor is created lazily because not
  // all platforms and not all modules need it.
  if (ConstructorKind == AsanCtorKind::Global) {
    if (CompileKernel) {
      // The kernel always builds with its own runtime, and therefore does not
      // need the init and version check calls.
      AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
    } else {
      std::string AsanVersion = std::to_string(GetAsanVersion());
      std::string VersionCheckName =
          InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
      std::tie(AsanCtorFunction, std::ignore) =
          createSanitizerCtorAndInitFunctions(
              M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
              /*InitArgs=*/{}, VersionCheckName);
    }
  }

  bool CtorComdat = true;
  if (ClGlobals) {
    assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
    if (AsanCtorFunction) {
      IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
      instrumentGlobals(IRB, &CtorComdat);
    } else {
      IRBuilder<> IRB(*C);
      instrumentGlobals(IRB, &CtorComdat);
    }
  }

  const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);

  // Put the constructor and destructor in comdat if both
  // (1) global instrumentation is not TU-specific, and
  // (2) the target is ELF.
  if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
    if (AsanCtorFunction) {
      AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
      appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
    }
    if (AsanDtorFunction) {
      AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
      appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
    }
  } else {
    if (AsanCtorFunction)
      appendToGlobalCtors(M, AsanCtorFunction, Priority);
    if (AsanDtorFunction)
      appendToGlobalDtors(M, AsanDtorFunction, Priority);
  }

  return true;
}

void AddressSanitizer::initializeCallbacks(const TargetLibraryInfo *TLI) {
  IRBuilder<> IRB(*C);
  // Create __asan_report* callbacks.
  // IsWrite, TypeSize and Exp are encoded in the function name.
  for (int Exp = 0; Exp < 2; Exp++) {
    for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
      const std::string TypeStr = AccessIsWrite ? "store" : "load";
      const std::string ExpStr = Exp ? "exp_" : "";
      const std::string EndingStr = Recover ? "_noabort" : "";

      SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
      SmallVector<Type *, 2> Args1{1, IntptrTy};
      AttributeList AL2;
      AttributeList AL1;
      if (Exp) {
        Type *ExpType = Type::getInt32Ty(*C);
        Args2.push_back(ExpType);
        Args1.push_back(ExpType);
        if (auto AK = TLI->getExtAttrForI32Param(false)) {
          AL2 = AL2.addParamAttribute(*C, 2, AK);
          AL1 = AL1.addParamAttribute(*C, 1, AK);
        }
      }
      AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);

      AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);

      for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
           AccessSizeIndex++) {
        const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
        AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);

        AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
      }
    }
  }

  const std::string MemIntrinCallbackPrefix =
      (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
          ? std::string("")
          : ClMemoryAccessCallbackPrefix;
  AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
                                      PtrTy, PtrTy, PtrTy, IntptrTy);
  AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
                                     PtrTy, PtrTy, IntptrTy);
  AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
                                     TLI->getAttrList(C, {1}, /*Signed=*/false),
                                     PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);

  AsanHandleNoReturnFunc =
      M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());

  AsanPtrCmpFunction =
      M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanPtrSubFunction =
      M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
  if (Mapping.InGlobal)
    AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
                                           ArrayType::get(IRB.getInt8Ty(), 0));

  AMDGPUAddressShared =
      M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
  AMDGPUAddressPrivate =
      M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
}
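
// With the usual name templates, the loops above declare callback families
// such as __asan_report_load4 / __asan_report_store8, the sized "_n"/"N"
// variants for arbitrary lengths, the "exp_" forms that carry an extra i32
// argument, and "_noabort" forms when Recover is enabled. (Concrete names
// depend on kAsanReportErrorTemplate and ClMemoryAccessCallbackPrefix.)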

bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
  // For each NSObject descendant having a +load method, this method is invoked
  // by the ObjC runtime before any of the static constructors is called.
  // Therefore we need to instrument such methods with a call to __asan_init
  // at the beginning in order to initialize our runtime before any access to
  // the shadow memory.
  // We cannot just ignore these methods, because they may call other
  // instrumented functions.
  if (F.getName().contains(" load]")) {
    FunctionCallee AsanInitFunction =
        declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
    IRBuilder<> IRB(&F.front(), F.front().begin());
    IRB.CreateCall(AsanInitFunction, {});
    return true;
  }
  return false;
}

bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
  // Generate code only when dynamic addressing is needed.
  if (Mapping.Offset != kDynamicShadowSentinel)
    return false;

  IRBuilder<> IRB(&F.front().front());
  if (Mapping.InGlobal) {
    if (ClWithIfuncSuppressRemat) {
      // An empty inline asm with input reg == output reg.
      // An opaque pointer-to-int cast, basically.
      InlineAsm *Asm = InlineAsm::get(
          FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
          StringRef(""), StringRef("=r,0"),
          /*hasSideEffects=*/false);
      LocalDynamicShadow =
          IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
    } else {
      LocalDynamicShadow =
          IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
    }
  } else {
    Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
        kAsanShadowMemoryDynamicAddress, IntptrTy);
    LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
  }
  return true;
}
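
// In the non-InGlobal case the entry block ends up starting with roughly
// (illustrative IR, names invented here):
//   %shadow.base = load i64, ptr @<kAsanShadowMemoryDynamicAddress>
// and every shadow computation in the function keys off that load.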

void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
  // Find the one possible call to llvm.localescape and pre-mark allocas passed
  // to it as uninteresting. This assumes we haven't started processing allocas
  // yet. This check is done up front because iterating the use list in
  // isInterestingAlloca would be algorithmically slower.
  assert(ProcessedAllocas.empty() && "must process localescape before allocas");

  // Try to get the declaration of llvm.localescape. If it's not in the module,
  // we can exit early.
  if (!F.getParent()->getFunction("llvm.localescape")) return;

  // Look for a call to llvm.localescape in the entry block. It can't be in
  // any other block.
  for (Instruction &I : F.getEntryBlock()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
    if (II && II->getIntrinsicID() == Intrinsic::localescape) {
      // We found a call. Mark all the allocas passed in as uninteresting.
      for (Value *Arg : II->args()) {
        AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
        assert(AI && AI->isStaticAlloca() &&
               "non-static alloca arg to localescape");
        ProcessedAllocas[AI] = false;
      }
      break;
    }
  }
}

bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
  bool ShouldInstrument =
      ClDebugMin < 0 || ClDebugMax < 0 ||
      (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
  Instrumented++;
  return !ShouldInstrument;
}
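
// Debug aid: when both ClDebug bounds are non-negative, only sites whose
// zero-based index falls in [ClDebugMin, ClDebugMax] are instrumented, which
// makes it possible to bisect a misbehaving function down to a single check.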

bool AddressSanitizer::instrumentFunction(Function &F,
                                          const TargetLibraryInfo *TLI) {
  bool FunctionModified = false;

  // Do not apply any instrumentation for naked functions.
  if (F.hasFnAttribute(Attribute::Naked))
    return FunctionModified;

  // If needed, insert __asan_init before checking for SanitizeAddress attr.
  // This function needs to be called even if the function body is not
  // instrumented.
  if (maybeInsertAsanInitAtFunctionEntry(F))
    FunctionModified = true;

  // Leave if the function doesn't need instrumentation.
  if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;

  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return FunctionModified;

  LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");

  initializeCallbacks(TLI);

  FunctionStateRAII CleanupObj(this);

  RuntimeCallInserter RTCI(F);

  FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);

  // We can't instrument allocas used with llvm.localescape. Only static allocas
  // can be passed to that intrinsic.
  markEscapedLocalAllocas(F);

  // We want to instrument every address only once per basic block (unless there
  // are calls between uses).
  SmallPtrSet<Value *, 16> TempsToInstrument;
  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<Instruction *, 8> NoReturnCalls;
  SmallVector<BasicBlock *, 16> AllBlocks;
  SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    AllBlocks.push_back(&BB);
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (auto &Inst : BB) {
      if (LooksLikeCodeInBug11395(&Inst)) return false;
      // Skip instructions inserted by another instrumentation.
      if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
        continue;
      SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
      getInterestingMemoryOperands(&Inst, InterestingOperands);

      if (!InterestingOperands.empty()) {
        for (auto &Operand : InterestingOperands) {
          if (ClOpt && ClOptSameTemp) {
            Value *Ptr = Operand.getPtr();
            // If we have a mask, skip instrumentation if we've already
            // instrumented the full object. But don't add to TempsToInstrument
            // because we might get another load/store with a different mask.
            if (Operand.MaybeMask) {
              if (TempsToInstrument.count(Ptr))
                continue; // We've seen this (whole) temp in the current BB.
            } else {
              if (!TempsToInstrument.insert(Ptr).second)
                continue; // We've seen this temp in the current BB.
            }
          }
          OperandsToInstrument.push_back(Operand);
          NumInsnsPerBB++;
        }
      } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
                  isInterestingPointerComparison(&Inst)) ||
                 ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
                  isInterestingPointerSubtraction(&Inst))) {
        PointerComparisonsOrSubtracts.push_back(&Inst);
      } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
        // ok, take it.
        IntrinToInstrument.push_back(MI);
        NumInsnsPerBB++;
      } else {
        if (auto *CB = dyn_cast<CallBase>(&Inst)) {
          // A call inside BB.
          TempsToInstrument.clear();
          if (CB->doesNotReturn())
            NoReturnCalls.push_back(CB);
        }
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
      }
      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
    }
  }

  bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
                   OperandsToInstrument.size() + IntrinToInstrument.size() >
                       (unsigned)InstrumentationWithCallsThreshold);
  const DataLayout &DL = F.getDataLayout();
  ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext());

  // Instrument.
  int NumInstrumented = 0;
  for (auto &Operand : OperandsToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMop(ObjSizeVis, Operand, UseCalls,
                    F.getDataLayout(), RTCI);
    FunctionModified = true;
  }
  for (auto *Inst : IntrinToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMemIntrinsic(Inst, RTCI);
    FunctionModified = true;
  }

  FunctionStackPoisoner FSP(F, *this, RTCI);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
  // See e.g. https://github.com/google/sanitizers/issues/37
  for (auto *CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
  }

  for (auto *Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst, RTCI);
    FunctionModified = true;
  }

  if (ChangedStack || !NoReturnCalls.empty())
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
                    << F << "\n");

  return FunctionModified;
}

// Workaround for bug 11395: we don't want to instrument stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  if (LongSize != 32) return false;
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || !CI->isInlineAsm()) return false;
  if (CI->arg_size() <= 5)
    return false;
  // We have inline assembly with quite a few arguments.
  return true;
}

void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
      ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
    const char *MallocNameTemplate =
        ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
            ? kAsanStackMallocAlwaysNameTemplate
            : kAsanStackMallocNameTemplate;
    for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
      std::string Suffix = itostr(Index);
      AsanStackMallocFunc[Index] = M.getOrInsertFunction(
          MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
      AsanStackFreeFunc[Index] =
          M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                                IRB.getVoidTy(), IntptrTy, IntptrTy);
    }
  }
  if (ASan.UseAfterScope) {
    AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
    AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
                     0xf3, 0xf5, 0xf8}) {
    std::ostringstream Name;
    Name << kAsanSetShadowPrefix;
    Name << std::setw(2) << std::setfill('0') << std::hex << Val;
    AsanSetShadowFunc[Val] =
        M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  AsanAllocaPoisonFunc = M.getOrInsertFunction(
      kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
      kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
}
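
// The hex formatting above yields names of the form <kAsanSetShadowPrefix>00
// through <kAsanSetShadowPrefix>f8: one bulk shadow-writing entry point per
// magic byte (0xf1 being the stack-left-redzone marker, for instance).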

void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                                               ArrayRef<uint8_t> ShadowBytes,
                                               size_t Begin, size_t End,
                                               IRBuilder<> &IRB,
                                               Value *ShadowBase) {
  if (Begin >= End)
    return;

  const size_t LargestStoreSizeInBytes =
      std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);

  const bool IsLittleEndian = F.getDataLayout().isLittleEndian();

  // Poison the given shadow range using the largest store size possible,
  // skipping leading and trailing zeros in ShadowMask. Zeros never change, so
  // they need neither poisoning nor unpoisoning. Still, we don't mind if some
  // of them end up in the middle of a store.
  for (size_t i = Begin; i < End;) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      ++i;
      continue;
    }

    size_t StoreSizeInBytes = LargestStoreSizeInBytes;
    // Fit store size into the range.
    while (StoreSizeInBytes > End - i)
      StoreSizeInBytes /= 2;

    // Minimize store size by trimming trailing zeros.
    for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
      while (j <= StoreSizeInBytes / 2)
        StoreSizeInBytes /= 2;
    }

    uint64_t Val = 0;
    for (size_t j = 0; j < StoreSizeInBytes; j++) {
      if (IsLittleEndian)
        Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
      else
        Val = (Val << 8) | ShadowBytes[i + j];
    }

    Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
    IRB.CreateAlignedStore(
        Poison, IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getContext())),
        Align(1));

    i += StoreSizeInBytes;
  }
}
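
// Example: four consecutive shadow bytes {0xf1, 0xf1, 0xf1, 0xf1} on a
// little-endian 64-bit target become a single 32-bit store of 0xf1f1f1f1 at
// ShadowBase + i, rather than four separate byte-sized stores.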

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
}

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         size_t Begin, size_t End,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  assert(ShadowMask.size() == ShadowBytes.size());
  size_t Done = Begin;
  for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      continue;
    }
    uint8_t Val = ShadowBytes[i];
    if (!AsanSetShadowFunc[Val])
      continue;

    // Skip over the run of identical values.
    for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
    }

    if (j - i >= ASan.MaxInlinePoisoningSize) {
      copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
      RTCI.createRuntimeCall(
          IRB, AsanSetShadowFunc[Val],
          {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
           ConstantInt::get(IntptrTy, j - i)});
      Done = j;
    }
  }

  copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
}

// Fake stack allocator (asan_fake_stack.h) has 11 size classes, one for every
// power of 2 from kMinStackMallocSize up to kMaxAsanStackMallocSizeClass.
static int StackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0;; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize) return i;
  llvm_unreachable("impossible LocalStackSize");
}
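
// E.g., assuming kMinStackMallocSize is 64 bytes: a 100-byte frame maps to
// class 1 (<= 128) and a 1000-byte frame to class 4 (<= 1024).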

void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
  Instruction *CopyInsertPoint = &F.front().front();
  if (CopyInsertPoint == ASan.LocalDynamicShadow) {
    // Insert after the dynamic shadow location is determined.
    CopyInsertPoint = CopyInsertPoint->getNextNode();
    assert(CopyInsertPoint);
  }
  IRBuilder<> IRB(CopyInsertPoint);
  const DataLayout &DL = F.getDataLayout();
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr()) {
      Type *Ty = Arg.getParamByValType();
      const Align Alignment =
          DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);

      AllocaInst *AI = IRB.CreateAlloca(
          Ty, nullptr,
          (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
              ".byval");
      AI->setAlignment(Alignment);
      Arg.replaceAllUsesWith(AI);

      uint64_t AllocSize = DL.getTypeAllocSize(Ty);
      IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
    }
  }
}
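
// Sketch of the effect for a byval argument %s of type %struct.S (names
// illustrative): the entry block gains
//   %s.byval = alloca %struct.S
//   call void @llvm.memcpy.p0.p0.i64(ptr %s.byval, ptr %s, i64 <sizeof S>, ...)
// and uses of %s are rewritten to %s.byval, so the local copy can receive
// redzones like any other interesting alloca.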

PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
                                          Value *ValueIfTrue,
                                          Instruction *ThenTerm,
                                          Value *ValueIfFalse) {
  PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
  BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
  PHI->addIncoming(ValueIfFalse, CondBlock);
  BasicBlock *ThenBlock = ThenTerm->getParent();
  PHI->addIncoming(ValueIfTrue, ThenBlock);
  return PHI;
}

Value *FunctionStackPoisoner::createAllocaForLayout(
    IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
  AllocaInst *Alloca;
  if (Dynamic) {
    Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
                              ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
                              "MyAlloca");
  } else {
    Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
                              nullptr, "MyAlloca");
    assert(Alloca->isStaticAlloca());
  }
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
  Alloca->setAlignment(Align(FrameAlignment));
  return IRB.CreatePointerCast(Alloca, IntptrTy);
}

void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
  BasicBlock &FirstBB = *F.begin();
  IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
  DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
  IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
  DynamicAllocaLayout->setAlignment(Align(32));
}

void FunctionStackPoisoner::processDynamicAllocas() {
  if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
    assert(DynamicAllocaPoisonCallVec.empty());
    return;
  }

  // Insert poison calls for lifetime intrinsics for dynamic allocas.
  for (const auto &APC : DynamicAllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    assert(ASan.isInterestingAlloca(*APC.AI));
    assert(!APC.AI->isStaticAlloca());

    IRBuilder<> IRB(APC.InsBefore);
    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
    // Dynamic allocas will be unpoisoned unconditionally below in
    // unpoisonDynamicAllocas.
    // Flag that we need to unpoison static allocas.
  }

  // Handle dynamic allocas.
  createDynamicAllocasInitStorage();
  for (auto &AI : DynamicAllocaVec)
    handleDynamicAllocaCall(AI);
  unpoisonDynamicAllocas();
}

/// Collect instructions in the entry block after \p InsBefore which initialize
/// permanent storage for a function argument. These instructions must remain in
/// the entry block so that uninitialized values do not appear in backtraces. An
/// added benefit is that this conserves spill slots. This does not move stores
/// before instrumented / "interesting" allocas.
static void findStoresToUninstrumentedArgAllocas(
    AddressSanitizer &ASan, Instruction &InsBefore,
    SmallVectorImpl<Instruction *> &InitInsts) {
  Instruction *Start = InsBefore.getNextNonDebugInstruction();
  for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
    // Argument initialization looks like:
    // 1) store <Argument>, <Alloca> OR
    // 2) <CastArgument> = cast <Argument> to ...
    //    store <CastArgument> to <Alloca>
    // Do not consider any other kind of instruction.
    //
    // Note: This covers all known cases, but may not be exhaustive. An
    // alternative to pattern-matching stores is to DFS over all Argument uses:
    // this might be more general, but is probably much more complicated.
    if (isa<AllocaInst>(It) || isa<CastInst>(It))
      continue;
    if (auto *Store = dyn_cast<StoreInst>(It)) {
      // The store destination must be an alloca that isn't interesting for
      // ASan to instrument. These are moved up before InsBefore, and they're
      // not interesting because allocas for arguments can be mem2reg'd.
      auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
      if (!Alloca || ASan.isInterestingAlloca(*Alloca))
        continue;

      Value *Val = Store->getValueOperand();
      bool IsDirectArgInit = isa<Argument>(Val);
      bool IsArgInitViaCast =
          isa<CastInst>(Val) &&
          isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
          // Check that the cast appears directly before the store. Otherwise
          // moving the cast before InsBefore may break the IR.
          Val == It->getPrevNonDebugInstruction();
      bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
      if (!IsArgInit)
        continue;

      if (IsArgInitViaCast)
        InitInsts.push_back(cast<Instruction>(Val));
      InitInsts.push_back(Store);
      continue;
    }

    // Do not reorder past unknown instructions: argument initialization should
    // only involve casts and stores.
    return;
  }
}

static StringRef getAllocaName(AllocaInst *AI) {
  // Alloca could have been renamed for uniqueness. Its true name will have been
  // recorded as an annotation.
  if (AI->hasMetadata(LLVMContext::MD_annotation)) {
    MDTuple *AllocaAnnotations =
        cast<MDTuple>(AI->getMetadata(LLVMContext::MD_annotation));
    for (auto &Annotation : AllocaAnnotations->operands()) {
      if (!isa<MDTuple>(Annotation))
        continue;
      auto AnnotationTuple = cast<MDTuple>(Annotation);
      for (unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
           Index++) {
        // All annotations are strings
        auto MetadataString =
            cast<MDString>(AnnotationTuple->getOperand(Index));
        if (MetadataString->getString() == "alloca_name_altered")
          return cast<MDString>(AnnotationTuple->getOperand(Index + 1))
              ->getString();
      }
    }
  }
  return AI->getName();
}
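
// The annotation being matched above looks roughly like this in IR
// (illustrative):
//   %x1 = alloca i32, !annotation !0
//   !0 = !{!{!"alloca_name_altered", !"x"}}
// i.e. the string following the "alloca_name_altered" marker is the
// variable's original name.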

void FunctionStackPoisoner::processStaticAllocas() {
  if (AllocaVec.empty()) {
    assert(StaticAllocaPoisonCallVec.empty());
    return;
  }

  int StackMallocIdx = -1;
  DebugLoc EntryDebugLocation;
  if (auto SP = F.getSubprogram())
    EntryDebugLocation =
        DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);

  Instruction *InsBefore = AllocaVec[0];
  IRBuilder<> IRB(InsBefore);

  // Make sure non-instrumented allocas stay in the entry block. Otherwise,
  // debug info is broken, because only entry-block allocas are treated as
  // regular stack slots.
  auto InsBeforeB = InsBefore->getParent();
  assert(InsBeforeB == &F.getEntryBlock());
  for (auto *AI : StaticAllocasToMoveUp)
    if (AI->getParent() == InsBeforeB)
      AI->moveBefore(InsBefore->getIterator());

  // Move stores of arguments into entry-block allocas as well. This prevents
  // extra stack slots from being generated (to house the argument values until
  // they can be stored into the allocas). This also prevents uninitialized
  // values from being shown in backtraces.
  SmallVector<Instruction *, 8> ArgInitInsts;
  findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
  for (Instruction *ArgInitInst : ArgInitInsts)
    ArgInitInst->moveBefore(InsBefore->getIterator());

  // If we have a call to llvm.localescape, keep it in the entry block.
  if (LocalEscapeCall)
    LocalEscapeCall->moveBefore(InsBefore->getIterator());

  SmallVector<ASanStackVariableDescription, 16> SVD;
  SVD.reserve(AllocaVec.size());
  for (AllocaInst *AI : AllocaVec) {
    StringRef Name = getAllocaName(AI);
    ASanStackVariableDescription D = {Name.data(),
                                      ASan.getAllocaSizeInBytes(*AI),
                                      0,
                                      AI->getAlign().value(),
                                      AI,
                                      0,
                                      0};
    SVD.push_back(D);
  }

  // Minimal header size (left redzone) is 4 pointers,
  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
  uint64_t Granularity = 1ULL << Mapping.Scale;
  uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
  const ASanStackFrameLayout &L =
      ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
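
  // With the common Mapping.Scale of 3, Granularity is 8 bytes and, on a
  // 64-bit target (LongSize == 64), MinHeaderSize works out to 32 -- the
  // "4 pointers" mentioned above.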
3525 
3526   // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3527   DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
3528   for (auto &Desc : SVD)
3529     AllocaToSVDMap[Desc.AI] = &Desc;
3530 
3531   // Update SVD with information from lifetime intrinsics.
3532   for (const auto &APC : StaticAllocaPoisonCallVec) {
3533     assert(APC.InsBefore);
3534     assert(APC.AI);
3535     assert(ASan.isInterestingAlloca(*APC.AI));
3536     assert(APC.AI->isStaticAlloca());
3537 
3538     ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3539     Desc.LifetimeSize = Desc.Size;
3540     if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3541       if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3542         if (LifetimeLoc->getFile() == FnLoc->getFile())
3543           if (unsigned Line = LifetimeLoc->getLine())
3544             Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3545       }
3546     }
3547   }
3548 
3549   auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3550   LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3551   uint64_t LocalStackSize = L.FrameSize;
3552   bool DoStackMalloc =
3553       ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3554       !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3555   bool DoDynamicAlloca = ClDynamicAllocaStack;
3556   // Don't do dynamic alloca or stack malloc if:
3557   // 1) There is inline asm: too often it makes assumptions on which registers
3558   //    are available.
3559   // 2) There is a returns_twice call (typically setjmp), which is
3560   //    optimization-hostile, and doesn't play well with introduced indirect
3561   //    register-relative calculation of local variable addresses.
3562   DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3563   DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3564 
3565   Value *StaticAlloca =
3566       DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3567 
3568   Value *FakeStack;
3569   Value *LocalStackBase;
3570   Value *LocalStackBaseAlloca;
3571   uint8_t DIExprFlags = DIExpression::ApplyOffset;
3572 
3573   if (DoStackMalloc) {
3574     LocalStackBaseAlloca =
3575         IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3576     if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3577       // void *FakeStack = __asan_option_detect_stack_use_after_return
3578       //     ? __asan_stack_malloc_N(LocalStackSize)
3579       //     : nullptr;
3580       // void *LocalStackBase = (FakeStack) ? FakeStack :
3581       //                        alloca(LocalStackSize);
3582       Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3583           kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3584       Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3585           IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3586           Constant::getNullValue(IRB.getInt32Ty()));
3587       Instruction *Term =
3588           SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3589       IRBuilder<> IRBIf(Term);
3590       StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3591       assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3592       Value *FakeStackValue =
3593           RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3594                                  ConstantInt::get(IntptrTy, LocalStackSize));
3595       IRB.SetInsertPoint(InsBefore);
3596       FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3597                             ConstantInt::get(IntptrTy, 0));
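      // The control flow produced above looks roughly like this (an
      // illustrative IR sketch; value and block names are made up):
      //   %flag = load i32, ptr @__asan_option_detect_stack_use_after_return
      //   %enabled = icmp ne i32 %flag, 0
      //   br i1 %enabled, label %fake, label %cont
      // fake:
      //   %fs = call i64 @__asan_stack_malloc_N(i64 LocalStackSize)
      //   br label %cont
      // cont:
      //   %FakeStack = phi i64 [ %fs, %fake ], [ 0, ... ]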
3598     } else {
3599       // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
3600       // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3601       // void *LocalStackBase = (FakeStack) ? FakeStack :
3602       //                        alloca(LocalStackSize);
3603       StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3604       FakeStack =
3605           RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3606                                  ConstantInt::get(IntptrTy, LocalStackSize));
3607     }
3608     Value *NoFakeStack =
3609         IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
3610     Instruction *Term =
3611         SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3612     IRBuilder<> IRBIf(Term);
3613     Value *AllocaValue =
3614         DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3615 
3616     IRB.SetInsertPoint(InsBefore);
3617     LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3618     IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3619     DIExprFlags |= DIExpression::DerefBefore;
3620   } else {
3621     // void *FakeStack = nullptr;
3622     // void *LocalStackBase = alloca(LocalStackSize);
3623     FakeStack = ConstantInt::get(IntptrTy, 0);
3624     LocalStackBase =
3625         DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3626     LocalStackBaseAlloca = LocalStackBase;
3627   }
3628 
3629   // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
3630   // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
3631   // later passes and can result in dropped variable coverage in debug info.
3632   Value *LocalStackBaseAllocaPtr =
3633       isa<PtrToIntInst>(LocalStackBaseAlloca)
3634           ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3635           : LocalStackBaseAlloca;
3636   assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3637          "Variable descriptions relative to ASan stack base will be dropped");
3638 
3639   // Replace Alloca instructions with base+offset.
3640   for (const auto &Desc : SVD) {
3641     AllocaInst *AI = Desc.AI;
3642     replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
3643                       Desc.Offset);
3644     Value *NewAllocaPtr = IRB.CreateIntToPtr(
3645         IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
3646         AI->getType());
3647     AI->replaceAllUsesWith(NewAllocaPtr);
3648   }
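  // For example, a variable placed at Desc.Offset == 32 in the layout now
  // takes its address as inttoptr(add(LocalStackBase, 32)), and its
  // dbg.declare is rewritten relative to the frame base in the same way.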
3649 
3650   // The left-most redzone has enough space for at least 4 pointers.
3651   // Write the Magic value to redzone[0].
3652   Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
3653   IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3654                   BasePlus0);
3655   // Write the frame description constant to redzone[1].
3656   Value *BasePlus1 = IRB.CreateIntToPtr(
3657       IRB.CreateAdd(LocalStackBase,
3658                     ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3659       IntptrPtrTy);
3660   GlobalVariable *StackDescriptionGlobal =
3661       createPrivateGlobalForString(*F.getParent(), DescriptionString,
3662                                    /*AllowMerging*/ true, genName("stack"));
3663   Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3664   IRB.CreateStore(Description, BasePlus1);
3665   // Write the PC to redzone[2].
3666   Value *BasePlus2 = IRB.CreateIntToPtr(
3667       IRB.CreateAdd(LocalStackBase,
3668                     ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3669       IntptrPtrTy);
3670   IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
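  // The frame header now occupies the first three words of the left redzone
  // (offsets shown for a 64-bit target):
  //   LocalStackBase + 0:  kCurrentStackFrameMagic
  //   LocalStackBase + 8:  pointer to the frame description string
  //   LocalStackBase + 16: address of the enclosing function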
3671 
3672   const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3673 
3674   // Poison the stack red zones at the entry.
3675   Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3676   // As the mask we must use the most poisoned case: red zones plus
3677   // after-scope poison. As the bytes we can use either the same or just the red zones.
3678   copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3679 
3680   if (!StaticAllocaPoisonCallVec.empty()) {
3681     const auto &ShadowInScope = GetShadowBytes(SVD, L);
3682 
3683     // Poison static allocas near lifetime intrinsics.
3684     for (const auto &APC : StaticAllocaPoisonCallVec) {
3685       const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3686       assert(Desc.Offset % L.Granularity == 0);
3687       size_t Begin = Desc.Offset / L.Granularity;
3688       size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
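      // E.g. with Granularity == 8, a variable at Offset 32 with a 10-byte
      // lifetime covers shadow bytes [32/8, 32/8 + ceil(10/8)) == [4, 6).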
3689 
3690       IRBuilder<> IRB(APC.InsBefore);
3691       copyToShadow(ShadowAfterScope,
3692                    APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3693                    IRB, ShadowBase);
3694     }
3695   }
3696 
3697   SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3698   SmallVector<uint8_t, 64> ShadowAfterReturn;
3699 
3700   // (Un)poison the stack before all ret instructions.
3701   for (Instruction *Ret : RetVec) {
3702     IRBuilder<> IRBRet(Ret);
3703     // Mark the current frame as retired.
3704     IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3705                        BasePlus0);
3706     if (DoStackMalloc) {
3707       assert(StackMallocIdx >= 0);
3708       // if FakeStack != 0  // LocalStackBase == FakeStack
3709       //     // In use-after-return mode, poison the whole stack frame.
3710       //     if StackMallocIdx <= 4
3711       //         // For small sizes inline the whole thing:
3712       //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3713       //         **SavedFlagPtr(FakeStack) = 0
3714       //     else
3715       //         __asan_stack_free_N(FakeStack, LocalStackSize)
3716       // else
3717       //     <This is not a fake stack; unpoison the redzones>
3718       Value *Cmp =
3719           IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
3720       Instruction *ThenTerm, *ElseTerm;
3721       SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3722 
3723       IRBuilder<> IRBPoison(ThenTerm);
3724       if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3725         int ClassSize = kMinStackMallocSize << StackMallocIdx;
3726         ShadowAfterReturn.resize(ClassSize / L.Granularity,
3727                                  kAsanStackUseAfterReturnMagic);
3728         copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3729                      ShadowBase);
3730         Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3731             FakeStack,
3732             ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
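        // E.g. assuming kMinStackMallocSize == 64 and StackMallocIdx == 2,
        // ClassSize == 64 << 2 == 256, and the saved flag pointer is read
        // from the last word of the fake frame, at FakeStack + 256 - 8 on a
        // 64-bit target.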
3733         Value *SavedFlagPtr = IRBPoison.CreateLoad(
3734             IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3735         IRBPoison.CreateStore(
3736             Constant::getNullValue(IRBPoison.getInt8Ty()),
3737             IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3738       } else {
3739         // For larger frames call __asan_stack_free_*.
3740         RTCI.createRuntimeCall(
3741             IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3742             {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3743       }
3744 
3745       IRBuilder<> IRBElse(ElseTerm);
3746       copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3747     } else {
3748       copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3749     }
3750   }
3751 
3752   // We are done. Remove the old unused alloca instructions.
3753   for (auto *AI : AllocaVec)
3754     AI->eraseFromParent();
3755 }
3756 
3757 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3758                                          IRBuilder<> &IRB, bool DoPoison) {
3759   // For now, just insert a call to the ASan runtime.
3760   Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3761   Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3762   RTCI.createRuntimeCall(
3763       IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3764       {AddrArg, SizeArg});
3765 }
3766 
3767 // Handling llvm.lifetime intrinsics for a given %alloca:
3768 // (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3769 // (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3770 //     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3771 //     could be poisoned by a previous llvm.lifetime.end instruction, as the
3772 //     variable may go in and out of scope several times, e.g. in loops).
3773 // (3) if we poisoned at least one %alloca in a function,
3774 //     unpoison the whole stack frame at function exit.
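// For example (an illustrative IR sketch, not output of this pass), for a
// variable that is live only inside a loop body:
//   %buf = alloca [64 x i8]
//   call void @llvm.lifetime.start.p0(i64 64, ptr %buf)  ; unpoison %buf
//   ...
//   call void @llvm.lifetime.end.p0(i64 64, ptr %buf)    ; poison %buf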
3775 void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3776   IRBuilder<> IRB(AI);
3777 
3778   const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3779   const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3780 
3781   Value *Zero = Constant::getNullValue(IntptrTy);
3782   Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3783   Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3784 
3785   // We need to extend the alloca with additional memory to place the
3786   // redzones. The alloca allocates ArraySize elements of ElementSize
3787   // bytes each, so the allocated memory size in bytes is
3788   // OldSize = ArraySize * ElementSize.
3789   const unsigned ElementSize =
3790       F.getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3791   Value *OldSize =
3792       IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3793                     ConstantInt::get(IntptrTy, ElementSize));
3794 
3795   // PartialSize = OldSize % 32
3796   Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3797 
3798   // Misalign = kAllocaRzSize - PartialSize;
3799   Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3800 
3801   // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3802   Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3803   Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3804 
3805   // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3806   // Alignment is added to hold the left redzone, PartialPadding for a
3807   // possible partial redzone, and kAllocaRzSize for the right redzone.
3808   Value *AdditionalChunkSize = IRB.CreateAdd(
3809       ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3810       PartialPadding);
3811 
3812   Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3813 
3814   // Insert a new alloca with the computed NewSize and Alignment.
3815   AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3816   NewAlloca->setAlignment(Alignment);
3817 
3818   // NewAddress = Address + Alignment
3819   Value *NewAddress =
3820       IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3821                     ConstantInt::get(IntptrTy, Alignment.value()));
3822 
3823   // Insert an __asan_alloca_poison call for the newly created alloca.
3824   RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3825 
3826   // Store the last alloca's address to DynamicAllocaLayout. We'll need it
3827   // to unpoison the dynamic allocas before each return.
3828   IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3829 
3830   Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3831 
3832   // Replace all uses of the address returned by the old alloca with NewAddressPtr.
3833   AI->replaceAllUsesWith(NewAddressPtr);
3834 
3835   // We are done. Erase old alloca from parent.
3836   AI->eraseFromParent();
3837 }
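
// A minimal standalone sketch (illustrative only, not used by the pass) of
// the size computation above on plain integers, assuming kAllocaRzSize == 32.
// E.g. OldSize == 100 with 32-byte alignment yields 100 + 32 + 28 + 32 == 192.
[[maybe_unused]] static uint64_t
instrumentedAllocaSizeSketch(uint64_t OldSize, uint64_t AlignmentBytes) {
  const uint64_t RzSize = 32;                    // kAllocaRzSize
  uint64_t PartialSize = OldSize & (RzSize - 1); // OldSize % 32
  uint64_t Misalign = RzSize - PartialSize;
  uint64_t PartialPadding = Misalign != RzSize ? Misalign : 0;
  // Left redzone (AlignmentBytes) + partial padding + right redzone.
  return OldSize + AlignmentBytes + PartialPadding + RzSize;
}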
3838 
3839 // isSafeAccess returns true if Addr is always inbounds with respect to its
3840 // base object. For example, it is a field access or an array access with
3841 // a constant inbounds index.
3842 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3843                                     Value *Addr, TypeSize TypeStoreSize) const {
3844   if (TypeStoreSize.isScalable())
3845     // TODO: We can use vscale_range to convert a scalable value to an
3846     // upper bound on the access size.
3847     return false;
3848 
3849   SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
3850   if (!SizeOffset.bothKnown())
3851     return false;
3852 
3853   uint64_t Size = SizeOffset.Size.getZExtValue();
3854   int64_t Offset = SizeOffset.Offset.getSExtValue();
3855 
3856   // Three checks are required to ensure safety:
3857   // . Offset >= 0  (since the offset is given from the base ptr)
3858   // . Size >= Offset  (unsigned)
3859   // . Size - Offset >= NeededSize  (unsigned)
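  // E.g. a 4-byte access at Offset 8 into a 16-byte object is safe:
  // 8 >= 0, 16 >= 8, and 16 - 8 == 8 >= 4.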
3860   return Offset >= 0 && Size >= uint64_t(Offset) &&
3861          Size - uint64_t(Offset) >= TypeStoreSize / 8;
3862 }
3863