1 //===- HWAddressSanitizer.cpp - detector of uninitialized reads -------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// This file is a part of HWAddressSanitizer, an address sanity checker 11 /// based on tagged addressing. 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h" 15 #include "llvm/ADT/MapVector.h" 16 #include "llvm/ADT/SmallVector.h" 17 #include "llvm/ADT/StringExtras.h" 18 #include "llvm/ADT/StringRef.h" 19 #include "llvm/ADT/Triple.h" 20 #include "llvm/BinaryFormat/ELF.h" 21 #include "llvm/IR/Attributes.h" 22 #include "llvm/IR/BasicBlock.h" 23 #include "llvm/IR/Constant.h" 24 #include "llvm/IR/Constants.h" 25 #include "llvm/IR/DataLayout.h" 26 #include "llvm/IR/DebugInfoMetadata.h" 27 #include "llvm/IR/DerivedTypes.h" 28 #include "llvm/IR/Function.h" 29 #include "llvm/IR/IRBuilder.h" 30 #include "llvm/IR/InlineAsm.h" 31 #include "llvm/IR/InstVisitor.h" 32 #include "llvm/IR/Instruction.h" 33 #include "llvm/IR/Instructions.h" 34 #include "llvm/IR/IntrinsicInst.h" 35 #include "llvm/IR/Intrinsics.h" 36 #include "llvm/IR/LLVMContext.h" 37 #include "llvm/IR/MDBuilder.h" 38 #include "llvm/IR/Module.h" 39 #include "llvm/IR/Type.h" 40 #include "llvm/IR/Value.h" 41 #include "llvm/InitializePasses.h" 42 #include "llvm/Pass.h" 43 #include "llvm/Support/Casting.h" 44 #include "llvm/Support/CommandLine.h" 45 #include "llvm/Support/Debug.h" 46 #include "llvm/Support/raw_ostream.h" 47 #include "llvm/Transforms/Instrumentation.h" 48 #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h" 49 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 50 #include "llvm/Transforms/Utils/ModuleUtils.h" 51 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 52 #include <sstream> 53 54 using namespace llvm; 55 56 #define DEBUG_TYPE "hwasan" 57 58 static const char *const kHwasanModuleCtorName = "hwasan.module_ctor"; 59 static const char *const kHwasanNoteName = "hwasan.note"; 60 static const char *const kHwasanInitName = "__hwasan_init"; 61 static const char *const kHwasanPersonalityThunkName = 62 "__hwasan_personality_thunk"; 63 64 static const char *const kHwasanShadowMemoryDynamicAddress = 65 "__hwasan_shadow_memory_dynamic_address"; 66 67 // Accesses sizes are powers of two: 1, 2, 4, 8, 16. 68 static const size_t kNumberOfAccessSizes = 5; 69 70 static const size_t kDefaultShadowScale = 4; 71 static const uint64_t kDynamicShadowSentinel = 72 std::numeric_limits<uint64_t>::max(); 73 static const unsigned kPointerTagShift = 56; 74 75 static const unsigned kShadowBaseAlignment = 32; 76 77 static cl::opt<std::string> ClMemoryAccessCallbackPrefix( 78 "hwasan-memory-access-callback-prefix", 79 cl::desc("Prefix for memory access callbacks"), cl::Hidden, 80 cl::init("__hwasan_")); 81 82 static cl::opt<bool> 83 ClInstrumentWithCalls("hwasan-instrument-with-calls", 84 cl::desc("instrument reads and writes with callbacks"), 85 cl::Hidden, cl::init(false)); 86 87 static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads", 88 cl::desc("instrument read instructions"), 89 cl::Hidden, cl::init(true)); 90 91 static cl::opt<bool> ClInstrumentWrites( 92 "hwasan-instrument-writes", cl::desc("instrument write instructions"), 93 cl::Hidden, cl::init(true)); 94 95 static cl::opt<bool> ClInstrumentAtomics( 96 "hwasan-instrument-atomics", 97 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, 98 cl::init(true)); 99 100 static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval", 101 cl::desc("instrument byval arguments"), 102 cl::Hidden, cl::init(true)); 103 104 static cl::opt<bool> ClRecover( 105 "hwasan-recover", 106 cl::desc("Enable recovery mode (continue-after-error)."), 107 cl::Hidden, cl::init(false)); 108 109 static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack", 110 cl::desc("instrument stack (allocas)"), 111 cl::Hidden, cl::init(true)); 112 113 static cl::opt<bool> ClUARRetagToZero( 114 "hwasan-uar-retag-to-zero", 115 cl::desc("Clear alloca tags before returning from the function to allow " 116 "non-instrumented and instrumented function calls mix. When set " 117 "to false, allocas are retagged before returning from the " 118 "function to detect use after return."), 119 cl::Hidden, cl::init(true)); 120 121 static cl::opt<bool> ClGenerateTagsWithCalls( 122 "hwasan-generate-tags-with-calls", 123 cl::desc("generate new tags with runtime library calls"), cl::Hidden, 124 cl::init(false)); 125 126 static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"), 127 cl::Hidden, cl::init(false), cl::ZeroOrMore); 128 129 static cl::opt<int> ClMatchAllTag( 130 "hwasan-match-all-tag", 131 cl::desc("don't report bad accesses via pointers with this tag"), 132 cl::Hidden, cl::init(-1)); 133 134 static cl::opt<bool> ClEnableKhwasan( 135 "hwasan-kernel", 136 cl::desc("Enable KernelHWAddressSanitizer instrumentation"), 137 cl::Hidden, cl::init(false)); 138 139 // These flags allow to change the shadow mapping and control how shadow memory 140 // is accessed. The shadow mapping looks like: 141 // Shadow = (Mem >> scale) + offset 142 143 static cl::opt<uint64_t> 144 ClMappingOffset("hwasan-mapping-offset", 145 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), 146 cl::Hidden, cl::init(0)); 147 148 static cl::opt<bool> 149 ClWithIfunc("hwasan-with-ifunc", 150 cl::desc("Access dynamic shadow through an ifunc global on " 151 "platforms that support this"), 152 cl::Hidden, cl::init(false)); 153 154 static cl::opt<bool> ClWithTls( 155 "hwasan-with-tls", 156 cl::desc("Access dynamic shadow through an thread-local pointer on " 157 "platforms that support this"), 158 cl::Hidden, cl::init(true)); 159 160 static cl::opt<bool> 161 ClRecordStackHistory("hwasan-record-stack-history", 162 cl::desc("Record stack frames with tagged allocations " 163 "in a thread-local ring buffer"), 164 cl::Hidden, cl::init(true)); 165 static cl::opt<bool> 166 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics", 167 cl::desc("instrument memory intrinsics"), 168 cl::Hidden, cl::init(true)); 169 170 static cl::opt<bool> 171 ClInstrumentLandingPads("hwasan-instrument-landing-pads", 172 cl::desc("instrument landing pads"), cl::Hidden, 173 cl::init(false), cl::ZeroOrMore); 174 175 static cl::opt<bool> ClUseShortGranules( 176 "hwasan-use-short-granules", 177 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden, 178 cl::init(false), cl::ZeroOrMore); 179 180 static cl::opt<bool> ClInstrumentPersonalityFunctions( 181 "hwasan-instrument-personality-functions", 182 cl::desc("instrument personality functions"), cl::Hidden, cl::init(false), 183 cl::ZeroOrMore); 184 185 static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks", 186 cl::desc("inline all checks"), 187 cl::Hidden, cl::init(false)); 188 189 namespace { 190 191 /// An instrumentation pass implementing detection of addressability bugs 192 /// using tagged pointers. 193 class HWAddressSanitizer { 194 public: 195 explicit HWAddressSanitizer(Module &M, bool CompileKernel = false, 196 bool Recover = false) : M(M) { 197 this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover; 198 this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0 ? 199 ClEnableKhwasan : CompileKernel; 200 201 initializeModule(); 202 } 203 204 bool sanitizeFunction(Function &F); 205 void initializeModule(); 206 207 void initializeCallbacks(Module &M); 208 209 Value *getDynamicShadowIfunc(IRBuilder<> &IRB); 210 Value *getDynamicShadowNonTls(IRBuilder<> &IRB); 211 212 void untagPointerOperand(Instruction *I, Value *Addr); 213 Value *shadowBase(); 214 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); 215 void instrumentMemAccessInline(Value *Ptr, bool IsWrite, 216 unsigned AccessSizeIndex, 217 Instruction *InsertBefore); 218 void instrumentMemIntrinsic(MemIntrinsic *MI); 219 bool instrumentMemAccess(InterestingMemoryOperand &O); 220 bool ignoreAccess(Value *Ptr); 221 void getInterestingMemoryOperands( 222 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting); 223 224 bool isInterestingAlloca(const AllocaInst &AI); 225 bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size); 226 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag); 227 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong); 228 bool instrumentStack( 229 SmallVectorImpl<AllocaInst *> &Allocas, 230 DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap, 231 SmallVectorImpl<Instruction *> &RetVec, Value *StackTag); 232 Value *readRegister(IRBuilder<> &IRB, StringRef Name); 233 bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec); 234 Value *getNextTagWithCall(IRBuilder<> &IRB); 235 Value *getStackBaseTag(IRBuilder<> &IRB); 236 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI, 237 unsigned AllocaNo); 238 Value *getUARTag(IRBuilder<> &IRB, Value *StackTag); 239 240 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty); 241 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord); 242 243 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag); 244 void instrumentGlobals(); 245 246 void instrumentPersonalityFunctions(); 247 248 private: 249 LLVMContext *C; 250 Module &M; 251 Triple TargetTriple; 252 FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset; 253 FunctionCallee HWAsanHandleVfork; 254 255 /// This struct defines the shadow mapping using the rule: 256 /// shadow = (mem >> Scale) + Offset. 257 /// If InGlobal is true, then 258 /// extern char __hwasan_shadow[]; 259 /// shadow = (mem >> Scale) + &__hwasan_shadow 260 /// If InTls is true, then 261 /// extern char *__hwasan_tls; 262 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment) 263 struct ShadowMapping { 264 int Scale; 265 uint64_t Offset; 266 bool InGlobal; 267 bool InTls; 268 269 void init(Triple &TargetTriple); 270 unsigned getObjectAlignment() const { return 1U << Scale; } 271 }; 272 ShadowMapping Mapping; 273 274 Type *VoidTy = Type::getVoidTy(M.getContext()); 275 Type *IntptrTy; 276 Type *Int8PtrTy; 277 Type *Int8Ty; 278 Type *Int32Ty; 279 Type *Int64Ty = Type::getInt64Ty(M.getContext()); 280 281 bool CompileKernel; 282 bool Recover; 283 bool UseShortGranules; 284 bool InstrumentLandingPads; 285 286 Function *HwasanCtorFunction; 287 288 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes]; 289 FunctionCallee HwasanMemoryAccessCallbackSized[2]; 290 291 FunctionCallee HwasanTagMemoryFunc; 292 FunctionCallee HwasanGenerateTagFunc; 293 294 Constant *ShadowGlobal; 295 296 Value *LocalDynamicShadow = nullptr; 297 Value *StackBaseTag = nullptr; 298 GlobalValue *ThreadPtrGlobal = nullptr; 299 }; 300 301 class HWAddressSanitizerLegacyPass : public FunctionPass { 302 public: 303 // Pass identification, replacement for typeid. 304 static char ID; 305 306 explicit HWAddressSanitizerLegacyPass(bool CompileKernel = false, 307 bool Recover = false) 308 : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover) { 309 initializeHWAddressSanitizerLegacyPassPass( 310 *PassRegistry::getPassRegistry()); 311 } 312 313 StringRef getPassName() const override { return "HWAddressSanitizer"; } 314 315 bool doInitialization(Module &M) override { 316 HWASan = std::make_unique<HWAddressSanitizer>(M, CompileKernel, Recover); 317 return true; 318 } 319 320 bool runOnFunction(Function &F) override { 321 return HWASan->sanitizeFunction(F); 322 } 323 324 bool doFinalization(Module &M) override { 325 HWASan.reset(); 326 return false; 327 } 328 329 private: 330 std::unique_ptr<HWAddressSanitizer> HWASan; 331 bool CompileKernel; 332 bool Recover; 333 }; 334 335 } // end anonymous namespace 336 337 char HWAddressSanitizerLegacyPass::ID = 0; 338 339 INITIALIZE_PASS_BEGIN( 340 HWAddressSanitizerLegacyPass, "hwasan", 341 "HWAddressSanitizer: detect memory bugs using tagged addressing.", false, 342 false) 343 INITIALIZE_PASS_END( 344 HWAddressSanitizerLegacyPass, "hwasan", 345 "HWAddressSanitizer: detect memory bugs using tagged addressing.", false, 346 false) 347 348 FunctionPass *llvm::createHWAddressSanitizerLegacyPassPass(bool CompileKernel, 349 bool Recover) { 350 assert(!CompileKernel || Recover); 351 return new HWAddressSanitizerLegacyPass(CompileKernel, Recover); 352 } 353 354 HWAddressSanitizerPass::HWAddressSanitizerPass(bool CompileKernel, bool Recover) 355 : CompileKernel(CompileKernel), Recover(Recover) {} 356 357 PreservedAnalyses HWAddressSanitizerPass::run(Module &M, 358 ModuleAnalysisManager &MAM) { 359 HWAddressSanitizer HWASan(M, CompileKernel, Recover); 360 bool Modified = false; 361 for (Function &F : M) 362 Modified |= HWASan.sanitizeFunction(F); 363 if (Modified) 364 return PreservedAnalyses::none(); 365 return PreservedAnalyses::all(); 366 } 367 368 /// Module-level initialization. 369 /// 370 /// inserts a call to __hwasan_init to the module's constructor list. 371 void HWAddressSanitizer::initializeModule() { 372 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n"); 373 auto &DL = M.getDataLayout(); 374 375 TargetTriple = Triple(M.getTargetTriple()); 376 377 Mapping.init(TargetTriple); 378 379 C = &(M.getContext()); 380 IRBuilder<> IRB(*C); 381 IntptrTy = IRB.getIntPtrTy(DL); 382 Int8PtrTy = IRB.getInt8PtrTy(); 383 Int8Ty = IRB.getInt8Ty(); 384 Int32Ty = IRB.getInt32Ty(); 385 386 HwasanCtorFunction = nullptr; 387 388 // Older versions of Android do not have the required runtime support for 389 // short granules, global or personality function instrumentation. On other 390 // platforms we currently require using the latest version of the runtime. 391 bool NewRuntime = 392 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30); 393 394 UseShortGranules = 395 ClUseShortGranules.getNumOccurrences() ? ClUseShortGranules : NewRuntime; 396 397 // If we don't have personality function support, fall back to landing pads. 398 InstrumentLandingPads = ClInstrumentLandingPads.getNumOccurrences() 399 ? ClInstrumentLandingPads 400 : !NewRuntime; 401 402 if (!CompileKernel) { 403 std::tie(HwasanCtorFunction, std::ignore) = 404 getOrCreateSanitizerCtorAndInitFunctions( 405 M, kHwasanModuleCtorName, kHwasanInitName, 406 /*InitArgTypes=*/{}, 407 /*InitArgs=*/{}, 408 // This callback is invoked when the functions are created the first 409 // time. Hook them into the global ctors list in that case: 410 [&](Function *Ctor, FunctionCallee) { 411 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName); 412 Ctor->setComdat(CtorComdat); 413 appendToGlobalCtors(M, Ctor, 0, Ctor); 414 }); 415 416 bool InstrumentGlobals = 417 ClGlobals.getNumOccurrences() ? ClGlobals : NewRuntime; 418 if (InstrumentGlobals) 419 instrumentGlobals(); 420 421 bool InstrumentPersonalityFunctions = 422 ClInstrumentPersonalityFunctions.getNumOccurrences() 423 ? ClInstrumentPersonalityFunctions 424 : NewRuntime; 425 if (InstrumentPersonalityFunctions) 426 instrumentPersonalityFunctions(); 427 } 428 429 if (!TargetTriple.isAndroid()) { 430 Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] { 431 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false, 432 GlobalValue::ExternalLinkage, nullptr, 433 "__hwasan_tls", nullptr, 434 GlobalVariable::InitialExecTLSModel); 435 appendToCompilerUsed(M, GV); 436 return GV; 437 }); 438 ThreadPtrGlobal = cast<GlobalVariable>(C); 439 } 440 } 441 442 void HWAddressSanitizer::initializeCallbacks(Module &M) { 443 IRBuilder<> IRB(*C); 444 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { 445 const std::string TypeStr = AccessIsWrite ? "store" : "load"; 446 const std::string EndingStr = Recover ? "_noabort" : ""; 447 448 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction( 449 ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr, 450 FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false)); 451 452 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; 453 AccessSizeIndex++) { 454 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] = 455 M.getOrInsertFunction( 456 ClMemoryAccessCallbackPrefix + TypeStr + 457 itostr(1ULL << AccessSizeIndex) + EndingStr, 458 FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false)); 459 } 460 } 461 462 HwasanTagMemoryFunc = M.getOrInsertFunction( 463 "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy); 464 HwasanGenerateTagFunc = 465 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty); 466 467 ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow", 468 ArrayType::get(IRB.getInt8Ty(), 0)); 469 470 const std::string MemIntrinCallbackPrefix = 471 CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix; 472 HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove", 473 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 474 IRB.getInt8PtrTy(), IntptrTy); 475 HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", 476 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 477 IRB.getInt8PtrTy(), IntptrTy); 478 HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset", 479 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), 480 IRB.getInt32Ty(), IntptrTy); 481 482 HWAsanHandleVfork = 483 M.getOrInsertFunction("__hwasan_handle_vfork", IRB.getVoidTy(), IntptrTy); 484 } 485 486 Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) { 487 // An empty inline asm with input reg == output reg. 488 // An opaque no-op cast, basically. 489 InlineAsm *Asm = InlineAsm::get( 490 FunctionType::get(Int8PtrTy, {ShadowGlobal->getType()}, false), 491 StringRef(""), StringRef("=r,0"), 492 /*hasSideEffects=*/false); 493 return IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow"); 494 } 495 496 Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) { 497 // Generate code only when dynamic addressing is needed. 498 if (Mapping.Offset != kDynamicShadowSentinel) 499 return nullptr; 500 501 if (Mapping.InGlobal) { 502 return getDynamicShadowIfunc(IRB); 503 } else { 504 Value *GlobalDynamicAddress = 505 IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal( 506 kHwasanShadowMemoryDynamicAddress, Int8PtrTy); 507 return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress); 508 } 509 } 510 511 bool HWAddressSanitizer::ignoreAccess(Value *Ptr) { 512 // Do not instrument acesses from different address spaces; we cannot deal 513 // with them. 514 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType()); 515 if (PtrTy->getPointerAddressSpace() != 0) 516 return true; 517 518 // Ignore swifterror addresses. 519 // swifterror memory addresses are mem2reg promoted by instruction 520 // selection. As such they cannot have regular uses like an instrumentation 521 // function and it makes no sense to track them as memory. 522 if (Ptr->isSwiftError()) 523 return true; 524 525 return false; 526 } 527 528 void HWAddressSanitizer::getInterestingMemoryOperands( 529 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) { 530 // Skip memory accesses inserted by another instrumentation. 531 if (I->hasMetadata("nosanitize")) 532 return; 533 534 // Do not instrument the load fetching the dynamic shadow address. 535 if (LocalDynamicShadow == I) 536 return; 537 538 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 539 if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand())) 540 return; 541 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false, 542 LI->getType(), LI->getAlign()); 543 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 544 if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand())) 545 return; 546 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true, 547 SI->getValueOperand()->getType(), SI->getAlign()); 548 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { 549 if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand())) 550 return; 551 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true, 552 RMW->getValOperand()->getType(), None); 553 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) { 554 if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand())) 555 return; 556 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true, 557 XCHG->getCompareOperand()->getType(), None); 558 } else if (auto CI = dyn_cast<CallInst>(I)) { 559 for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) { 560 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) || 561 ignoreAccess(CI->getArgOperand(ArgNo))) 562 continue; 563 Type *Ty = CI->getParamByValType(ArgNo); 564 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1)); 565 } 566 } 567 } 568 569 static unsigned getPointerOperandIndex(Instruction *I) { 570 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 571 return LI->getPointerOperandIndex(); 572 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 573 return SI->getPointerOperandIndex(); 574 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) 575 return RMW->getPointerOperandIndex(); 576 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) 577 return XCHG->getPointerOperandIndex(); 578 report_fatal_error("Unexpected instruction"); 579 return -1; 580 } 581 582 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { 583 size_t Res = countTrailingZeros(TypeSize / 8); 584 assert(Res < kNumberOfAccessSizes); 585 return Res; 586 } 587 588 void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) { 589 if (TargetTriple.isAArch64()) 590 return; 591 592 IRBuilder<> IRB(I); 593 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); 594 Value *UntaggedPtr = 595 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType()); 596 I->setOperand(getPointerOperandIndex(I), UntaggedPtr); 597 } 598 599 Value *HWAddressSanitizer::shadowBase() { 600 if (LocalDynamicShadow) 601 return LocalDynamicShadow; 602 return ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, Mapping.Offset), 603 Int8PtrTy); 604 } 605 606 Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) { 607 // Mem >> Scale 608 Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale); 609 if (Mapping.Offset == 0) 610 return IRB.CreateIntToPtr(Shadow, Int8PtrTy); 611 // (Mem >> Scale) + Offset 612 return IRB.CreateGEP(Int8Ty, shadowBase(), Shadow); 613 } 614 615 void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite, 616 unsigned AccessSizeIndex, 617 Instruction *InsertBefore) { 618 const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex; 619 IRBuilder<> IRB(InsertBefore); 620 621 if (!ClInlineAllChecks && TargetTriple.isAArch64() && 622 TargetTriple.isOSBinFormatELF() && !Recover) { 623 Module *M = IRB.GetInsertBlock()->getParent()->getParent(); 624 Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy); 625 IRB.CreateCall(Intrinsic::getDeclaration( 626 M, UseShortGranules 627 ? Intrinsic::hwasan_check_memaccess_shortgranules 628 : Intrinsic::hwasan_check_memaccess), 629 {shadowBase(), Ptr, ConstantInt::get(Int32Ty, AccessInfo)}); 630 return; 631 } 632 633 Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy); 634 Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift), 635 IRB.getInt8Ty()); 636 Value *AddrLong = untagPointer(IRB, PtrLong); 637 Value *Shadow = memToShadow(AddrLong, IRB); 638 Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow); 639 Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag); 640 641 int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ? 642 ClMatchAllTag : (CompileKernel ? 0xFF : -1); 643 if (matchAllTag != -1) { 644 Value *TagNotIgnored = IRB.CreateICmpNE(PtrTag, 645 ConstantInt::get(PtrTag->getType(), matchAllTag)); 646 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored); 647 } 648 649 Instruction *CheckTerm = 650 SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false, 651 MDBuilder(*C).createBranchWeights(1, 100000)); 652 653 IRB.SetInsertPoint(CheckTerm); 654 Value *OutOfShortGranuleTagRange = 655 IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15)); 656 Instruction *CheckFailTerm = 657 SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover, 658 MDBuilder(*C).createBranchWeights(1, 100000)); 659 660 IRB.SetInsertPoint(CheckTerm); 661 Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty); 662 PtrLowBits = IRB.CreateAdd( 663 PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1)); 664 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag); 665 SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false, 666 MDBuilder(*C).createBranchWeights(1, 100000), 667 nullptr, nullptr, CheckFailTerm->getParent()); 668 669 IRB.SetInsertPoint(CheckTerm); 670 Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15); 671 InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy); 672 Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr); 673 Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag); 674 SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false, 675 MDBuilder(*C).createBranchWeights(1, 100000), 676 nullptr, nullptr, CheckFailTerm->getParent()); 677 678 IRB.SetInsertPoint(CheckFailTerm); 679 InlineAsm *Asm; 680 switch (TargetTriple.getArch()) { 681 case Triple::x86_64: 682 // The signal handler will find the data address in rdi. 683 Asm = InlineAsm::get( 684 FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false), 685 "int3\nnopl " + itostr(0x40 + AccessInfo) + "(%rax)", 686 "{rdi}", 687 /*hasSideEffects=*/true); 688 break; 689 case Triple::aarch64: 690 case Triple::aarch64_be: 691 // The signal handler will find the data address in x0. 692 Asm = InlineAsm::get( 693 FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false), 694 "brk #" + itostr(0x900 + AccessInfo), 695 "{x0}", 696 /*hasSideEffects=*/true); 697 break; 698 default: 699 report_fatal_error("unsupported architecture"); 700 } 701 IRB.CreateCall(Asm, PtrLong); 702 if (Recover) 703 cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent()); 704 } 705 706 void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { 707 IRBuilder<> IRB(MI); 708 if (isa<MemTransferInst>(MI)) { 709 IRB.CreateCall( 710 isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy, 711 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), 712 IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()), 713 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); 714 } else if (isa<MemSetInst>(MI)) { 715 IRB.CreateCall( 716 HWAsanMemset, 717 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), 718 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false), 719 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); 720 } 721 MI->eraseFromParent(); 722 } 723 724 bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) { 725 Value *Addr = O.getPtr(); 726 727 LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n"); 728 729 if (O.MaybeMask) 730 return false; //FIXME 731 732 IRBuilder<> IRB(O.getInsn()); 733 if (isPowerOf2_64(O.TypeSize) && 734 (O.TypeSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) && 735 (!O.Alignment || *O.Alignment >= (1ULL << Mapping.Scale) || 736 *O.Alignment >= O.TypeSize / 8)) { 737 size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeSize); 738 if (ClInstrumentWithCalls) { 739 IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex], 740 IRB.CreatePointerCast(Addr, IntptrTy)); 741 } else { 742 instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn()); 743 } 744 } else { 745 IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], 746 {IRB.CreatePointerCast(Addr, IntptrTy), 747 ConstantInt::get(IntptrTy, O.TypeSize / 8)}); 748 } 749 untagPointerOperand(O.getInsn(), Addr); 750 751 return true; 752 } 753 754 static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) { 755 uint64_t ArraySize = 1; 756 if (AI.isArrayAllocation()) { 757 const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize()); 758 assert(CI && "non-constant array size"); 759 ArraySize = CI->getZExtValue(); 760 } 761 Type *Ty = AI.getAllocatedType(); 762 uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty); 763 return SizeInBytes * ArraySize; 764 } 765 766 bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, 767 Value *Tag, size_t Size) { 768 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment()); 769 if (!UseShortGranules) 770 Size = AlignedSize; 771 772 Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty()); 773 if (ClInstrumentWithCalls) { 774 IRB.CreateCall(HwasanTagMemoryFunc, 775 {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag, 776 ConstantInt::get(IntptrTy, AlignedSize)}); 777 } else { 778 size_t ShadowSize = Size >> Mapping.Scale; 779 Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB); 780 // If this memset is not inlined, it will be intercepted in the hwasan 781 // runtime library. That's OK, because the interceptor skips the checks if 782 // the address is in the shadow region. 783 // FIXME: the interceptor is not as fast as real memset. Consider lowering 784 // llvm.memset right here into either a sequence of stores, or a call to 785 // hwasan_tag_memory. 786 if (ShadowSize) 787 IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, Align(1)); 788 if (Size != AlignedSize) { 789 IRB.CreateStore( 790 ConstantInt::get(Int8Ty, Size % Mapping.getObjectAlignment()), 791 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize)); 792 IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32( 793 Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy), 794 AlignedSize - 1)); 795 } 796 } 797 return true; 798 } 799 800 static unsigned RetagMask(unsigned AllocaNo) { 801 // A list of 8-bit numbers that have at most one run of non-zero bits. 802 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these 803 // masks. 804 // The list does not include the value 255, which is used for UAR. 805 // 806 // Because we are more likely to use earlier elements of this list than later 807 // ones, it is sorted in increasing order of probability of collision with a 808 // mask allocated (temporally) nearby. The program that generated this list 809 // can be found at: 810 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py 811 static unsigned FastMasks[] = {0, 128, 64, 192, 32, 96, 224, 112, 240, 812 48, 16, 120, 248, 56, 24, 8, 124, 252, 813 60, 28, 12, 4, 126, 254, 62, 30, 14, 814 6, 2, 127, 63, 31, 15, 7, 3, 1}; 815 return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))]; 816 } 817 818 Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) { 819 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy); 820 } 821 822 Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) { 823 if (ClGenerateTagsWithCalls) 824 return getNextTagWithCall(IRB); 825 if (StackBaseTag) 826 return StackBaseTag; 827 // FIXME: use addressofreturnaddress (but implement it in aarch64 backend 828 // first). 829 Module *M = IRB.GetInsertBlock()->getParent()->getParent(); 830 auto GetStackPointerFn = Intrinsic::getDeclaration( 831 M, Intrinsic::frameaddress, 832 IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace())); 833 Value *StackPointer = IRB.CreateCall( 834 GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())}); 835 836 // Extract some entropy from the stack pointer for the tags. 837 // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ 838 // between functions). 839 Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy); 840 Value *StackTag = 841 IRB.CreateXor(StackPointerLong, IRB.CreateLShr(StackPointerLong, 20), 842 "hwasan.stack.base.tag"); 843 return StackTag; 844 } 845 846 Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag, 847 AllocaInst *AI, unsigned AllocaNo) { 848 if (ClGenerateTagsWithCalls) 849 return getNextTagWithCall(IRB); 850 return IRB.CreateXor(StackTag, 851 ConstantInt::get(IntptrTy, RetagMask(AllocaNo))); 852 } 853 854 Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) { 855 if (ClUARRetagToZero) 856 return ConstantInt::get(IntptrTy, 0); 857 if (ClGenerateTagsWithCalls) 858 return getNextTagWithCall(IRB); 859 return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, 0xFFU)); 860 } 861 862 // Add a tag to an address. 863 Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty, 864 Value *PtrLong, Value *Tag) { 865 Value *TaggedPtrLong; 866 if (CompileKernel) { 867 // Kernel addresses have 0xFF in the most significant byte. 868 Value *ShiftedTag = IRB.CreateOr( 869 IRB.CreateShl(Tag, kPointerTagShift), 870 ConstantInt::get(IntptrTy, (1ULL << kPointerTagShift) - 1)); 871 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag); 872 } else { 873 // Userspace can simply do OR (tag << 56); 874 Value *ShiftedTag = IRB.CreateShl(Tag, kPointerTagShift); 875 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag); 876 } 877 return IRB.CreateIntToPtr(TaggedPtrLong, Ty); 878 } 879 880 // Remove tag from an address. 881 Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) { 882 Value *UntaggedPtrLong; 883 if (CompileKernel) { 884 // Kernel addresses have 0xFF in the most significant byte. 885 UntaggedPtrLong = IRB.CreateOr(PtrLong, 886 ConstantInt::get(PtrLong->getType(), 0xFFULL << kPointerTagShift)); 887 } else { 888 // Userspace addresses have 0x00. 889 UntaggedPtrLong = IRB.CreateAnd(PtrLong, 890 ConstantInt::get(PtrLong->getType(), ~(0xFFULL << kPointerTagShift))); 891 } 892 return UntaggedPtrLong; 893 } 894 895 Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) { 896 Module *M = IRB.GetInsertBlock()->getParent()->getParent(); 897 if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) { 898 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER 899 // in Bionic's libc/private/bionic_tls.h. 900 Function *ThreadPointerFunc = 901 Intrinsic::getDeclaration(M, Intrinsic::thread_pointer); 902 Value *SlotPtr = IRB.CreatePointerCast( 903 IRB.CreateConstGEP1_32(IRB.getInt8Ty(), 904 IRB.CreateCall(ThreadPointerFunc), 0x30), 905 Ty->getPointerTo(0)); 906 return SlotPtr; 907 } 908 if (ThreadPtrGlobal) 909 return ThreadPtrGlobal; 910 911 912 return nullptr; 913 } 914 915 void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) { 916 if (!Mapping.InTls) { 917 LocalDynamicShadow = getDynamicShadowNonTls(IRB); 918 return; 919 } 920 921 if (!WithFrameRecord && TargetTriple.isAndroid()) { 922 LocalDynamicShadow = getDynamicShadowIfunc(IRB); 923 return; 924 } 925 926 Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy); 927 assert(SlotPtr); 928 929 Value *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr); 930 // Extract the address field from ThreadLong. Unnecessary on AArch64 with TBI. 931 Value *ThreadLongMaybeUntagged = 932 TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong); 933 934 if (WithFrameRecord) { 935 Function *F = IRB.GetInsertBlock()->getParent(); 936 StackBaseTag = IRB.CreateAShr(ThreadLong, 3); 937 938 // Prepare ring buffer data. 939 Value *PC; 940 if (TargetTriple.getArch() == Triple::aarch64) 941 PC = readRegister(IRB, "pc"); 942 else 943 PC = IRB.CreatePtrToInt(F, IntptrTy); 944 Module *M = F->getParent(); 945 auto GetStackPointerFn = Intrinsic::getDeclaration( 946 M, Intrinsic::frameaddress, 947 IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace())); 948 Value *SP = IRB.CreatePtrToInt( 949 IRB.CreateCall(GetStackPointerFn, 950 {Constant::getNullValue(IRB.getInt32Ty())}), 951 IntptrTy); 952 // Mix SP and PC. 953 // Assumptions: 954 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero) 955 // SP is 0xsssssssssssSSSS0 (4 lower bits are zero) 956 // We only really need ~20 lower non-zero bits (SSSS), so we mix like this: 957 // 0xSSSSPPPPPPPPPPPP 958 SP = IRB.CreateShl(SP, 44); 959 960 // Store data to ring buffer. 961 Value *RecordPtr = 962 IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IntptrTy->getPointerTo(0)); 963 IRB.CreateStore(IRB.CreateOr(PC, SP), RecordPtr); 964 965 // Update the ring buffer. Top byte of ThreadLong defines the size of the 966 // buffer in pages, it must be a power of two, and the start of the buffer 967 // must be aligned by twice that much. Therefore wrap around of the ring 968 // buffer is simply Addr &= ~((ThreadLong >> 56) << 12). 969 // The use of AShr instead of LShr is due to 970 // https://bugs.llvm.org/show_bug.cgi?id=39030 971 // Runtime library makes sure not to use the highest bit. 972 Value *WrapMask = IRB.CreateXor( 973 IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true), 974 ConstantInt::get(IntptrTy, (uint64_t)-1)); 975 Value *ThreadLongNew = IRB.CreateAnd( 976 IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask); 977 IRB.CreateStore(ThreadLongNew, SlotPtr); 978 } 979 980 // Get shadow base address by aligning RecordPtr up. 981 // Note: this is not correct if the pointer is already aligned. 982 // Runtime library will make sure this never happens. 983 LocalDynamicShadow = IRB.CreateAdd( 984 IRB.CreateOr( 985 ThreadLongMaybeUntagged, 986 ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)), 987 ConstantInt::get(IntptrTy, 1), "hwasan.shadow"); 988 LocalDynamicShadow = IRB.CreateIntToPtr(LocalDynamicShadow, Int8PtrTy); 989 } 990 991 Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) { 992 Module *M = IRB.GetInsertBlock()->getParent()->getParent(); 993 Function *ReadRegister = 994 Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy); 995 MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)}); 996 Value *Args[] = {MetadataAsValue::get(*C, MD)}; 997 return IRB.CreateCall(ReadRegister, Args); 998 } 999 1000 bool HWAddressSanitizer::instrumentLandingPads( 1001 SmallVectorImpl<Instruction *> &LandingPadVec) { 1002 for (auto *LP : LandingPadVec) { 1003 IRBuilder<> IRB(LP->getNextNode()); 1004 IRB.CreateCall( 1005 HWAsanHandleVfork, 1006 {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" 1007 : "sp")}); 1008 } 1009 return true; 1010 } 1011 1012 bool HWAddressSanitizer::instrumentStack( 1013 SmallVectorImpl<AllocaInst *> &Allocas, 1014 DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap, 1015 SmallVectorImpl<Instruction *> &RetVec, Value *StackTag) { 1016 // Ideally, we want to calculate tagged stack base pointer, and rewrite all 1017 // alloca addresses using that. Unfortunately, offsets are not known yet 1018 // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a 1019 // temp, shift-OR it into each alloca address and xor with the retag mask. 1020 // This generates one extra instruction per alloca use. 1021 for (unsigned N = 0; N < Allocas.size(); ++N) { 1022 auto *AI = Allocas[N]; 1023 IRBuilder<> IRB(AI->getNextNode()); 1024 1025 // Replace uses of the alloca with tagged address. 1026 Value *Tag = getAllocaTag(IRB, StackTag, AI, N); 1027 Value *AILong = IRB.CreatePointerCast(AI, IntptrTy); 1028 Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag); 1029 std::string Name = 1030 AI->hasName() ? AI->getName().str() : "alloca." + itostr(N); 1031 Replacement->setName(Name + ".hwasan"); 1032 1033 AI->replaceUsesWithIf(Replacement, 1034 [AILong](Use &U) { return U.getUser() != AILong; }); 1035 1036 for (auto *DDI : AllocaDbgMap.lookup(AI)) { 1037 // Prepend "tag_offset, N" to the dwarf expression. 1038 // Tag offset logically applies to the alloca pointer, and it makes sense 1039 // to put it at the beginning of the expression. 1040 SmallVector<uint64_t, 8> NewOps = {dwarf::DW_OP_LLVM_tag_offset, 1041 RetagMask(N)}; 1042 DDI->setArgOperand( 1043 2, MetadataAsValue::get(*C, DIExpression::prependOpcodes( 1044 DDI->getExpression(), NewOps))); 1045 } 1046 1047 size_t Size = getAllocaSizeInBytes(*AI); 1048 tagAlloca(IRB, AI, Tag, Size); 1049 1050 for (auto RI : RetVec) { 1051 IRB.SetInsertPoint(RI); 1052 1053 // Re-tag alloca memory with the special UAR tag. 1054 Value *Tag = getUARTag(IRB, StackTag); 1055 tagAlloca(IRB, AI, Tag, alignTo(Size, Mapping.getObjectAlignment())); 1056 } 1057 } 1058 1059 return true; 1060 } 1061 1062 bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) { 1063 return (AI.getAllocatedType()->isSized() && 1064 // FIXME: instrument dynamic allocas, too 1065 AI.isStaticAlloca() && 1066 // alloca() may be called with 0 size, ignore it. 1067 getAllocaSizeInBytes(AI) > 0 && 1068 // We are only interested in allocas not promotable to registers. 1069 // Promotable allocas are common under -O0. 1070 !isAllocaPromotable(&AI) && 1071 // inalloca allocas are not treated as static, and we don't want 1072 // dynamic alloca instrumentation for them as well. 1073 !AI.isUsedWithInAlloca() && 1074 // swifterror allocas are register promoted by ISel 1075 !AI.isSwiftError()); 1076 } 1077 1078 bool HWAddressSanitizer::sanitizeFunction(Function &F) { 1079 if (&F == HwasanCtorFunction) 1080 return false; 1081 1082 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress)) 1083 return false; 1084 1085 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n"); 1086 1087 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument; 1088 SmallVector<MemIntrinsic *, 16> IntrinToInstrument; 1089 SmallVector<AllocaInst*, 8> AllocasToInstrument; 1090 SmallVector<Instruction*, 8> RetVec; 1091 SmallVector<Instruction*, 8> LandingPadVec; 1092 DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> AllocaDbgMap; 1093 for (auto &BB : F) { 1094 for (auto &Inst : BB) { 1095 if (ClInstrumentStack) 1096 if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) { 1097 if (isInterestingAlloca(*AI)) 1098 AllocasToInstrument.push_back(AI); 1099 continue; 1100 } 1101 1102 if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) || 1103 isa<CleanupReturnInst>(Inst)) 1104 RetVec.push_back(&Inst); 1105 1106 if (auto *DDI = dyn_cast<DbgVariableIntrinsic>(&Inst)) 1107 if (auto *Alloca = 1108 dyn_cast_or_null<AllocaInst>(DDI->getVariableLocation())) 1109 AllocaDbgMap[Alloca].push_back(DDI); 1110 1111 if (InstrumentLandingPads && isa<LandingPadInst>(Inst)) 1112 LandingPadVec.push_back(&Inst); 1113 1114 getInterestingMemoryOperands(&Inst, OperandsToInstrument); 1115 1116 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) 1117 IntrinToInstrument.push_back(MI); 1118 } 1119 } 1120 1121 initializeCallbacks(*F.getParent()); 1122 1123 bool Changed = false; 1124 1125 if (!LandingPadVec.empty()) 1126 Changed |= instrumentLandingPads(LandingPadVec); 1127 1128 if (AllocasToInstrument.empty() && F.hasPersonalityFn() && 1129 F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) { 1130 // __hwasan_personality_thunk is a no-op for functions without an 1131 // instrumented stack, so we can drop it. 1132 F.setPersonalityFn(nullptr); 1133 Changed = true; 1134 } 1135 1136 if (AllocasToInstrument.empty() && OperandsToInstrument.empty() && 1137 IntrinToInstrument.empty()) 1138 return Changed; 1139 1140 assert(!LocalDynamicShadow); 1141 1142 Instruction *InsertPt = &*F.getEntryBlock().begin(); 1143 IRBuilder<> EntryIRB(InsertPt); 1144 emitPrologue(EntryIRB, 1145 /*WithFrameRecord*/ ClRecordStackHistory && 1146 !AllocasToInstrument.empty()); 1147 1148 if (!AllocasToInstrument.empty()) { 1149 Value *StackTag = 1150 ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB); 1151 instrumentStack(AllocasToInstrument, AllocaDbgMap, RetVec, StackTag); 1152 } 1153 // Pad and align each of the allocas that we instrumented to stop small 1154 // uninteresting allocas from hiding in instrumented alloca's padding and so 1155 // that we have enough space to store real tags for short granules. 1156 DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap; 1157 for (AllocaInst *AI : AllocasToInstrument) { 1158 uint64_t Size = getAllocaSizeInBytes(*AI); 1159 uint64_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment()); 1160 AI->setAlignment( 1161 Align(std::max(AI->getAlignment(), Mapping.getObjectAlignment()))); 1162 if (Size != AlignedSize) { 1163 Type *AllocatedType = AI->getAllocatedType(); 1164 if (AI->isArrayAllocation()) { 1165 uint64_t ArraySize = 1166 cast<ConstantInt>(AI->getArraySize())->getZExtValue(); 1167 AllocatedType = ArrayType::get(AllocatedType, ArraySize); 1168 } 1169 Type *TypeWithPadding = StructType::get( 1170 AllocatedType, ArrayType::get(Int8Ty, AlignedSize - Size)); 1171 auto *NewAI = new AllocaInst( 1172 TypeWithPadding, AI->getType()->getAddressSpace(), nullptr, "", AI); 1173 NewAI->takeName(AI); 1174 NewAI->setAlignment(AI->getAlign()); 1175 NewAI->setUsedWithInAlloca(AI->isUsedWithInAlloca()); 1176 NewAI->setSwiftError(AI->isSwiftError()); 1177 NewAI->copyMetadata(*AI); 1178 auto *Bitcast = new BitCastInst(NewAI, AI->getType(), "", AI); 1179 AI->replaceAllUsesWith(Bitcast); 1180 AllocaToPaddedAllocaMap[AI] = NewAI; 1181 } 1182 } 1183 1184 if (!AllocaToPaddedAllocaMap.empty()) { 1185 for (auto &BB : F) 1186 for (auto &Inst : BB) 1187 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) 1188 if (auto *AI = 1189 dyn_cast_or_null<AllocaInst>(DVI->getVariableLocation())) 1190 if (auto *NewAI = AllocaToPaddedAllocaMap.lookup(AI)) 1191 DVI->setArgOperand( 1192 0, MetadataAsValue::get(*C, LocalAsMetadata::get(NewAI))); 1193 for (auto &P : AllocaToPaddedAllocaMap) 1194 P.first->eraseFromParent(); 1195 } 1196 1197 // If we split the entry block, move any allocas that were originally in the 1198 // entry block back into the entry block so that they aren't treated as 1199 // dynamic allocas. 1200 if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) { 1201 InsertPt = &*F.getEntryBlock().begin(); 1202 for (auto II = EntryIRB.GetInsertBlock()->begin(), 1203 IE = EntryIRB.GetInsertBlock()->end(); 1204 II != IE;) { 1205 Instruction *I = &*II++; 1206 if (auto *AI = dyn_cast<AllocaInst>(I)) 1207 if (isa<ConstantInt>(AI->getArraySize())) 1208 I->moveBefore(InsertPt); 1209 } 1210 } 1211 1212 for (auto &Operand : OperandsToInstrument) 1213 instrumentMemAccess(Operand); 1214 1215 if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) { 1216 for (auto Inst : IntrinToInstrument) 1217 instrumentMemIntrinsic(cast<MemIntrinsic>(Inst)); 1218 } 1219 1220 LocalDynamicShadow = nullptr; 1221 StackBaseTag = nullptr; 1222 1223 return true; 1224 } 1225 1226 void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) { 1227 Constant *Initializer = GV->getInitializer(); 1228 uint64_t SizeInBytes = 1229 M.getDataLayout().getTypeAllocSize(Initializer->getType()); 1230 uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment()); 1231 if (SizeInBytes != NewSize) { 1232 // Pad the initializer out to the next multiple of 16 bytes and add the 1233 // required short granule tag. 1234 std::vector<uint8_t> Init(NewSize - SizeInBytes, 0); 1235 Init.back() = Tag; 1236 Constant *Padding = ConstantDataArray::get(*C, Init); 1237 Initializer = ConstantStruct::getAnon({Initializer, Padding}); 1238 } 1239 1240 auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(), 1241 GlobalValue::ExternalLinkage, Initializer, 1242 GV->getName() + ".hwasan"); 1243 NewGV->copyAttributesFrom(GV); 1244 NewGV->setLinkage(GlobalValue::PrivateLinkage); 1245 NewGV->copyMetadata(GV, 0); 1246 NewGV->setAlignment( 1247 MaybeAlign(std::max(GV->getAlignment(), Mapping.getObjectAlignment()))); 1248 1249 // It is invalid to ICF two globals that have different tags. In the case 1250 // where the size of the global is a multiple of the tag granularity the 1251 // contents of the globals may be the same but the tags (i.e. symbol values) 1252 // may be different, and the symbols are not considered during ICF. In the 1253 // case where the size is not a multiple of the granularity, the short granule 1254 // tags would discriminate two globals with different tags, but there would 1255 // otherwise be nothing stopping such a global from being incorrectly ICF'd 1256 // with an uninstrumented (i.e. tag 0) global that happened to have the short 1257 // granule tag in the last byte. 1258 NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None); 1259 1260 // Descriptor format (assuming little-endian): 1261 // bytes 0-3: relative address of global 1262 // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case 1263 // it isn't, we create multiple descriptors) 1264 // byte 7: tag 1265 auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty); 1266 const uint64_t MaxDescriptorSize = 0xfffff0; 1267 for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes; 1268 DescriptorPos += MaxDescriptorSize) { 1269 auto *Descriptor = 1270 new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage, 1271 nullptr, GV->getName() + ".hwasan.descriptor"); 1272 auto *GVRelPtr = ConstantExpr::getTrunc( 1273 ConstantExpr::getAdd( 1274 ConstantExpr::getSub( 1275 ConstantExpr::getPtrToInt(NewGV, Int64Ty), 1276 ConstantExpr::getPtrToInt(Descriptor, Int64Ty)), 1277 ConstantInt::get(Int64Ty, DescriptorPos)), 1278 Int32Ty); 1279 uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize); 1280 auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24)); 1281 Descriptor->setComdat(NewGV->getComdat()); 1282 Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag})); 1283 Descriptor->setSection("hwasan_globals"); 1284 Descriptor->setMetadata(LLVMContext::MD_associated, 1285 MDNode::get(*C, ValueAsMetadata::get(NewGV))); 1286 appendToCompilerUsed(M, Descriptor); 1287 } 1288 1289 Constant *Aliasee = ConstantExpr::getIntToPtr( 1290 ConstantExpr::getAdd( 1291 ConstantExpr::getPtrToInt(NewGV, Int64Ty), 1292 ConstantInt::get(Int64Ty, uint64_t(Tag) << kPointerTagShift)), 1293 GV->getType()); 1294 auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(), 1295 GV->getLinkage(), "", Aliasee, &M); 1296 Alias->setVisibility(GV->getVisibility()); 1297 Alias->takeName(GV); 1298 GV->replaceAllUsesWith(Alias); 1299 GV->eraseFromParent(); 1300 } 1301 1302 void HWAddressSanitizer::instrumentGlobals() { 1303 // Start by creating a note that contains pointers to the list of global 1304 // descriptors. Adding a note to the output file will cause the linker to 1305 // create a PT_NOTE program header pointing to the note that we can use to 1306 // find the descriptor list starting from the program headers. A function 1307 // provided by the runtime initializes the shadow memory for the globals by 1308 // accessing the descriptor list via the note. The dynamic loader needs to 1309 // call this function whenever a library is loaded. 1310 // 1311 // The reason why we use a note for this instead of a more conventional 1312 // approach of having a global constructor pass a descriptor list pointer to 1313 // the runtime is because of an order of initialization problem. With 1314 // constructors we can encounter the following problematic scenario: 1315 // 1316 // 1) library A depends on library B and also interposes one of B's symbols 1317 // 2) B's constructors are called before A's (as required for correctness) 1318 // 3) during construction, B accesses one of its "own" globals (actually 1319 // interposed by A) and triggers a HWASAN failure due to the initialization 1320 // for A not having happened yet 1321 // 1322 // Even without interposition it is possible to run into similar situations in 1323 // cases where two libraries mutually depend on each other. 1324 // 1325 // We only need one note per binary, so put everything for the note in a 1326 // comdat. This need to be a comdat with an .init_array section to prevent 1327 // newer versions of lld from discarding the note. 1328 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName); 1329 1330 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0); 1331 auto Start = 1332 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage, 1333 nullptr, "__start_hwasan_globals"); 1334 Start->setVisibility(GlobalValue::HiddenVisibility); 1335 Start->setDSOLocal(true); 1336 auto Stop = 1337 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage, 1338 nullptr, "__stop_hwasan_globals"); 1339 Stop->setVisibility(GlobalValue::HiddenVisibility); 1340 Stop->setDSOLocal(true); 1341 1342 // Null-terminated so actually 8 bytes, which are required in order to align 1343 // the note properly. 1344 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0"); 1345 1346 auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(), 1347 Int32Ty, Int32Ty); 1348 auto *Note = 1349 new GlobalVariable(M, NoteTy, /*isConstantGlobal=*/true, 1350 GlobalValue::PrivateLinkage, nullptr, kHwasanNoteName); 1351 Note->setSection(".note.hwasan.globals"); 1352 Note->setComdat(NoteComdat); 1353 Note->setAlignment(Align(4)); 1354 Note->setDSOLocal(true); 1355 1356 // The pointers in the note need to be relative so that the note ends up being 1357 // placed in rodata, which is the standard location for notes. 1358 auto CreateRelPtr = [&](Constant *Ptr) { 1359 return ConstantExpr::getTrunc( 1360 ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty), 1361 ConstantExpr::getPtrToInt(Note, Int64Ty)), 1362 Int32Ty); 1363 }; 1364 Note->setInitializer(ConstantStruct::getAnon( 1365 {ConstantInt::get(Int32Ty, 8), // n_namesz 1366 ConstantInt::get(Int32Ty, 8), // n_descsz 1367 ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type 1368 Name, CreateRelPtr(Start), CreateRelPtr(Stop)})); 1369 appendToCompilerUsed(M, Note); 1370 1371 // Create a zero-length global in hwasan_globals so that the linker will 1372 // always create start and stop symbols. 1373 auto Dummy = new GlobalVariable( 1374 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage, 1375 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global"); 1376 Dummy->setSection("hwasan_globals"); 1377 Dummy->setComdat(NoteComdat); 1378 Dummy->setMetadata(LLVMContext::MD_associated, 1379 MDNode::get(*C, ValueAsMetadata::get(Note))); 1380 appendToCompilerUsed(M, Dummy); 1381 1382 std::vector<GlobalVariable *> Globals; 1383 for (GlobalVariable &GV : M.globals()) { 1384 if (GV.isDeclarationForLinker() || GV.getName().startswith("llvm.") || 1385 GV.isThreadLocal()) 1386 continue; 1387 1388 // Common symbols can't have aliases point to them, so they can't be tagged. 1389 if (GV.hasCommonLinkage()) 1390 continue; 1391 1392 // Globals with custom sections may be used in __start_/__stop_ enumeration, 1393 // which would be broken both by adding tags and potentially by the extra 1394 // padding/alignment that we insert. 1395 if (GV.hasSection()) 1396 continue; 1397 1398 Globals.push_back(&GV); 1399 } 1400 1401 MD5 Hasher; 1402 Hasher.update(M.getSourceFileName()); 1403 MD5::MD5Result Hash; 1404 Hasher.final(Hash); 1405 uint8_t Tag = Hash[0]; 1406 1407 for (GlobalVariable *GV : Globals) { 1408 // Skip tag 0 in order to avoid collisions with untagged memory. 1409 if (Tag == 0) 1410 Tag = 1; 1411 instrumentGlobal(GV, Tag++); 1412 } 1413 } 1414 1415 void HWAddressSanitizer::instrumentPersonalityFunctions() { 1416 // We need to untag stack frames as we unwind past them. That is the job of 1417 // the personality function wrapper, which either wraps an existing 1418 // personality function or acts as a personality function on its own. Each 1419 // function that has a personality function or that can be unwound past has 1420 // its personality function changed to a thunk that calls the personality 1421 // function wrapper in the runtime. 1422 MapVector<Constant *, std::vector<Function *>> PersonalityFns; 1423 for (Function &F : M) { 1424 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress)) 1425 continue; 1426 1427 if (F.hasPersonalityFn()) { 1428 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F); 1429 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) { 1430 PersonalityFns[nullptr].push_back(&F); 1431 } 1432 } 1433 1434 if (PersonalityFns.empty()) 1435 return; 1436 1437 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction( 1438 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, 1439 Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy); 1440 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy); 1441 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy); 1442 1443 for (auto &P : PersonalityFns) { 1444 std::string ThunkName = kHwasanPersonalityThunkName; 1445 if (P.first) 1446 ThunkName += ("." + P.first->getName()).str(); 1447 FunctionType *ThunkFnTy = FunctionType::get( 1448 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int8PtrTy}, false); 1449 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) || 1450 cast<GlobalValue>(P.first)->hasLocalLinkage()); 1451 auto *ThunkFn = Function::Create(ThunkFnTy, 1452 IsLocal ? GlobalValue::InternalLinkage 1453 : GlobalValue::LinkOnceODRLinkage, 1454 ThunkName, &M); 1455 if (!IsLocal) { 1456 ThunkFn->setVisibility(GlobalValue::HiddenVisibility); 1457 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName)); 1458 } 1459 1460 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn); 1461 IRBuilder<> IRB(BB); 1462 CallInst *WrapperCall = IRB.CreateCall( 1463 HwasanPersonalityWrapper, 1464 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2), 1465 ThunkFn->getArg(3), ThunkFn->getArg(4), 1466 P.first ? IRB.CreateBitCast(P.first, Int8PtrTy) 1467 : Constant::getNullValue(Int8PtrTy), 1468 IRB.CreateBitCast(UnwindGetGR.getCallee(), Int8PtrTy), 1469 IRB.CreateBitCast(UnwindGetCFA.getCallee(), Int8PtrTy)}); 1470 WrapperCall->setTailCall(); 1471 IRB.CreateRet(WrapperCall); 1472 1473 for (Function *F : P.second) 1474 F->setPersonalityFn(ThunkFn); 1475 } 1476 } 1477 1478 void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple) { 1479 Scale = kDefaultShadowScale; 1480 if (ClMappingOffset.getNumOccurrences() > 0) { 1481 InGlobal = false; 1482 InTls = false; 1483 Offset = ClMappingOffset; 1484 } else if (ClEnableKhwasan || ClInstrumentWithCalls) { 1485 InGlobal = false; 1486 InTls = false; 1487 Offset = 0; 1488 } else if (ClWithIfunc) { 1489 InGlobal = true; 1490 InTls = false; 1491 Offset = kDynamicShadowSentinel; 1492 } else if (ClWithTls) { 1493 InGlobal = false; 1494 InTls = true; 1495 Offset = kDynamicShadowSentinel; 1496 } else { 1497 InGlobal = false; 1498 InTls = false; 1499 Offset = kDynamicShadowSentinel; 1500 } 1501 } 1502