//===- HWAddressSanitizer.cpp - detector of memory access errors ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of HWAddressSanitizer, an address sanity checker
/// based on tagged addressing.
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <sstream>

using namespace llvm;

#define DEBUG_TYPE "hwasan"

const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
const char kHwasanNoteName[] = "hwasan.note";
const char kHwasanInitName[] = "__hwasan_init";
const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";

const char kHwasanShadowMemoryDynamicAddress[] =
    "__hwasan_shadow_memory_dynamic_address";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const size_t kDefaultShadowScale = 4;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const unsigned kPointerTagShift = 56;

static const unsigned kShadowBaseAlignment = 32;

static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "hwasan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__hwasan_"));

static cl::opt<bool>
    ClInstrumentWithCalls("hwasan-instrument-with-calls",
                          cl::desc("instrument reads and writes with callbacks"),
                          cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "hwasan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "hwasan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
                                       cl::desc("instrument byval arguments"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClRecover(
    "hwasan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
                                       cl::desc("instrument stack (allocas)"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClUARRetagToZero(
    "hwasan-uar-retag-to-zero",
    cl::desc("Clear alloca tags before returning from the function to allow "
             "mixing instrumented and non-instrumented function calls. When "
             "set to false, allocas are retagged before returning from the "
             "function to detect use after return."),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClGenerateTagsWithCalls(
    "hwasan-generate-tags-with-calls",
    cl::desc("generate new tags with runtime library calls"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
                               cl::Hidden, cl::init(false), cl::ZeroOrMore);

static cl::opt<int> ClMatchAllTag(
    "hwasan-match-all-tag",
    cl::desc("don't report bad accesses via pointers with this tag"),
    cl::Hidden, cl::init(-1));

static cl::opt<bool> ClEnableKhwasan(
    "hwasan-kernel",
    cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

// These flags allow changing the shadow mapping and control how shadow memory
// is accessed. The shadow mapping looks like:
//    Shadow = (Mem >> scale) + offset
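//
// For example, with the default scale of 4 every 16-byte granule of
// application memory is described by one shadow byte, and an access to
// address Mem is checked against the shadow byte at (Mem >> 4) + offset.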

static cl::opt<uint64_t>
    ClMappingOffset("hwasan-mapping-offset",
                    cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

static cl::opt<bool>
    ClWithIfunc("hwasan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(false));

static cl::opt<bool> ClWithTls(
    "hwasan-with-tls",
    cl::desc("Access dynamic shadow through a thread-local pointer on "
             "platforms that support this"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClRecordStackHistory("hwasan-record-stack-history",
                         cl::desc("Record stack frames with tagged allocations "
                                  "in a thread-local ring buffer"),
                         cl::Hidden, cl::init(true));
static cl::opt<bool>
    ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
                              cl::desc("instrument memory intrinsics"),
                              cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentLandingPads("hwasan-instrument-landing-pads",
                            cl::desc("instrument landing pads"), cl::Hidden,
                            cl::init(false), cl::ZeroOrMore);

static cl::opt<bool> ClUseShortGranules(
    "hwasan-use-short-granules",
    cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
    cl::init(false), cl::ZeroOrMore);

static cl::opt<bool> ClInstrumentPersonalityFunctions(
    "hwasan-instrument-personality-functions",
    cl::desc("instrument personality functions"), cl::Hidden, cl::init(false),
    cl::ZeroOrMore);

static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
                                       cl::desc("inline all checks"),
                                       cl::Hidden, cl::init(false));

namespace {

/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer {
public:
  explicit HWAddressSanitizer(Module &M, bool CompileKernel = false,
                              bool Recover = false) : M(M) {
    // Flags passed explicitly on the command line override the constructor
    // arguments.
    this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
    this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0
                              ? ClEnableKhwasan
                              : CompileKernel;

    initializeModule();
  }

  bool sanitizeFunction(Function &F);
  void initializeModule();
  void createHwasanCtorComdat();

  void initializeCallbacks(Module &M);

  Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);

  Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
  Value *getShadowNonTls(IRBuilder<> &IRB);

  void untagPointerOperand(Instruction *I, Value *Addr);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  bool instrumentMemAccess(InterestingMemoryOperand &O);
  bool ignoreAccess(Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);

  bool isInterestingAlloca(const AllocaInst &AI);
  bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  bool instrumentStack(
      SmallVectorImpl<AllocaInst *> &Allocas,
      DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap,
      SmallVectorImpl<Instruction *> &RetVec, Value *StackTag);
  Value *readRegister(IRBuilder<> &IRB, StringRef Name);
  bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
                      unsigned AllocaNo);
  Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);

  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
  void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);

  void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
  void instrumentGlobals();

  void instrumentPersonalityFunctions();

private:
  LLVMContext *C;
  Module &M;
  Triple TargetTriple;
  FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset;
  FunctionCallee HWAsanHandleVfork;

  /// This struct defines the shadow mapping using the rule:
  ///   shadow = (mem >> Scale) + Offset.
  /// If InGlobal is true, then
  ///   extern char __hwasan_shadow[];
  ///   shadow = (mem >> Scale) + &__hwasan_shadow
  /// If InTls is true, then
  ///   extern char *__hwasan_tls;
  ///   shadow = (mem >> Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
  struct ShadowMapping {
    int Scale;
    uint64_t Offset;
    bool InGlobal;
    bool InTls;

    void init(Triple &TargetTriple);
    unsigned getObjectAlignment() const { return 1U << Scale; }
  };
  ShadowMapping Mapping;

  Type *VoidTy = Type::getVoidTy(M.getContext());
  Type *IntptrTy;
  Type *Int8PtrTy;
  Type *Int8Ty;
  Type *Int32Ty;
  Type *Int64Ty = Type::getInt64Ty(M.getContext());

  bool CompileKernel;
  bool Recover;
  bool OutlinedChecks;
  bool UseShortGranules;
  bool InstrumentLandingPads;

  bool HasMatchAllTag = false;
  uint8_t MatchAllTag = 0;

  Function *HwasanCtorFunction;

  FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  FunctionCallee HwasanMemoryAccessCallbackSized[2];

  FunctionCallee HwasanTagMemoryFunc;
  FunctionCallee HwasanGenerateTagFunc;

  Constant *ShadowGlobal;

  Value *ShadowBase = nullptr;
  Value *StackBaseTag = nullptr;
  GlobalValue *ThreadPtrGlobal = nullptr;
};

class HWAddressSanitizerLegacyPass : public FunctionPass {
public:
  // Pass identification, replacement for typeid.
  static char ID;

  explicit HWAddressSanitizerLegacyPass(bool CompileKernel = false,
                                        bool Recover = false)
      : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover) {
    initializeHWAddressSanitizerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "HWAddressSanitizer"; }

  bool doInitialization(Module &M) override {
    HWASan = std::make_unique<HWAddressSanitizer>(M, CompileKernel, Recover);
    return true;
  }

  bool runOnFunction(Function &F) override {
    return HWASan->sanitizeFunction(F);
  }

  bool doFinalization(Module &M) override {
    HWASan.reset();
    return false;
  }

private:
  std::unique_ptr<HWAddressSanitizer> HWASan;
  bool CompileKernel;
  bool Recover;
};

} // end anonymous namespace

char HWAddressSanitizerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(
    HWAddressSanitizerLegacyPass, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)
INITIALIZE_PASS_END(
    HWAddressSanitizerLegacyPass, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)

FunctionPass *llvm::createHWAddressSanitizerLegacyPassPass(bool CompileKernel,
                                                           bool Recover) {
  assert(!CompileKernel || Recover);
  return new HWAddressSanitizerLegacyPass(CompileKernel, Recover);
}

HWAddressSanitizerPass::HWAddressSanitizerPass(bool CompileKernel, bool Recover)
    : CompileKernel(CompileKernel), Recover(Recover) {}

PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
                                              ModuleAnalysisManager &MAM) {
  HWAddressSanitizer HWASan(M, CompileKernel, Recover);
  bool Modified = false;
  for (Function &F : M)
    Modified |= HWASan.sanitizeFunction(F);
  if (Modified)
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

void HWAddressSanitizer::createHwasanCtorComdat() {
  std::tie(HwasanCtorFunction, std::ignore) =
      getOrCreateSanitizerCtorAndInitFunctions(
          M, kHwasanModuleCtorName, kHwasanInitName,
          /*InitArgTypes=*/{},
          /*InitArgs=*/{},
          // This callback is invoked when the functions are created the first
          // time. Hook them into the global ctors list in that case:
          [&](Function *Ctor, FunctionCallee) {
            Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
            Ctor->setComdat(CtorComdat);
            appendToGlobalCtors(M, Ctor, 0, Ctor);
          });

  // Create a note that contains pointers to the list of global
  // descriptors. Adding a note to the output file will cause the linker to
  // create a PT_NOTE program header pointing to the note that we can use to
  // find the descriptor list starting from the program headers. A function
  // provided by the runtime initializes the shadow memory for the globals by
  // accessing the descriptor list via the note. The dynamic loader needs to
  // call this function whenever a library is loaded.
  //
  // The reason why we use a note for this instead of a more conventional
  // approach of having a global constructor pass a descriptor list pointer to
  // the runtime is because of an order of initialization problem. With
  // constructors we can encounter the following problematic scenario:
  //
  // 1) library A depends on library B and also interposes one of B's symbols
  // 2) B's constructors are called before A's (as required for correctness)
  // 3) during construction, B accesses one of its "own" globals (actually
  //    interposed by A) and triggers a HWASAN failure due to the
  //    initialization for A not having happened yet
  //
  // Even without interposition it is possible to run into similar situations
  // in cases where two libraries mutually depend on each other.
  //
  // We only need one note per binary, so put everything for the note in a
  // comdat. This needs to be a comdat with an .init_array section to prevent
  // newer versions of lld from discarding the note.
  //
  // Create the note even if we aren't instrumenting globals. This ensures that
  // binaries linked from object files with both instrumented and
  // non-instrumented globals will end up with a note, even if a comdat from an
  // object file with non-instrumented globals is selected. The note is
  // harmless if the runtime doesn't support it, since it will just be ignored.
  Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);

  Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
  auto Start =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__start_hwasan_globals");
  Start->setVisibility(GlobalValue::HiddenVisibility);
  Start->setDSOLocal(true);
  auto Stop =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__stop_hwasan_globals");
  Stop->setVisibility(GlobalValue::HiddenVisibility);
  Stop->setDSOLocal(true);

  // Null-terminated so actually 8 bytes, which are required in order to align
  // the note properly.
  auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");

  auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
                                 Int32Ty, Int32Ty);
  auto *Note =
      new GlobalVariable(M, NoteTy, /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, nullptr, kHwasanNoteName);
  Note->setSection(".note.hwasan.globals");
  Note->setComdat(NoteComdat);
  Note->setAlignment(Align(4));
  Note->setDSOLocal(true);

  // The pointers in the note need to be relative so that the note ends up
  // being placed in rodata, which is the standard location for notes.
  auto CreateRelPtr = [&](Constant *Ptr) {
    return ConstantExpr::getTrunc(
        ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty),
                             ConstantExpr::getPtrToInt(Note, Int64Ty)),
        Int32Ty);
  };
  Note->setInitializer(ConstantStruct::getAnon(
      {ConstantInt::get(Int32Ty, 8),                           // n_namesz
       ConstantInt::get(Int32Ty, 8),                           // n_descsz
       ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
       Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
  appendToCompilerUsed(M, Note);

  // Create a zero-length global in hwasan_globals so that the linker will
  // always create start and stop symbols.
  auto Dummy = new GlobalVariable(
      M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
      Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
  Dummy->setSection("hwasan_globals");
  Dummy->setComdat(NoteComdat);
  Dummy->setMetadata(LLVMContext::MD_associated,
                     MDNode::get(*C, ValueAsMetadata::get(Note)));
  appendToCompilerUsed(M, Dummy);
}
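
// For reference, the note emitted above is laid out as:
//   n_namesz = 8                      ("LLVM" padded with four NUL bytes)
//   n_descsz = 8                      (two 32-bit relative pointers)
//   n_type   = NT_LLVM_HWASAN_GLOBALS
//   desc     = offsets of __start_hwasan_globals and __stop_hwasan_globals,
//              relative to the note itself.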

/// Module-level initialization.
///
/// Inserts a call to __hwasan_init to the module's constructor list.
void HWAddressSanitizer::initializeModule() {
  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
  auto &DL = M.getDataLayout();

  TargetTriple = Triple(M.getTargetTriple());

  Mapping.init(TargetTriple);

  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  Int8PtrTy = IRB.getInt8PtrTy();
  Int8Ty = IRB.getInt8Ty();
  Int32Ty = IRB.getInt32Ty();

  HwasanCtorFunction = nullptr;

  // Older versions of Android do not have the required runtime support for
  // short granules, global or personality function instrumentation. On other
  // platforms we currently require using the latest version of the runtime.
  bool NewRuntime =
      !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);

  UseShortGranules =
      ClUseShortGranules.getNumOccurrences() ? ClUseShortGranules : NewRuntime;
  OutlinedChecks =
      TargetTriple.isAArch64() && TargetTriple.isOSBinFormatELF() &&
      (ClInlineAllChecks.getNumOccurrences() ? !ClInlineAllChecks : !Recover);

  if (ClMatchAllTag.getNumOccurrences()) {
    if (ClMatchAllTag != -1) {
      HasMatchAllTag = true;
      MatchAllTag = ClMatchAllTag & 0xFF;
    }
  } else if (CompileKernel) {
    HasMatchAllTag = true;
    MatchAllTag = 0xFF;
  }

  // If we don't have personality function support, fall back to landing pads.
  InstrumentLandingPads = ClInstrumentLandingPads.getNumOccurrences()
                              ? ClInstrumentLandingPads
                              : !NewRuntime;

  if (!CompileKernel) {
    createHwasanCtorComdat();
    bool InstrumentGlobals =
        ClGlobals.getNumOccurrences() ? ClGlobals : NewRuntime;
    if (InstrumentGlobals)
      instrumentGlobals();

    bool InstrumentPersonalityFunctions =
        ClInstrumentPersonalityFunctions.getNumOccurrences()
            ? ClInstrumentPersonalityFunctions
            : NewRuntime;
    if (InstrumentPersonalityFunctions)
      instrumentPersonalityFunctions();
  }

  if (!TargetTriple.isAndroid()) {
    Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
      auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
                                    GlobalValue::ExternalLinkage, nullptr,
                                    "__hwasan_tls", nullptr,
                                    GlobalVariable::InitialExecTLSModel);
      appendToCompilerUsed(M, GV);
      return GV;
    });
    ThreadPtrGlobal = cast<GlobalVariable>(C);
  }
}
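
// Declare the runtime callbacks up front. With the default "__hwasan_"
// prefix this creates names such as __hwasan_load4 and __hwasan_store16 for
// the power-of-two sizes, __hwasan_loadN/__hwasan_storeN for arbitrary
// sizes, and the *_noabort variants of all of these in recover mode.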
void HWAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";
    const std::string EndingStr = Recover ? "_noabort" : "";

    HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
        ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
        FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false));

    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          M.getOrInsertFunction(
              ClMemoryAccessCallbackPrefix + TypeStr +
                  itostr(1ULL << AccessSizeIndex) + EndingStr,
              FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false));
    }
  }

  HwasanTagMemoryFunc = M.getOrInsertFunction(
      "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy);
  HwasanGenerateTagFunc =
      M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);

  ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
                                     ArrayType::get(IRB.getInt8Ty(), 0));

  const std::string MemIntrinCallbackPrefix =
      CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
  HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                        IRB.getInt8PtrTy(), IntptrTy);
  HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
                                       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                       IRB.getInt8PtrTy(), IntptrTy);
  HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
                                       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                       IRB.getInt32Ty(), IntptrTy);

  HWAsanHandleVfork =
      M.getOrInsertFunction("__hwasan_handle_vfork", IRB.getVoidTy(), IntptrTy);
}

Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
  // An empty inline asm with input reg == output reg.
  // An opaque no-op cast, basically.
  // This prevents code bloat as a result of rematerializing trivial
  // definitions such as constants or global addresses at every load and store.
  InlineAsm *Asm =
      InlineAsm::get(FunctionType::get(Int8PtrTy, {Val->getType()}, false),
                     StringRef(""), StringRef("=r,0"),
                     /*hasSideEffects=*/false);
  return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
}

Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
  return getOpaqueNoopCast(IRB, ShadowGlobal);
}

Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
  if (Mapping.Offset != kDynamicShadowSentinel)
    return getOpaqueNoopCast(
        IRB, ConstantExpr::getIntToPtr(
                 ConstantInt::get(IntptrTy, Mapping.Offset), Int8PtrTy));

  if (Mapping.InGlobal) {
    return getDynamicShadowIfunc(IRB);
  } else {
    Value *GlobalDynamicAddress =
        IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
            kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
    return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
  }
}

bool HWAddressSanitizer::ignoreAccess(Value *Ptr) {
  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0)
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  return false;
}

void HWAddressSanitizer::getInterestingMemoryOperands(
    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->hasMetadata("nosanitize"))
    return;

  // Do not instrument the load fetching the dynamic shadow address.
  if (ShadowBase == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), None);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(), None);
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) {
      if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
          ignoreAccess(CI->getArgOperand(ArgNo)))
        continue;
      Type *Ty = CI->getParamByValType(ArgNo);
      Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
    }
  }
}

static unsigned getPointerOperandIndex(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperandIndex();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperandIndex();
  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
    return RMW->getPointerOperandIndex();
  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
    return XCHG->getPointerOperandIndex();
  report_fatal_error("Unexpected instruction");
  return -1;
}

static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}

void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
  if (TargetTriple.isAArch64())
    return;

  IRBuilder<> IRB(I);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  Value *UntaggedPtr =
      IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
  I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
}

Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
  // Mem >> Scale
  Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
  if (Mapping.Offset == 0)
    return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
  // (Mem >> Scale) + Offset
  return IRB.CreateGEP(Int8Ty, ShadowBase, Shadow);
}
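
// Emit an inline check for a single memory access: load the shadow byte for
// the accessed granule and compare it against the pointer's tag (bits
// 56..63). A shadow byte in the range 1..15 denotes a short granule, where
// only that many leading bytes are addressable and the real tag lives in
// the granule's last byte, so a mismatch additionally checks the access
// offset against the short granule size and the inline tag before failing.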
void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore) {
  const int64_t AccessInfo =
      (CompileKernel << HWASanAccessInfo::CompileKernelShift) +
      (HasMatchAllTag << HWASanAccessInfo::HasMatchAllShift) +
      (MatchAllTag << HWASanAccessInfo::MatchAllShift) +
      (Recover << HWASanAccessInfo::RecoverShift) +
      (IsWrite << HWASanAccessInfo::IsWriteShift) +
      (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
  IRBuilder<> IRB(InsertBefore);

  if (OutlinedChecks) {
    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
    Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
    IRB.CreateCall(Intrinsic::getDeclaration(
                       M, UseShortGranules
                              ? Intrinsic::hwasan_check_memaccess_shortgranules
                              : Intrinsic::hwasan_check_memaccess),
                   {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
    return;
  }

  Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift),
                                  IRB.getInt8Ty());
  Value *AddrLong = untagPointer(IRB, PtrLong);
  Value *Shadow = memToShadow(AddrLong, IRB);
  Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);

  if (HasMatchAllTag) {
    Value *TagNotIgnored = IRB.CreateICmpNE(
        PtrTag, ConstantInt::get(PtrTag->getType(), MatchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  Instruction *CheckTerm =
      SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
                                MDBuilder(*C).createBranchWeights(1, 100000));

  IRB.SetInsertPoint(CheckTerm);
  Value *OutOfShortGranuleTagRange =
      IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
  Instruction *CheckFailTerm =
      SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
                                MDBuilder(*C).createBranchWeights(1, 100000));

  IRB.SetInsertPoint(CheckTerm);
  Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
  PtrLowBits = IRB.CreateAdd(
      PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
  Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
  SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
                            MDBuilder(*C).createBranchWeights(1, 100000),
                            (DomTreeUpdater *)nullptr, nullptr,
                            CheckFailTerm->getParent());

  IRB.SetInsertPoint(CheckTerm);
  Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
  InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
  Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
  Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
  SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
                            MDBuilder(*C).createBranchWeights(1, 100000),
                            (DomTreeUpdater *)nullptr, nullptr,
                            CheckFailTerm->getParent());

  IRB.SetInsertPoint(CheckFailTerm);
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
  case Triple::x86_64:
    // The signal handler will find the data address in rdi.
    Asm = InlineAsm::get(
        FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
        "int3\nnopl " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
            "(%rax)",
        "{rdi}",
        /*hasSideEffects=*/true);
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    // The signal handler will find the data address in x0.
    Asm = InlineAsm::get(
        FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
        "brk #" +
            itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x0}",
        /*hasSideEffects=*/true);
    break;
  default:
    report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, PtrLong);
  if (Recover)
    cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
}
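
// Replace a mem intrinsic with a call to the matching runtime function
// (e.g. __hwasan_memcpy), so that the accessed memory ranges are checked by
// the runtime rather than bypassing instrumentation.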
void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall(
        HWAsanMemset,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}

bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
  Value *Addr = O.getPtr();

  LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");

  if (O.MaybeMask)
    return false; // FIXME: masked accesses are not yet supported.

  IRBuilder<> IRB(O.getInsn());
  if (isPowerOf2_64(O.TypeSize) &&
      (O.TypeSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
      (!O.Alignment || *O.Alignment >= (1ULL << Mapping.Scale) ||
       *O.Alignment >= O.TypeSize / 8)) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeSize);
    if (ClInstrumentWithCalls) {
      IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
                     IRB.CreatePointerCast(Addr, IntptrTy));
    } else {
      instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
    }
  } else {
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite],
                   {IRB.CreatePointerCast(Addr, IntptrTy),
                    ConstantInt::get(IntptrTy, O.TypeSize / 8)});
  }
  untagPointerOperand(O.getInsn(), Addr);

  return true;
}

static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
  uint64_t ArraySize = 1;
  if (AI.isArrayAllocation()) {
    const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
    assert(CI && "non-constant array size");
    ArraySize = CI->getZExtValue();
  }
  Type *Ty = AI.getAllocatedType();
  uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
  return SizeInBytes * ArraySize;
}

bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
                                   Value *Tag, size_t Size) {
  size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
  if (!UseShortGranules)
    Size = AlignedSize;

  Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
  if (ClInstrumentWithCalls) {
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
                    ConstantInt::get(IntptrTy, AlignedSize)});
  } else {
    size_t ShadowSize = Size >> Mapping.Scale;
    Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    if (ShadowSize)
      IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, Align(1));
    if (Size != AlignedSize) {
      IRB.CreateStore(
          ConstantInt::get(Int8Ty, Size % Mapping.getObjectAlignment()),
          IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
      IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32(
                                   Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy),
                                   AlignedSize - 1));
    }
  }
  return true;
}
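
// For example, with 16-byte granules and short granules enabled, tagging a
// 20-byte alloca writes two shadow bytes: the tag for the first granule and
// the short granule size 4 (20 % 16) for the second, and stores the real
// tag in byte 31, the last byte of the padded alloca.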
static unsigned RetagMask(unsigned AllocaNo) {
  // A list of 8-bit numbers that have at most one run of non-zero bits.
  // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for
  // these masks.
  // The list does not include the value 255, which is used for UAR.
  //
  // Because we are more likely to use earlier elements of this list than
  // later ones, it is sorted in increasing order of probability of collision
  // with a mask allocated (temporally) nearby. The program that generated
  // this list can be found at:
  // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
  static unsigned FastMasks[] = {0,  128, 64,  192, 32,  96,  224, 112, 240,
                                 48, 16,  120, 248, 56,  24,  8,   124, 252,
                                 60, 28,  12,  4,   126, 254, 62,  30,  14,
                                 6,  2,   127, 63,  31,  15,  7,   3,   1};
  return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
}

Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
  return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
}

Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  if (StackBaseTag)
    return StackBaseTag;
  // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
  // first).
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  auto GetStackPointerFn = Intrinsic::getDeclaration(
      M, Intrinsic::frameaddress,
      IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
  Value *StackPointer = IRB.CreateCall(
      GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});

  // Extract some entropy from the stack pointer for the tags.
  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
  // between functions).
  Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
  Value *StackTag =
      IRB.CreateXor(StackPointerLong, IRB.CreateLShr(StackPointerLong, 20),
                    "hwasan.stack.base.tag");
  return StackTag;
}

Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
                                        AllocaInst *AI, unsigned AllocaNo) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(StackTag,
                       ConstantInt::get(IntptrTy, RetagMask(AllocaNo)));
}

Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
  if (ClUARRetagToZero)
    return ConstantInt::get(IntptrTy, 0);
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, 0xFFU));
}

// Add a tag to an address.
Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
                                      Value *PtrLong, Value *Tag) {
  Value *TaggedPtrLong;
  if (CompileKernel) {
    // Kernel addresses have 0xFF in the most significant byte.
    Value *ShiftedTag = IRB.CreateOr(
        IRB.CreateShl(Tag, kPointerTagShift),
        ConstantInt::get(IntptrTy, (1ULL << kPointerTagShift) - 1));
    TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
  } else {
    // Userspace can simply OR in (tag << 56).
    Value *ShiftedTag = IRB.CreateShl(Tag, kPointerTagShift);
    TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
  }
  return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
}
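
// For example, in userspace, tagging pointer 0x00007fff12345678 with tag
// 0x2a yields 0x2a007fff12345678. In kernel mode, where untagged pointers
// carry 0xff in the top byte, the AND above replaces that byte with the new
// tag instead of ORing into it.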

// Remove tag from an address.
Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
  Value *UntaggedPtrLong;
  if (CompileKernel) {
    // Kernel addresses have 0xFF in the most significant byte.
    UntaggedPtrLong = IRB.CreateOr(
        PtrLong,
        ConstantInt::get(PtrLong->getType(), 0xFFULL << kPointerTagShift));
  } else {
    // Userspace addresses have 0x00.
    UntaggedPtrLong = IRB.CreateAnd(
        PtrLong,
        ConstantInt::get(PtrLong->getType(), ~(0xFFULL << kPointerTagShift)));
  }
  return UntaggedPtrLong;
}

Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
    // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
    // in Bionic's libc/private/bionic_tls.h.
    Function *ThreadPointerFunc =
        Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
    Value *SlotPtr = IRB.CreatePointerCast(
        IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
                               IRB.CreateCall(ThreadPointerFunc), 0x30),
        Ty->getPointerTo(0));
    return SlotPtr;
  }
  if (ThreadPtrGlobal)
    return ThreadPtrGlobal;

  return nullptr;
}
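
// Emit the function prologue: compute the shadow base and, when recording
// stack history, append a (PC, SP) record to the per-thread ring buffer
// reached through the TLS slot. As an example of the wrap-around logic
// below: if the top byte of ThreadLong is 2, the buffer is two 4 KiB pages,
// the wrap mask is ~(2 << 12), and advancing the record pointer across an
// 8 KiB boundary clears that bit, wrapping back to the buffer start.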
void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
  if (!Mapping.InTls) {
    ShadowBase = getShadowNonTls(IRB);
    return;
  }

  if (!WithFrameRecord && TargetTriple.isAndroid()) {
    ShadowBase = getDynamicShadowIfunc(IRB);
    return;
  }

  Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
  assert(SlotPtr);

  Value *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
  // Extract the address field from ThreadLong. Unnecessary on AArch64 with
  // TBI.
  Value *ThreadLongMaybeUntagged =
      TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);

  if (WithFrameRecord) {
    Function *F = IRB.GetInsertBlock()->getParent();
    StackBaseTag = IRB.CreateAShr(ThreadLong, 3);

    // Prepare ring buffer data.
    Value *PC;
    if (TargetTriple.getArch() == Triple::aarch64)
      PC = readRegister(IRB, "pc");
    else
      PC = IRB.CreatePtrToInt(F, IntptrTy);
    Module *M = F->getParent();
    auto GetStackPointerFn = Intrinsic::getDeclaration(
        M, Intrinsic::frameaddress,
        IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
    Value *SP = IRB.CreatePtrToInt(
        IRB.CreateCall(GetStackPointerFn,
                       {Constant::getNullValue(IRB.getInt32Ty())}),
        IntptrTy);
    // Mix SP and PC.
    // Assumptions:
    //   PC is 0x0000PPPPPPPPPPPP  (48 bits are meaningful, others are zero)
    //   SP is 0xsssssssssssSSSS0  (4 lower bits are zero)
    // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
    //   0xSSSSPPPPPPPPPPPP
    SP = IRB.CreateShl(SP, 44);

    // Store data to ring buffer.
    Value *RecordPtr =
        IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IntptrTy->getPointerTo(0));
    IRB.CreateStore(IRB.CreateOr(PC, SP), RecordPtr);

    // Update the ring buffer. Top byte of ThreadLong defines the size of the
    // buffer in pages, it must be a power of two, and the start of the buffer
    // must be aligned by twice that much. Therefore wrap around of the ring
    // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
    // The use of AShr instead of LShr is due to
    //   https://bugs.llvm.org/show_bug.cgi?id=39030
    // Runtime library makes sure not to use the highest bit.
    Value *WrapMask = IRB.CreateXor(
        IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
        ConstantInt::get(IntptrTy, (uint64_t)-1));
    Value *ThreadLongNew = IRB.CreateAnd(
        IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
    IRB.CreateStore(ThreadLongNew, SlotPtr);
  }

  // Get shadow base address by aligning RecordPtr up.
  // Note: this is not correct if the pointer is already aligned.
  // Runtime library will make sure this never happens.
  ShadowBase = IRB.CreateAdd(
      IRB.CreateOr(
          ThreadLongMaybeUntagged,
          ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
      ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
  ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
}

Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Function *ReadRegister =
      Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy);
  MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)});
  Value *Args[] = {MetadataAsValue::get(*C, MD)};
  return IRB.CreateCall(ReadRegister, Args);
}

bool HWAddressSanitizer::instrumentLandingPads(
    SmallVectorImpl<Instruction *> &LandingPadVec) {
  for (auto *LP : LandingPadVec) {
    IRBuilder<> IRB(LP->getNextNode());
    IRB.CreateCall(
        HWAsanHandleVfork,
        {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp"
                                                                      : "sp")});
  }
  return true;
}

bool HWAddressSanitizer::instrumentStack(
    SmallVectorImpl<AllocaInst *> &Allocas,
    DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap,
    SmallVectorImpl<Instruction *> &RetVec, Value *StackTag) {
  // Ideally, we want to calculate tagged stack base pointer, and rewrite all
  // alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
  // temp, shift-OR it into each alloca address and xor with the retag mask.
  // This generates one extra instruction per alloca use.
  for (unsigned N = 0; N < Allocas.size(); ++N) {
    auto *AI = Allocas[N];
    IRBuilder<> IRB(AI->getNextNode());

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    AI->replaceUsesWithIf(Replacement,
                          [AILong](Use &U) { return U.getUser() != AILong; });

    for (auto *DDI : AllocaDbgMap.lookup(AI)) {
      // Prepend "tag_offset, N" to the dwarf expression.
      // Tag offset logically applies to the alloca pointer, and it makes sense
      // to put it at the beginning of the expression.
      SmallVector<uint64_t, 8> NewOps = {dwarf::DW_OP_LLVM_tag_offset,
                                         RetagMask(N)};
      DDI->setArgOperand(
          2, MetadataAsValue::get(*C, DIExpression::prependOpcodes(
                                          DDI->getExpression(), NewOps)));
    }

    size_t Size = getAllocaSizeInBytes(*AI);
    tagAlloca(IRB, AI, Tag, Size);

    for (auto RI : RetVec) {
      IRB.SetInsertPoint(RI);

      // Re-tag alloca memory with the special UAR tag.
      Value *Tag = getUARTag(IRB, StackTag);
      tagAlloca(IRB, AI, Tag, alignTo(Size, Mapping.getObjectAlignment()));
    }
  }

  return true;
}

bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  return (AI.getAllocatedType()->isSized() &&
          // FIXME: instrument dynamic allocas, too
          AI.isStaticAlloca() &&
          // alloca() may be called with 0 size, ignore it.
          getAllocaSizeInBytes(AI) > 0 &&
          // We are only interested in allocas not promotable to registers.
          // Promotable allocas are common under -O0.
          !isAllocaPromotable(&AI) &&
          // inalloca allocas are not treated as static, and we don't want
          // dynamic alloca instrumentation for them as well.
          !AI.isUsedWithInAlloca() &&
          // swifterror allocas are register promoted by ISel
          !AI.isSwiftError());
}
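
// Instrument a single function: gather the interesting memory operands,
// allocas, returns and landing pads in one pass over the body, emit the
// prologue, tag the instrumented allocas (padding them to granule size so
// short granule tags have somewhere to live), and finally instrument the
// individual memory accesses and mem intrinsics.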
bool HWAddressSanitizer::sanitizeFunction(Function &F) {
  if (&F == HwasanCtorFunction)
    return false;

  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return false;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<AllocaInst *, 8> AllocasToInstrument;
  SmallVector<Instruction *, 8> RetVec;
  SmallVector<Instruction *, 8> LandingPadVec;
  DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> AllocaDbgMap;
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (ClInstrumentStack)
        if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
          if (isInterestingAlloca(*AI))
            AllocasToInstrument.push_back(AI);
          continue;
        }

      if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
          isa<CleanupReturnInst>(Inst))
        RetVec.push_back(&Inst);

      if (auto *DDI = dyn_cast<DbgVariableIntrinsic>(&Inst))
        if (auto *Alloca =
                dyn_cast_or_null<AllocaInst>(DDI->getVariableLocation()))
          AllocaDbgMap[Alloca].push_back(DDI);

      if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
        LandingPadVec.push_back(&Inst);

      getInterestingMemoryOperands(&Inst, OperandsToInstrument);

      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
        IntrinToInstrument.push_back(MI);
    }
  }

  initializeCallbacks(*F.getParent());

  bool Changed = false;

  if (!LandingPadVec.empty())
    Changed |= instrumentLandingPads(LandingPadVec);

  if (AllocasToInstrument.empty() && F.hasPersonalityFn() &&
      F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
    // __hwasan_personality_thunk is a no-op for functions without an
    // instrumented stack, so we can drop it.
    F.setPersonalityFn(nullptr);
    Changed = true;
  }

  if (AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
      IntrinToInstrument.empty())
    return Changed;

  assert(!ShadowBase);

  Instruction *InsertPt = &*F.getEntryBlock().begin();
  IRBuilder<> EntryIRB(InsertPt);
  emitPrologue(EntryIRB,
               /*WithFrameRecord*/ ClRecordStackHistory &&
                   !AllocasToInstrument.empty());

  if (!AllocasToInstrument.empty()) {
    Value *StackTag =
        ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
    instrumentStack(AllocasToInstrument, AllocaDbgMap, RetVec, StackTag);
  }

  // Pad and align each of the allocas that we instrumented to stop small
  // uninteresting allocas from hiding in instrumented alloca's padding and so
  // that we have enough space to store real tags for short granules.
  DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap;
  for (AllocaInst *AI : AllocasToInstrument) {
    uint64_t Size = getAllocaSizeInBytes(*AI);
    uint64_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
    AI->setAlignment(
        Align(std::max(AI->getAlignment(), Mapping.getObjectAlignment())));
    if (Size != AlignedSize) {
      Type *AllocatedType = AI->getAllocatedType();
      if (AI->isArrayAllocation()) {
        uint64_t ArraySize =
            cast<ConstantInt>(AI->getArraySize())->getZExtValue();
        AllocatedType = ArrayType::get(AllocatedType, ArraySize);
      }
      Type *TypeWithPadding = StructType::get(
          AllocatedType, ArrayType::get(Int8Ty, AlignedSize - Size));
      auto *NewAI = new AllocaInst(
          TypeWithPadding, AI->getType()->getAddressSpace(), nullptr, "", AI);
      NewAI->takeName(AI);
      NewAI->setAlignment(AI->getAlign());
      NewAI->setUsedWithInAlloca(AI->isUsedWithInAlloca());
      NewAI->setSwiftError(AI->isSwiftError());
      NewAI->copyMetadata(*AI);
      auto *Bitcast = new BitCastInst(NewAI, AI->getType(), "", AI);
      AI->replaceAllUsesWith(Bitcast);
      AllocaToPaddedAllocaMap[AI] = NewAI;
    }
  }

  if (!AllocaToPaddedAllocaMap.empty()) {
    for (auto &BB : F)
      for (auto &Inst : BB)
        if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst))
          if (auto *AI =
                  dyn_cast_or_null<AllocaInst>(DVI->getVariableLocation()))
            if (auto *NewAI = AllocaToPaddedAllocaMap.lookup(AI))
              DVI->setArgOperand(
                  0, MetadataAsValue::get(*C, LocalAsMetadata::get(NewAI)));
    for (auto &P : AllocaToPaddedAllocaMap)
      P.first->eraseFromParent();
  }

  // If we split the entry block, move any allocas that were originally in the
  // entry block back into the entry block so that they aren't treated as
  // dynamic allocas.
  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
    InsertPt = &*F.getEntryBlock().begin();
    for (auto II = EntryIRB.GetInsertBlock()->begin(),
              IE = EntryIRB.GetInsertBlock()->end();
         II != IE;) {
      Instruction *I = &*II++;
      if (auto *AI = dyn_cast<AllocaInst>(I))
        if (isa<ConstantInt>(AI->getArraySize()))
          I->moveBefore(InsertPt);
    }
  }

  for (auto &Operand : OperandsToInstrument)
    instrumentMemAccess(Operand);

  if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
    for (auto Inst : IntrinToInstrument)
      instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
  }

  ShadowBase = nullptr;
  StackBaseTag = nullptr;

  return true;
}
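
// Instrument one global: pad it to a multiple of the granule size (16 bytes
// at the default scale) with the short granule tag in the last byte, emit a
// descriptor for the runtime to tag the shadow, and replace all uses with
// an alias whose address carries the tag. For a 13-byte global with tag
// 0x2a, the padding is the three bytes {0, 0, 0x2a} and the descriptor word
// is (13 | 0x2a << 24).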
void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
  Constant *Initializer = GV->getInitializer();
  uint64_t SizeInBytes =
      M.getDataLayout().getTypeAllocSize(Initializer->getType());
  uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
  if (SizeInBytes != NewSize) {
    // Pad the initializer out to the next multiple of 16 bytes and add the
    // required short granule tag.
    std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
    Init.back() = Tag;
    Constant *Padding = ConstantDataArray::get(*C, Init);
    Initializer = ConstantStruct::getAnon({Initializer, Padding});
  }

  auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
                                   GlobalValue::ExternalLinkage, Initializer,
                                   GV->getName() + ".hwasan");
  NewGV->copyAttributesFrom(GV);
  NewGV->setLinkage(GlobalValue::PrivateLinkage);
  NewGV->copyMetadata(GV, 0);
  NewGV->setAlignment(
      MaybeAlign(std::max(GV->getAlignment(), Mapping.getObjectAlignment())));

  // It is invalid to ICF two globals that have different tags. In the case
  // where the size of the global is a multiple of the tag granularity the
  // contents of the globals may be the same but the tags (i.e. symbol values)
  // may be different, and the symbols are not considered during ICF. In the
  // case where the size is not a multiple of the granularity, the short
  // granule tags would discriminate two globals with different tags, but
  // there would otherwise be nothing stopping such a global from being
  // incorrectly ICF'd with an uninstrumented (i.e. tag 0) global that
  // happened to have the short granule tag in the last byte.
  NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);

  // Descriptor format (assuming little-endian):
  // bytes 0-3: relative address of global
  // bytes 4-6: size of global (16MB ought to be enough for anyone, but in
  //            case it isn't, we create multiple descriptors)
  // byte 7: tag
  auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
  const uint64_t MaxDescriptorSize = 0xfffff0;
  for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
       DescriptorPos += MaxDescriptorSize) {
    auto *Descriptor =
        new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
                           nullptr, GV->getName() + ".hwasan.descriptor");
    auto *GVRelPtr = ConstantExpr::getTrunc(
        ConstantExpr::getAdd(
            ConstantExpr::getSub(
                ConstantExpr::getPtrToInt(NewGV, Int64Ty),
                ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
            ConstantInt::get(Int64Ty, DescriptorPos)),
        Int32Ty);
    uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
    auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
    Descriptor->setComdat(NewGV->getComdat());
    Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
    Descriptor->setSection("hwasan_globals");
    Descriptor->setMetadata(LLVMContext::MD_associated,
                            MDNode::get(*C, ValueAsMetadata::get(NewGV)));
    appendToCompilerUsed(M, Descriptor);
  }

  Constant *Aliasee = ConstantExpr::getIntToPtr(
      ConstantExpr::getAdd(
          ConstantExpr::getPtrToInt(NewGV, Int64Ty),
          ConstantInt::get(Int64Ty, uint64_t(Tag) << kPointerTagShift)),
      GV->getType());
  auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
                                    GV->getLinkage(), "", Aliasee, &M);
  Alias->setVisibility(GV->getVisibility());
  Alias->takeName(GV);
  GV->replaceAllUsesWith(Alias);
  GV->eraseFromParent();
}
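
// Instrument all eligible globals. Tags are assigned sequentially, starting
// from a hash of the source file name so that they are deterministic for a
// given translation unit but differ across files; tag 0 is skipped because
// it denotes untagged memory.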
void HWAddressSanitizer::instrumentGlobals() {
  std::vector<GlobalVariable *> Globals;
  for (GlobalVariable &GV : M.globals()) {
    if (GV.isDeclarationForLinker() || GV.getName().startswith("llvm.") ||
        GV.isThreadLocal())
      continue;

    // Common symbols can't have aliases point to them, so they can't be
    // tagged.
    if (GV.hasCommonLinkage())
      continue;

    // Globals with custom sections may be used in __start_/__stop_
    // enumeration, which would be broken both by adding tags and potentially
    // by the extra padding/alignment that we insert.
    if (GV.hasSection())
      continue;

    Globals.push_back(&GV);
  }

  MD5 Hasher;
  Hasher.update(M.getSourceFileName());
  MD5::MD5Result Hash;
  Hasher.final(Hash);
  uint8_t Tag = Hash[0];

  for (GlobalVariable *GV : Globals) {
    // Skip tag 0 in order to avoid collisions with untagged memory.
    if (Tag == 0)
      Tag = 1;
    instrumentGlobal(GV, Tag++);
  }
}

void HWAddressSanitizer::instrumentPersonalityFunctions() {
  // We need to untag stack frames as we unwind past them. That is the job of
  // the personality function wrapper, which either wraps an existing
  // personality function or acts as a personality function on its own. Each
  // function that has a personality function or that can be unwound past has
  // its personality function changed to a thunk that calls the personality
  // function wrapper in the runtime.
  MapVector<Constant *, std::vector<Function *>> PersonalityFns;
  for (Function &F : M) {
    if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
      continue;

    if (F.hasPersonalityFn()) {
      PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
    } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
      PersonalityFns[nullptr].push_back(&F);
    }
  }

  if (PersonalityFns.empty())
    return;

  FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
      "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty,
      Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy);
  FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
  FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);

  for (auto &P : PersonalityFns) {
    std::string ThunkName = kHwasanPersonalityThunkName;
    if (P.first)
      ThunkName += ("." + P.first->getName()).str();
    FunctionType *ThunkFnTy = FunctionType::get(
        Int32Ty, {Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int8PtrTy}, false);
    bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
                               cast<GlobalValue>(P.first)->hasLocalLinkage());
    auto *ThunkFn = Function::Create(ThunkFnTy,
                                     IsLocal ? GlobalValue::InternalLinkage
                                             : GlobalValue::LinkOnceODRLinkage,
                                     ThunkName, &M);
    if (!IsLocal) {
      ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
      ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
    }

    auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
    IRBuilder<> IRB(BB);
    CallInst *WrapperCall = IRB.CreateCall(
        HwasanPersonalityWrapper,
        {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
         ThunkFn->getArg(3), ThunkFn->getArg(4),
         P.first ? IRB.CreateBitCast(P.first, Int8PtrTy)
                 : Constant::getNullValue(Int8PtrTy),
         IRB.CreateBitCast(UnwindGetGR.getCallee(), Int8PtrTy),
         IRB.CreateBitCast(UnwindGetCFA.getCallee(), Int8PtrTy)});
    WrapperCall->setTailCall();
    IRB.CreateRet(WrapperCall);

    for (Function *F : P.second)
      F->setPersonalityFn(ThunkFn);
  }
}

void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple) {
  Scale = kDefaultShadowScale;
  if (ClMappingOffset.getNumOccurrences() > 0) {
    InGlobal = false;
    InTls = false;
    Offset = ClMappingOffset;
  } else if (ClEnableKhwasan || ClInstrumentWithCalls) {
    InGlobal = false;
    InTls = false;
    Offset = 0;
  } else if (ClWithIfunc) {
    InGlobal = true;
    InTls = false;
    Offset = kDynamicShadowSentinel;
  } else if (ClWithTls) {
    InGlobal = false;
    InTls = true;
    Offset = kDynamicShadowSentinel;
  } else {
    InGlobal = false;
    InTls = false;
    Offset = kDynamicShadowSentinel;
  }
}