//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define __MM_MALLOC_H
#include <emmintrin.h>
typedef __m128i m128;
#endif

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
#include <dlfcn.h>
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#if !SANITIZER_GO
  if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_finalize"))
    return reinterpret_cast<decltype(&__tsan_on_finalize)>(ptr)(failed);
#endif
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#if !SANITIZER_GO
  if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_initialize")) {
    return reinterpret_cast<decltype(&__tsan_on_initialize)>(ptr)();
  }
#endif
}
#endif
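
// Example (illustrative sketch, not part of the runtime): a front-end can
// hook initialization/finalization by defining these interface symbols in
// the main executable; the dlsym-based defaults above then dispatch to them.
// The exact prototypes are declared in tsan_interface.h; the shapes below
// are inferred from how the callers above use them, so verify against that
// header before relying on them.
//
//   extern "C" void __tsan_on_initialize() {
//     // Called once when the TSan runtime finishes initializing.
//   }
//   extern "C" int __tsan_on_finalize(int failed) {
//     // Return non-zero to force a failing exit status.
//     return failed;
//   }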

static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Release the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace)) {
    ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
    uptr unused = hdr + sizeof(Trace) - hdr_end;
    if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
      Report("ThreadSanitizer: failed to mprotect(%p, %p)\n",
          hdr_end, unused);
      CHECK("unable to mprotect" && 0);
    }
  }
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      nmissed_expected(),
      thread_registry(new (thread_registry_placeholder) ThreadRegistry(
          CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      racy_addresses(),
      fired_suppressions_mtx(MutexTypeFired),
      clock_alloc(LINKER_INITIALIZED, "clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
                         unsigned reuse_count, uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
    : fast_state(tid, epoch)
      // Do not touch these, rely on zero initialization,
      // they may be accessed before the ctor.
      // , ignore_reads_and_writes()
      // , ignore_interceptors()
      ,
      clock(tid, reuse_count)
#if !SANITIZER_GO
      ,
      jmp_bufs()
#endif
      ,
      tid(tid),
      unique_id(unique_id),
      stk_addr(stk_addr),
      stk_size(stk_size),
      tls_addr(tls_addr),
      tls_size(tls_size)
#if !SANITIZER_GO
      ,
      last_sleep_clock(tid)
#endif
{
}

#if !SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}
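
// Usage sketch (flag name as consumed in BackgroundThread below):
//   TSAN_OPTIONS=profile_memory=stdout       # write profiles to stdout
//   TSAN_OPTIONS=profile_memory=/tmp/mprof   # writes to /tmp/mprof.<pid>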

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init();
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename;
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
            filename.data());
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}
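
// Usage sketch (flag names exactly as consumed above): e.g.
//   TSAN_OPTIONS=flush_memory_ms=1000:memory_limit_mb=4096:flush_symbolizer_ms=5000
// flushes shadow memory every second, flushes again when RSS outgrows the
// 4096MB limit (compared against the previous RSS sample), and drops the
// symbolizer cache 5 seconds after the last symbolization.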

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}

#if !SANITIZER_GO
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                               "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedSuperNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
        addr, size);
    Die();
  }
}
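
// CheckShadowMapping below verifies that the app->shadow and app->meta
// mappings are linear within each user region; a sketch of the invariant it
// checks, for any p1 < p2 in the same region:
//   MemToShadow(p2) - MemToShadow(p1) == (p2 - p1) * kShadowMultiplier
//   (MemToMeta(p2) - MemToMeta(p1)) / kMetaShadowSize
//       == (p2 - p1) / kMetaShadowCell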

static void CheckShadowMapping() {
  uptr beg, end;
  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
    // Skip cases for empty regions (heap definition for architectures that
    // do not use 64-bit allocator).
    if (beg == end)
      continue;
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    uptr prev = 0;
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p, ShadowToMem(s));
        CHECK(IsMetaMem(m));
        if (prev) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
          const uptr prev_s = MemToShadow(prev);
          const uptr prev_m = (uptr)MemToMeta(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ((m - prev_m) / kMetaShadowSize,
                   (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
457 " Call __tsan_resume().\n", 458 (int)internal_getpid()); 459 while (__tsan_resumed == 0) {} 460 } 461 462 OnInitialize(); 463 } 464 465 void MaybeSpawnBackgroundThread() { 466 // On MIPS, TSan initialization is run before 467 // __pthread_initialize_minimal_internal() is finished, so we can not spawn 468 // new threads. 469 #if !SANITIZER_GO && !defined(__mips__) 470 static atomic_uint32_t bg_thread = {}; 471 if (atomic_load(&bg_thread, memory_order_relaxed) == 0 && 472 atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) { 473 StartBackgroundThread(); 474 SetSandboxingCallback(StopBackgroundThread); 475 } 476 #endif 477 } 478 479 480 int Finalize(ThreadState *thr) { 481 bool failed = false; 482 483 if (common_flags()->print_module_map == 1) 484 DumpProcessMap(); 485 486 if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1) 487 SleepForMillis(flags()->atexit_sleep_ms); 488 489 // Wait for pending reports. 490 ctx->report_mtx.Lock(); 491 { ScopedErrorReportLock l; } 492 ctx->report_mtx.Unlock(); 493 494 #if !SANITIZER_GO 495 if (Verbosity()) AllocatorPrintStats(); 496 #endif 497 498 ThreadFinalize(thr); 499 500 if (ctx->nreported) { 501 failed = true; 502 #if !SANITIZER_GO 503 Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported); 504 #else 505 Printf("Found %d data race(s)\n", ctx->nreported); 506 #endif 507 } 508 509 if (ctx->nmissed_expected) { 510 failed = true; 511 Printf("ThreadSanitizer: missed %d expected races\n", 512 ctx->nmissed_expected); 513 } 514 515 if (common_flags()->print_suppressions) 516 PrintMatchedSuppressions(); 517 #if !SANITIZER_GO 518 if (flags()->print_benign) 519 PrintMatchedBenignRaces(); 520 #endif 521 522 failed = OnFinalize(failed); 523 524 return failed ? common_flags()->exitcode : 0; 525 } 526 527 #if !SANITIZER_GO 528 void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS { 529 ctx->thread_registry->Lock(); 530 ctx->report_mtx.Lock(); 531 ScopedErrorReportLock::Lock(); 532 // Suppress all reports in the pthread_atfork callbacks. 533 // Reports will deadlock on the report_mtx. 534 // We could ignore sync operations as well, 535 // but so far it's unclear if it will do more good or harm. 536 // Unnecessarily ignoring things can lead to false positives later. 537 thr->suppress_reports++; 538 // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and 539 // we'll assert in CheckNoLocks() unless we ignore interceptors. 540 thr->ignore_interceptors++; 541 } 542 543 void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS { 544 thr->suppress_reports--; // Enabled in ForkBefore. 545 thr->ignore_interceptors--; 546 ScopedErrorReportLock::Unlock(); 547 ctx->report_mtx.Unlock(); 548 ctx->thread_registry->Unlock(); 549 } 550 551 void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS { 552 thr->suppress_reports--; // Enabled in ForkBefore. 553 thr->ignore_interceptors--; 554 ScopedErrorReportLock::Unlock(); 555 ctx->report_mtx.Unlock(); 556 ctx->thread_registry->Unlock(); 557 558 uptr nthread = 0; 559 ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */); 560 VPrintf(1, "ThreadSanitizer: forked new process with pid %d," 561 " parent had %d threads\n", (int)internal_getpid(), (int)nthread); 562 if (nthread == 1) { 563 StartBackgroundThread(); 564 } else { 565 // We've just forked a multi-threaded process. We cannot reasonably function 566 // after that (some mutexes may be locked before fork). 

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}


int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1)
    DumpProcessMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#if !SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}
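
// Usage note (hedged): a run that produced reports exits with
// common_flags()->exitcode (66 by default for TSan, unless overridden, e.g.
// via TSAN_OPTIONS=exitcode=0), and OnFinalize gives a front-end the last
// word on whether the run counts as failed.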

#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
  ScopedErrorReportLock::Lock();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  thr->ignore_interceptors++;
}

void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
      newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}
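
// Worked example (derived from the two functions above): the trace holds
// 2^(kTracePartSizeBits + history_size + 1) events, split into parts of
// kTracePartSize events each, so raising history_size by 1 doubles both
// TraceSize() and TraceParts().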

#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

ALWAYS_INLINE
void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#if !SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();
  bool stored = false;

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 2;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 3;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
#endif

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(stored))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}
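
// Illustrative example (derived from the checks in the function below): an
// unaligned range is split into pieces that each fit in a single 8-byte
// shadow cell, e.g. a 7-byte access at addr with addr % 8 == 1 becomes a
// 4-byte, then a 2-byte, then a 1-byte access.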

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                           int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]   = access[32:63]
  // addr0[32:63]  = access[32:63]
  // addr0[64:95]  = access[32:63]
  // addr0[96:127] = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]   = shadow0[32:63]
  // addr_vect[32:63]  = shadow0[96:127]
  // addr_vect[64:95]  = shadow1[32:63]
  // addr_vect[96:127] = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63] = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]   = sync_epoch[0:31]
  // epoch[32:63]  = sync_epoch[0:31]
  // epoch[64:95]  = sync_epoch[0:31]
  // epoch[96:127] = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect > sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    return;
  }

  FastState fast_state = thr->fast_state;
  if (UNLIKELY(fast_state.GetIgnoreBit())) {
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cpp
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
  if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
      Die();
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size) {
  if (thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, addr, size);
  else
    MemoryResetRange(thr, pc, addr, size);
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}
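
// Usage note (hedged): the ignore counters above are nestable begin/end
// pairs; they back, for example, the AnnotateIgnoreReadsBegin/End style
// annotations implemented in tsan_interface_ann.cpp, and are also used
// internally (e.g. around fork, see ForkChildAfter above).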

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry, "ThreadRegistry", {}},
    {MutexTypeTrace, "Trace", {MutexLeaf}},
    {MutexTypeReport, "Report", {MutexTypeSyncVar}},
    {MutexTypeSyncVar, "SyncVar", {}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
}  // namespace __sanitizer
#endif

#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
# include "tsan_interface_inl.h"
#endif