//===-- dfsan.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of DataFlowSanitizer.
//
// DataFlowSanitizer runtime. This file defines the public interface to
// DataFlowSanitizer as well as the definition of certain runtime functions
// called automatically by the compiler (specifically the instrumentation pass
// in llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp).
//
// The public interface is defined in include/sanitizer/dfsan_interface.h whose
// functions are prefixed dfsan_ while the compiler interface functions are
// prefixed __dfsan_.
//===----------------------------------------------------------------------===//

#include "dfsan/dfsan.h"

#include "dfsan/dfsan_chained_origin_depot.h"
#include "dfsan/dfsan_flags.h"
#include "dfsan/dfsan_origin.h"
#include "dfsan/dfsan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace.h"

using namespace __dfsan;

Flags __dfsan::flags_data;

// The size of TLS variables. These constants must be kept in sync with the
// ones in DataFlowSanitizer.cpp.
static const int kDFsanArgTlsSize = 800;
static const int kDFsanRetvalTlsSize = 800;
static const int kDFsanArgOriginTlsSize = 800;

SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u64
    __dfsan_retval_tls[kDFsanRetvalTlsSize / sizeof(u64)];
SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u32 __dfsan_retval_origin_tls;
SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u64
    __dfsan_arg_tls[kDFsanArgTlsSize / sizeof(u64)];
SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u32
    __dfsan_arg_origin_tls[kDFsanArgOriginTlsSize / sizeof(u32)];

// Instrumented code may set this value according to the -dfsan-track-origins
// option.
// * undefined or 0: do not track origins.
// * 1: track origins at memory store operations.
// * 2: track origins at memory load and store operations.
// TODO: track callsites.
extern "C" SANITIZER_WEAK_ATTRIBUTE const int __dfsan_track_origins;

extern "C" SANITIZER_INTERFACE_ATTRIBUTE int dfsan_get_track_origins() {
  return &__dfsan_track_origins ? __dfsan_track_origins : 0;
}

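// A typical way to enable origin tracking (a sketch, per the DataFlowSanitizer
// documentation; origin tracking is selected at compile time):
//
//   clang -fsanitize=dataflow -mllvm -dfsan-track-origins=1 app.c -o app
//
// The instrumentation then defines __dfsan_track_origins with the chosen value
// and dfsan_get_track_origins() returns it at runtime; when the option is not
// given, the weak symbol stays undefined and 0 is returned.
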
// On Linux/x86_64, memory is laid out as follows:
//
// +--------------------+ 0x800000000000 (top of memory)
// |    application 3   |
// +--------------------+ 0x700000000000
// |       invalid      |
// +--------------------+ 0x610000000000
// |      origin 1      |
// +--------------------+ 0x600000000000
// |    application 2   |
// +--------------------+ 0x510000000000
// |      shadow 1      |
// +--------------------+ 0x500000000000
// |       invalid      |
// +--------------------+ 0x400000000000
// |      origin 3      |
// +--------------------+ 0x300000000000
// |      shadow 3      |
// +--------------------+ 0x200000000000
// |      origin 2      |
// +--------------------+ 0x110000000000
// |       invalid      |
// +--------------------+ 0x100000000000
// |      shadow 2      |
// +--------------------+ 0x010000000000
// |    application 1   |
// +--------------------+ 0x000000000000
//
// MEM_TO_SHADOW(mem) = mem ^ 0x500000000000
// SHADOW_TO_ORIGIN(shadow) = shadow + 0x100000000000

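// Worked example of the mapping above:
//   app 0x000000601000 (application 1)
//     MEM_TO_SHADOW:    0x000000601000 ^ 0x500000000000 = 0x500000601000 (shadow 1)
//     SHADOW_TO_ORIGIN: 0x500000601000 + 0x100000000000 = 0x600000601000 (origin 1)
//   app 0x700000001000 (application 3)
//     MEM_TO_SHADOW:    0x700000001000 ^ 0x500000000000 = 0x200000001000 (shadow 3)
//     SHADOW_TO_ORIGIN: 0x200000001000 + 0x100000000000 = 0x300000001000 (origin 3)
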
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
dfsan_label __dfsan_union_load(const dfsan_label *ls, uptr n) {
  dfsan_label label = ls[0];
  for (uptr i = 1; i != n; ++i)
    label |= ls[i];
  return label;
}

// Return the union of all the n labels from addr in the high 32 bits, and the
// origin of the first tainted byte in the low 32 bits.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE u64
__dfsan_load_label_and_origin(const void *addr, uptr n) {
  dfsan_label label = 0;
  u64 ret = 0;
  uptr p = (uptr)addr;
  dfsan_label *s = shadow_for((void *)p);
  for (uptr i = 0; i < n; ++i) {
    dfsan_label l = s[i];
    if (!l)
      continue;
    label |= l;
    if (!ret)
      ret = *(dfsan_origin *)origin_for((void *)(p + i));
  }
  return ret | (u64)label << 32;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __dfsan_unimplemented(char *fname) {
  if (flags().warn_unimplemented)
    Report("WARNING: DataFlowSanitizer: call to uninstrumented function %s\n",
           fname);
}

// Use '-mllvm -dfsan-debug-nonzero-labels' and break on this function
// to try to figure out where labels are being introduced in a nominally
// label-free program.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_nonzero_label() {
  if (flags().warn_nonzero_labels)
    Report("WARNING: DataFlowSanitizer: saw nonzero label\n");
}

// Indirect call to an uninstrumented vararg function. We don't have a way of
// handling these at the moment.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__dfsan_vararg_wrapper(const char *fname) {
  Report("FATAL: DataFlowSanitizer: unsupported indirect call to vararg "
         "function %s\n", fname);
  Die();
}

// Returns the union of two labels.
SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
dfsan_union(dfsan_label l1, dfsan_label l2) {
  return l1 | l2;
}

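// Labels are combined with bitwise OR, so distinct taint sources are usually
// given distinct bits. A minimal sketch (dfsan_has_label is defined further
// down in this file):
//
//   dfsan_label a = 1, b = 4;
//   dfsan_label u = dfsan_union(a, b);  // u == 5
//   dfsan_has_label(u, b);              // nonzero: b's bit is present in u
//   dfsan_has_label(u, 2);              // zero: bit 2 was never set
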
static const uptr kOriginAlign = sizeof(dfsan_origin);
static const uptr kOriginAlignMask = ~(kOriginAlign - 1UL);

static uptr OriginAlignUp(uptr u) {
  return (u + kOriginAlign - 1) & kOriginAlignMask;
}

static uptr OriginAlignDown(uptr u) { return u & kOriginAlignMask; }

// Return the origin of the first tainted byte in the size bytes starting at
// the address addr.
static dfsan_origin GetOriginIfTainted(uptr addr, uptr size) {
  for (uptr i = 0; i < size; ++i, ++addr) {
    dfsan_label *s = shadow_for((void *)addr);

    if (*s) {
      // Validate address region.
      CHECK(MEM_IS_SHADOW(s));
      return *(dfsan_origin *)origin_for((void *)addr);
    }
  }
  return 0;
}

// On platforms that support only the slow unwinder, the store context size
// would have to be restricted to 1 (essentially just the current pc), because
// the slow, libunwind-based unwinder is not async-signal-safe and causes
// random freezes in forking applications as well as in signal handlers.
// DFSan supports only Linux, so we do not restrict the store context size.
#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
  BufferedStackTrace stack;                 \
  stack.Unwind(pc, bp, nullptr, true, flags().store_context_size);

#define PRINT_CALLER_STACK_TRACE        \
  {                                     \
    GET_CALLER_PC_BP_SP;                \
    (void)sp;                           \
    GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
    stack.Print();                      \
  }

// Return a chain with the previous ID id and the current stack.
// from_init = true if this is the first chain of an origin tracking path.
static u32 ChainOrigin(u32 id, StackTrace *stack, bool from_init = false) {
  // StackDepot is not async signal safe. Do not create new chains in a signal
  // handler.
  DFsanThread *t = GetCurrentThread();
  if (t && t->InSignalHandler())
    return id;

  // As an optimization the origin of an application byte is updated only when
  // its shadow is non-zero. Because we are only interested in the origins of
  // taint labels, it does not matter what origin a zero label has. This
  // reduces the cost of memory writes; MSan does a similar optimization. The
  // following invariant may not hold because of bugs, so we check it to help
  // debugging.
  if (!from_init && id == 0 && flags().check_origin_invariant) {
    Printf("  DFSan found invalid origin invariant\n");
    PRINT_CALLER_STACK_TRACE
  }

  Origin o = Origin::FromRawId(id);
  stack->tag = StackTrace::TAG_UNKNOWN;
  Origin chained = Origin::CreateChainedOrigin(o, stack);
  return chained.raw_id();
}

static void ChainAndWriteOriginIfTainted(uptr src, uptr size, uptr dst,
                                         StackTrace *stack) {
  dfsan_origin o = GetOriginIfTainted(src, size);
  if (o) {
    o = ChainOrigin(o, stack);
    *(dfsan_origin *)origin_for((void *)dst) = o;
  }
}

// Copy the origins of the size bytes from src to dst. The source and target
// memory ranges must not overlap. This is used by memcpy. stack records the
// stack trace of the memcpy. When dst and src are not properly 4-byte
// aligned, origins at the unaligned address boundaries may be overwritten
// because four contiguous bytes share the same origin.
static void CopyOrigin(const void *dst, const void *src, uptr size,
                       StackTrace *stack) {
  uptr d = (uptr)dst;
  uptr beg = OriginAlignDown(d);
  // Copy left unaligned origin if that memory is tainted.
  if (beg < d) {
    ChainAndWriteOriginIfTainted((uptr)src, beg + kOriginAlign - d, beg, stack);
    beg += kOriginAlign;
  }

  uptr end = OriginAlignDown(d + size);
  // If both ends fall into the same 4-byte slot, we are done.
  if (end < beg)
    return;

  // Copy right unaligned origin if that memory is tainted.
  if (end < d + size)
    ChainAndWriteOriginIfTainted((uptr)src + (end - d), (d + size) - end, end,
                                 stack);

  if (beg >= end)
    return;

  // Align src up.
  uptr src_a = OriginAlignUp((uptr)src);
  dfsan_origin *src_o = origin_for((void *)src_a);
  u32 *src_s = (u32 *)shadow_for((void *)src_a);
  dfsan_origin *src_end = origin_for((void *)(src_a + (end - beg)));
  dfsan_origin *dst_o = origin_for((void *)beg);
  dfsan_origin last_src_o = 0;
  dfsan_origin last_dst_o = 0;
  for (; src_o < src_end; ++src_o, ++src_s, ++dst_o) {
    if (!*src_s)
      continue;
    if (*src_o != last_src_o) {
      last_src_o = *src_o;
      last_dst_o = ChainOrigin(last_src_o, stack);
    }
    *dst_o = last_dst_o;
  }
}

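// A concrete instance of the caveat above (a sketch): four contiguous
// application bytes share one 4-byte origin slot, so copying 2 tainted bytes
// to dst = 0x...06 writes a single chained origin into the slot covering
// 0x...04..0x...07, replacing whatever origin was previously recorded for the
// untouched bytes at 0x...04 and 0x...05.
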
// Copy the origins of the size bytes from src to dst. The source and target
// memory ranges may overlap, so the copy is done in reverse order. This is
// used by memmove. stack records the stack trace of the memmove.
static void ReverseCopyOrigin(const void *dst, const void *src, uptr size,
                              StackTrace *stack) {
  uptr d = (uptr)dst;
  uptr end = OriginAlignDown(d + size);

  // Copy right unaligned origin if that memory is tainted.
  if (end < d + size)
    ChainAndWriteOriginIfTainted((uptr)src + (end - d), (d + size) - end, end,
                                 stack);

  uptr beg = OriginAlignDown(d);

  if (beg + kOriginAlign < end) {
    // Align src up.
    uptr src_a = OriginAlignUp((uptr)src);
    void *src_end = (void *)(src_a + end - beg - kOriginAlign);
    dfsan_origin *src_end_o = origin_for(src_end);
    u32 *src_end_s = (u32 *)shadow_for(src_end);
    dfsan_origin *src_begin_o = origin_for((void *)src_a);
    dfsan_origin *dst = origin_for((void *)(end - kOriginAlign));
    dfsan_origin last_src_o = 0;
    dfsan_origin last_dst_o = 0;
    for (; src_end_o >= src_begin_o; --src_end_o, --src_end_s, --dst) {
      if (!*src_end_s)
        continue;
      if (*src_end_o != last_src_o) {
        last_src_o = *src_end_o;
        last_dst_o = ChainOrigin(last_src_o, stack);
      }
      *dst = last_dst_o;
    }
  }

  // Copy left unaligned origin if that memory is tainted.
  if (beg < d)
    ChainAndWriteOriginIfTainted((uptr)src, beg + kOriginAlign - d, beg, stack);
}

// Copy or move the origins of the size bytes from src to dst. The source and
// target memory ranges may or may not overlap. This is used by memory
// transfer operations. stack records the stack trace of the memory transfer
// operation.
static void MoveOrigin(const void *dst, const void *src, uptr size,
                       StackTrace *stack) {
  // Validate address regions.
  if (!MEM_IS_SHADOW(shadow_for(dst)) ||
      !MEM_IS_SHADOW(shadow_for((void *)((uptr)dst + size))) ||
      !MEM_IS_SHADOW(shadow_for(src)) ||
      !MEM_IS_SHADOW(shadow_for((void *)((uptr)src + size)))) {
    CHECK(false);
    return;
  }
  // If the destination origin range overlaps with the source origin range,
  // move origins by copying them in reverse order; otherwise, copy origins in
  // forward order. The order of the origin transfer matches the order in
  // which memcpy and memmove transfer user data.
  uptr src_aligned_beg = reinterpret_cast<uptr>(src) & ~3UL;
  uptr src_aligned_end = (reinterpret_cast<uptr>(src) + size) & ~3UL;
  uptr dst_aligned_beg = reinterpret_cast<uptr>(dst) & ~3UL;
  if (dst_aligned_beg < src_aligned_end && dst_aligned_beg >= src_aligned_beg)
    return ReverseCopyOrigin(dst, src, size, stack);
  return CopyOrigin(dst, src, size, stack);
}

// Set the origins of the size bytes from the address dst to the origin value.
static void SetOrigin(const void *dst, uptr size, u32 origin) {
  if (size == 0)
    return;

  // Origin mapping is 4 bytes per 4 bytes of application memory.
  // Here we extend the range such that its left and right bounds are both
  // 4 byte aligned.
  uptr x = unaligned_origin_for((uptr)dst);
  uptr beg = OriginAlignDown(x);
  uptr end = OriginAlignUp(x + size);  // align up.
  u64 origin64 = ((u64)origin << 32) | origin;
  // This is like memset, but the value is 32-bit. We unroll by 2 to write
  // 64 bits at once. May want to unroll further to get 128-bit stores.
  if (beg & 7ULL) {
    if (*(u32 *)beg != origin)
      *(u32 *)beg = origin;
    beg += 4;
  }
  for (uptr addr = beg; addr < (end & ~7UL); addr += 8) {
    if (*(u64 *)addr == origin64)
      continue;
    *(u64 *)addr = origin64;
  }
  if (end & 7ULL)
    if (*(u32 *)(end - kOriginAlign) != origin)
      *(u32 *)(end - kOriginAlign) = origin;
}

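// A worked instance of the store pattern above: with origin 0x00010002,
// origin64 == 0x0001000200010002. For an extended range [beg, end) with
// beg % 8 == 4, one 4-byte store realigns beg to 8 bytes, the loop then
// issues 8-byte stores, and a final 4-byte store covers the tail when
// end % 8 == 4.
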
#define RET_CHAIN_ORIGIN(id)           \
  GET_CALLER_PC_BP_SP;                 \
  (void)sp;                            \
  GET_STORE_STACK_TRACE_PC_BP(pc, bp); \
  return ChainOrigin(id, &stack);

// Return a new origin chain with the previous ID id and the current stack
// trace.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin
__dfsan_chain_origin(dfsan_origin id) {
  RET_CHAIN_ORIGIN(id)
}

// Return a new origin chain with the previous ID id and the current stack
// trace if the label is tainted.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin
__dfsan_chain_origin_if_tainted(dfsan_label label, dfsan_origin id) {
  if (!label)
    return id;
  RET_CHAIN_ORIGIN(id)
}

// Copy or move the origins of the len bytes from src to dst.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_mem_origin_transfer(
    const void *dst, const void *src, uptr len) {
  if (src == dst)
    return;
  GET_CALLER_PC_BP;
  GET_STORE_STACK_TRACE_PC_BP(pc, bp);
  MoveOrigin(dst, src, len, &stack);
}

SANITIZER_INTERFACE_ATTRIBUTE void dfsan_mem_origin_transfer(const void *dst,
                                                             const void *src,
                                                             uptr len) {
  __dfsan_mem_origin_transfer(dst, src, len);
}

namespace __dfsan {

bool dfsan_inited = false;
bool dfsan_init_is_running = false;

void dfsan_copy_memory(void *dst, const void *src, uptr size) {
  internal_memcpy(dst, src, size);
  internal_memcpy((void *)shadow_for(dst), (const void *)shadow_for(src),
                  size * sizeof(dfsan_label));
  if (dfsan_get_track_origins())
    dfsan_mem_origin_transfer(dst, src, size);
}

// Releases the pages within the origin address range.
static void ReleaseOrigins(void *addr, uptr size) {
  const uptr beg_origin_addr = (uptr)__dfsan::origin_for(addr);
  const void *end_addr = (void *)((uptr)addr + size);
  const uptr end_origin_addr = (uptr)__dfsan::origin_for(end_addr);

  if (end_origin_addr - beg_origin_addr <
      common_flags()->clear_shadow_mmap_threshold)
    return;

  const uptr page_size = GetPageSizeCached();
  const uptr beg_aligned = RoundUpTo(beg_origin_addr, page_size);
  const uptr end_aligned = RoundDownTo(end_origin_addr, page_size);

  if (!MmapFixedSuperNoReserve(beg_aligned, end_aligned - beg_aligned))
    Die();
}

static void WriteZeroShadowInRange(uptr beg, uptr end) {
  // Don't write the label if it is already the value we need it to be.
  // In a program where most addresses are not labeled, it is common that
  // a page of shadow memory is entirely zeroed. The Linux copy-on-write
  // implementation will share all of the zeroed pages, making a copy of a
  // page when any value is written. The un-sharing will happen even if
  // the value written does not change the value in memory. Avoiding the
  // write when both |label| and |*labelp| are zero dramatically reduces
  // the amount of real memory used by large programs.
  if (!mem_is_zero((const char *)beg, end - beg))
    internal_memset((void *)beg, 0, end - beg);
}

// Releases the pages within the shadow address range, and zeroes the shadow
// addresses not covered by those pages.
static void ReleaseOrClearShadows(void *addr, uptr size) {
  const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);
  const void *end_addr = (void *)((uptr)addr + size);
  const uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr);

  if (end_shadow_addr - beg_shadow_addr <
      common_flags()->clear_shadow_mmap_threshold) {
    WriteZeroShadowInRange(beg_shadow_addr, end_shadow_addr);
    return;
  }

  const uptr page_size = GetPageSizeCached();
  const uptr beg_aligned = RoundUpTo(beg_shadow_addr, page_size);
  const uptr end_aligned = RoundDownTo(end_shadow_addr, page_size);

  if (beg_aligned >= end_aligned) {
    WriteZeroShadowInRange(beg_shadow_addr, end_shadow_addr);
  } else {
    if (beg_aligned != beg_shadow_addr)
      WriteZeroShadowInRange(beg_shadow_addr, beg_aligned);
    if (end_aligned != end_shadow_addr)
      WriteZeroShadowInRange(end_aligned, end_shadow_addr);
    if (!MmapFixedSuperNoReserve(beg_aligned, end_aligned - beg_aligned))
      Die();
  }
}

void SetShadow(dfsan_label label, void *addr, uptr size, dfsan_origin origin) {
  if (0 != label) {
    const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);
    internal_memset((void *)beg_shadow_addr, label, size);
    if (dfsan_get_track_origins())
      SetOrigin(addr, size, origin);
    return;
  }

  if (dfsan_get_track_origins())
    ReleaseOrigins(addr, size);

  ReleaseOrClearShadows(addr, size);
}

}  // namespace __dfsan

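// A sketch of how SetShadow (above) clears labels over a large region,
// assuming a 4 KiB page size and a range above clear_shadow_mmap_threshold:
// for a shadow range [0x500000000100, 0x500000009f00), the partial head
// [0x500000000100, 0x500000001000) and tail [0x500000009000, 0x500000009f00)
// are zeroed with WriteZeroShadowInRange, while the eight fully covered pages
// [0x500000001000, 0x500000009000) are returned to the kernel via
// MmapFixedSuperNoReserve.
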
// If the label s is tainted, set the origins of the size bytes from the
// address p to a new origin chain with the previous ID o and the current
// stack trace. This is used by instrumentation to reduce code size when too
// much code would otherwise be inserted.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_maybe_store_origin(
    dfsan_label s, void *p, uptr size, dfsan_origin o) {
  if (UNLIKELY(s)) {
    GET_CALLER_PC_BP_SP;
    (void)sp;
    GET_STORE_STACK_TRACE_PC_BP(pc, bp);
    SetOrigin(p, size, ChainOrigin(o, &stack));
  }
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_set_label(
    dfsan_label label, dfsan_origin origin, void *addr, uptr size) {
  __dfsan::SetShadow(label, addr, size, origin);
}

SANITIZER_INTERFACE_ATTRIBUTE
void dfsan_set_label(dfsan_label label, void *addr, uptr size) {
  dfsan_origin init_origin = 0;
  if (label && dfsan_get_track_origins()) {
    GET_CALLER_PC_BP;
    GET_STORE_STACK_TRACE_PC_BP(pc, bp);
    init_origin = ChainOrigin(0, &stack, true);
  }
  __dfsan::SetShadow(label, addr, size, init_origin);
}

SANITIZER_INTERFACE_ATTRIBUTE
void dfsan_add_label(dfsan_label label, void *addr, uptr size) {
  if (0 == label)
    return;

  if (dfsan_get_track_origins()) {
    GET_CALLER_PC_BP;
    GET_STORE_STACK_TRACE_PC_BP(pc, bp);
    dfsan_origin init_origin = ChainOrigin(0, &stack, true);
    SetOrigin(addr, size, init_origin);
  }

  for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp)
    *labelp |= label;
}

// Unlike the other dfsan interface functions, the behavior of this function
// depends on the label of one of its arguments. Hence it is implemented as a
// custom function.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
__dfsw_dfsan_get_label(long data, dfsan_label data_label,
                       dfsan_label *ret_label) {
  *ret_label = 0;
  return data_label;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label __dfso_dfsan_get_label(
    long data, dfsan_label data_label, dfsan_label *ret_label,
    dfsan_origin data_origin, dfsan_origin *ret_origin) {
  *ret_label = 0;
  *ret_origin = 0;
  return data_label;
}

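// A minimal end-to-end sketch of the public interface (client code built with
// -fsanitize=dataflow; declarations in include/sanitizer/dfsan_interface.h):
//
//   int i = 100;
//   dfsan_label l = 8;
//   dfsan_set_label(l, &i, sizeof(i));   // taint i
//   int j = i + 1;                       // the label propagates to j
//   assert(dfsan_get_label(j) == l);
//   assert(dfsan_has_label(dfsan_read_label(&j, sizeof(j)), l));
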
// This function is used if dfsan_get_origin is called when origin tracking is
// off.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin __dfsw_dfsan_get_origin(
    long data, dfsan_label data_label, dfsan_label *ret_label) {
  *ret_label = 0;
  return 0;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin __dfso_dfsan_get_origin(
    long data, dfsan_label data_label, dfsan_label *ret_label,
    dfsan_origin data_origin, dfsan_origin *ret_origin) {
  *ret_label = 0;
  *ret_origin = 0;
  return data_origin;
}

SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
dfsan_read_label(const void *addr, uptr size) {
  if (size == 0)
    return 0;
  return __dfsan_union_load(shadow_for(addr), size);
}

SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin
dfsan_read_origin_of_first_taint(const void *addr, uptr size) {
  return GetOriginIfTainted((uptr)addr, size);
}

SANITIZER_INTERFACE_ATTRIBUTE void dfsan_set_label_origin(dfsan_label label,
                                                          dfsan_origin origin,
                                                          void *addr,
                                                          uptr size) {
  __dfsan_set_label(label, origin, addr, size);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE int
dfsan_has_label(dfsan_label label, dfsan_label elem) {
  return (label & elem) == elem;
}

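// When origin tracking is enabled, client code can ask where a tainted value
// came from, e.g. (a sketch):
//
//   dfsan_set_label(1, &i, sizeof(i));
//   int j = i * 2;
//   dfsan_print_origin_trace(&j, "checking j");  // defined below
//
// which prints the stacks where the taint was created and later stored.
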
class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Origin() const { return Magenta(); }
};

namespace {

void PrintNoOriginTrackingWarning() {
  Decorator d;
  Printf(
      "  %sDFSan: origin tracking is not enabled. Did you specify the "
      "-dfsan-track-origins=1 option?%s\n",
      d.Warning(), d.Default());
}

void PrintNoTaintWarning(const void *address) {
  Decorator d;
  Printf("  %sDFSan: no tainted value at %p%s\n", d.Warning(), address,
         d.Default());
}

void PrintInvalidOriginWarning(dfsan_label label, const void *address) {
  Decorator d;
  Printf(
      "  %sTaint value 0x%x (at %p) has invalid origin tracking. This can "
      "be a DFSan bug.%s\n",
      d.Warning(), label, address, d.Default());
}

bool PrintOriginTraceToStr(const void *addr, const char *description,
                           InternalScopedString *out) {
  CHECK(out);
  CHECK(dfsan_get_track_origins());
  Decorator d;

  const dfsan_label label = *__dfsan::shadow_for(addr);
  CHECK(label);

  const dfsan_origin origin = *__dfsan::origin_for(addr);

  out->append("  %sTaint value 0x%x (at %p) origin tracking (%s)%s\n",
              d.Origin(), label, addr, description ? description : "",
              d.Default());

  Origin o = Origin::FromRawId(origin);
  bool found = false;

  while (o.isChainedOrigin()) {
    StackTrace stack;
    dfsan_origin origin_id = o.raw_id();
    o = o.getNextChainedOrigin(&stack);
    if (o.isChainedOrigin())
      out->append(
          "  %sOrigin value: 0x%x, Taint value was stored to memory at%s\n",
          d.Origin(), origin_id, d.Default());
    else
      out->append("  %sOrigin value: 0x%x, Taint value was created at%s\n",
                  d.Origin(), origin_id, d.Default());

    // Includes a trailing newline, so no need to add it again.
    stack.PrintTo(out);
    found = true;
  }

  return found;
}

}  // namespace

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_print_origin_trace(
    const void *addr, const char *description) {
  if (!dfsan_get_track_origins()) {
    PrintNoOriginTrackingWarning();
    return;
  }

  const dfsan_label label = *__dfsan::shadow_for(addr);
  if (!label) {
    PrintNoTaintWarning(addr);
    return;
  }

  InternalScopedString trace;
  bool success = PrintOriginTraceToStr(addr, description, &trace);

  if (trace.length())
    Printf("%s", trace.data());

  if (!success)
    PrintInvalidOriginWarning(label, addr);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr
dfsan_sprint_origin_trace(const void *addr, const char *description,
                          char *out_buf, uptr out_buf_size) {
  CHECK(out_buf);

  if (!dfsan_get_track_origins()) {
    PrintNoOriginTrackingWarning();
    return 0;
  }

  const dfsan_label label = *__dfsan::shadow_for(addr);
  if (!label) {
    PrintNoTaintWarning(addr);
    return 0;
  }

  InternalScopedString trace;
  bool success = PrintOriginTraceToStr(addr, description, &trace);

  if (!success) {
    PrintInvalidOriginWarning(label, addr);
    return 0;
  }

  if (out_buf_size) {
    internal_strncpy(out_buf, trace.data(), out_buf_size - 1);
    out_buf[out_buf_size - 1] = '\0';
  }

  return trace.length();
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin
dfsan_get_init_origin(const void *addr) {
  if (!dfsan_get_track_origins())
    return 0;

  const dfsan_label label = *__dfsan::shadow_for(addr);
  if (!label)
    return 0;

  const dfsan_origin origin = *__dfsan::origin_for(addr);

  Origin o = Origin::FromRawId(origin);
  dfsan_origin origin_id = o.raw_id();
  while (o.isChainedOrigin()) {
    StackTrace stack;
    origin_id = o.raw_id();
    o = o.getNextChainedOrigin(&stack);
  }
  return origin_id;
}

void __sanitizer::BufferedStackTrace::UnwindImpl(uptr pc, uptr bp,
                                                 void *context,
                                                 bool request_fast,
                                                 u32 max_depth) {
  using namespace __dfsan;
  DFsanThread *t = GetCurrentThread();
  if (!t || !StackTrace::WillUseFastUnwind(request_fast)) {
    return Unwind(max_depth, pc, bp, context, 0, 0, false);
  }
  Unwind(max_depth, pc, bp, nullptr, t->stack_top(), t->stack_bottom(), true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_print_stack_trace() {
  GET_CALLER_PC_BP;
  GET_STORE_STACK_TRACE_PC_BP(pc, bp);
  stack.Print();
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr
dfsan_sprint_stack_trace(char *out_buf, uptr out_buf_size) {
  CHECK(out_buf);
  GET_CALLER_PC_BP;
  GET_STORE_STACK_TRACE_PC_BP(pc, bp);
  return stack.PrintTo(out_buf, out_buf_size);
}

void Flags::SetDefaults() {
#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "dfsan_flags.inc"
#undef DFSAN_FLAG
}

static void RegisterDfsanFlags(FlagParser *parser, Flags *f) {
#define DFSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "dfsan_flags.inc"
#undef DFSAN_FLAG
}

static void InitializeFlags() {
  SetCommonFlagsDefaults();
  {
    CommonFlags cf;
    cf.CopyFrom(*common_flags());
    cf.intercept_tls_get_addr = true;
    OverrideCommonFlags(cf);
  }
  flags().SetDefaults();

  FlagParser parser;
  RegisterCommonFlags(&parser);
  RegisterDfsanFlags(&parser, &flags());
  parser.ParseStringFromEnv("DFSAN_OPTIONS");
  InitializeCommonFlags();
  if (Verbosity()) ReportUnrecognizedFlags();
  if (common_flags()->help) parser.PrintFlagDescriptions();
}

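// Runtime behavior is tuned through the DFSAN_OPTIONS environment variable
// parsed above. A sketch using flags referenced in this file (defined in
// dfsan_flags.inc and sanitizer_common):
//
//   DFSAN_OPTIONS=warn_unimplemented=0:store_context_size=8 ./app
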
SANITIZER_INTERFACE_ATTRIBUTE
void dfsan_clear_arg_tls(uptr offset, uptr size) {
  internal_memset((void *)((uptr)__dfsan_arg_tls + offset), 0, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void dfsan_clear_thread_local_state() {
  internal_memset(__dfsan_arg_tls, 0, sizeof(__dfsan_arg_tls));
  internal_memset(__dfsan_retval_tls, 0, sizeof(__dfsan_retval_tls));

  if (dfsan_get_track_origins()) {
    internal_memset(__dfsan_arg_origin_tls, 0, sizeof(__dfsan_arg_origin_tls));
    internal_memset(&__dfsan_retval_origin_tls, 0,
                    sizeof(__dfsan_retval_origin_tls));
  }
}

extern "C" void dfsan_flush() {
  const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    uptr size = end - start;
    MappingDesc::Type type = kMemoryLayout[i].type;

    if (type != MappingDesc::SHADOW && type != MappingDesc::ORIGIN)
      continue;

    // Check if the segment should be mapped based on platform constraints.
    if (start >= maxVirtualAddress)
      continue;

    if (!MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name)) {
      Printf("FATAL: DataFlowSanitizer: failed to clear memory region\n");
      Die();
    }
  }
}

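// For example (a sketch): a fuzzing driver may call dfsan_flush() between
// iterations to drop every label in the process by remapping the shadow and
// origin regions, which is typically cheaper than clearing labels range by
// range with dfsan_set_label(0, ...).
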
// TODO: CheckMemoryLayoutSanity is based on msan.
// Consider refactoring these into a shared implementation.
static void CheckMemoryLayoutSanity() {
  uptr prev_end = 0;
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    MappingDesc::Type type = kMemoryLayout[i].type;
    CHECK_LT(start, end);
    CHECK_EQ(prev_end, start);
    CHECK(addr_is_type(start, type));
    CHECK(addr_is_type((start + end) / 2, type));
    CHECK(addr_is_type(end - 1, type));
    if (type == MappingDesc::APP) {
      uptr addr = start;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));

      addr = (start + end) / 2;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));

      addr = end - 1;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
    }
    prev_end = end;
  }
}

// TODO: CheckMemoryRangeAvailability is based on msan.
// Consider refactoring these into a shared implementation.
static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
  if (size > 0) {
    uptr end = beg + size - 1;
    if (!MemoryRangeIsAvailable(beg, end)) {
      Printf("FATAL: Memory range %p - %p is not available.\n", beg, end);
      return false;
    }
  }
  return true;
}

// TODO: ProtectMemoryRange is based on msan.
// Consider refactoring these into a shared implementation.
static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
  if (size > 0) {
    void *addr = MmapFixedNoAccess(beg, size, name);
    if (beg == 0 && addr) {
      // Depending on the kernel configuration, we may not be able to protect
      // the page at address zero.
      uptr gap = 16 * GetPageSizeCached();
      beg += gap;
      size -= gap;
      addr = MmapFixedNoAccess(beg, size, name);
    }
    if ((uptr)addr != beg) {
      uptr end = beg + size - 1;
      Printf("FATAL: Cannot protect memory range %p - %p (%s).\n", beg, end,
             name);
      return false;
    }
  }
  return true;
}

// TODO: InitShadow is based on msan.
// Consider refactoring these into a shared implementation.
bool InitShadow(bool init_origins) {
  // Let user know mapping parameters first.
  VPrintf(1, "dfsan_init %p\n", (void *)&__dfsan::dfsan_init);
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
    VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
            kMemoryLayout[i].end - 1);

  CheckMemoryLayoutSanity();

  if (!MEM_IS_APP(&__dfsan::dfsan_init)) {
    Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
           (uptr)&__dfsan::dfsan_init);
    return false;
  }

  const uptr maxVirtualAddress = GetMaxUserVirtualAddress();

  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    uptr size = end - start;
    MappingDesc::Type type = kMemoryLayout[i].type;

    // Check if the segment should be mapped based on platform constraints.
    if (start >= maxVirtualAddress)
      continue;

    bool map = type == MappingDesc::SHADOW ||
               (init_origins && type == MappingDesc::ORIGIN);
    bool protect = type == MappingDesc::INVALID ||
                   (!init_origins && type == MappingDesc::ORIGIN);
    CHECK(!(map && protect));
    if (!map && !protect)
      CHECK(type == MappingDesc::APP);
    if (map) {
      if (!CheckMemoryRangeAvailability(start, size))
        return false;
      if (!MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name))
        return false;
      if (common_flags()->use_madv_dontdump)
        DontDumpShadowMemory(start, size);
    }
    if (protect) {
      if (!CheckMemoryRangeAvailability(start, size))
        return false;
      if (!ProtectMemoryRange(start, size, kMemoryLayout[i].name))
        return false;
    }
  }

  return true;
}

static void DFsanInit(int argc, char **argv, char **envp) {
  CHECK(!dfsan_init_is_running);
  if (dfsan_inited)
    return;
  dfsan_init_is_running = true;
  SanitizerToolName = "DataflowSanitizer";

  AvoidCVE_2016_2143();

  InitializeFlags();

  CheckASLR();

  InitShadow(dfsan_get_track_origins());

  initialize_interceptors();

  // Set up threads.
  DFsanTSDInit(DFsanTSDDtor);

  dfsan_allocator_init();

  DFsanThread *main_thread = DFsanThread::Create(nullptr, nullptr, nullptr);
  SetCurrentThread(main_thread);
  main_thread->Init();

  dfsan_init_is_running = false;
  dfsan_inited = true;
}

namespace __dfsan {

void dfsan_init() { DFsanInit(0, nullptr, nullptr); }

}  // namespace __dfsan

#if SANITIZER_CAN_USE_PREINIT_ARRAY
__attribute__((section(".preinit_array"),
               used)) static void (*dfsan_init_ptr)(int, char **,
                                                    char **) = DFsanInit;
#endif