// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for KMSAN.
 * For each test case checks the presence (or absence) of generated reports.
 * Relies on 'console' tracepoint to capture reports as they appear in the
 * kernel log.
 *
 * Copyright (C) 2021-2022, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <kunit/test.h>
#include "kmsan.h"

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <linux/vmalloc.h>
#include <trace/events/printk.h>

/* Scratch per-CPU variable used by test_percpu_propagate(). */
static DEFINE_PER_CPU(int, per_cpu_var);

/*
 * Report as observed from console.
 * All fields are written under @lock; see probe_console() and report_reset().
 */
static struct {
        spinlock_t lock;
        bool available;
        bool ignore; /* Stop console output collection. */
        char header[256]; /* First captured line of the KMSAN report. */
} observed = {
        .lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/*
 * Probe for console output: obtains observed lines of interest.
 * Attached to the 'console' tracepoint, so it runs for every line printed to
 * the kernel log while the suite is active (see kmsan_suite_init()).
 *
 * The unlocked read of observed.ignore is a fast-path check; the flag is
 * (re)written under observed.lock.
 */
static void probe_console(void *ignore, const char *buf, size_t len)
{
        unsigned long flags;

        if (observed.ignore)
                return;
        spin_lock_irqsave(&observed.lock, flags);

        if (strnstr(buf, "BUG: KMSAN: ", len)) {
                /*
                 * KMSAN report and related to the test.
                 *
                 * The provided @buf is not NUL-terminated; copy no more than
                 * @len bytes and let strscpy() add the missing NUL-terminator.
                 */
                strscpy(observed.header, buf,
                        min(len + 1, sizeof(observed.header)));
                WRITE_ONCE(observed.available, true);
                /* Only keep the first report header per test. */
                observed.ignore = true;
        }
        spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
static bool report_available(void)
{
        return READ_ONCE(observed.available);
}

/* Reset observed.available, so that the test can trigger another report.
 */
static void report_reset(void)
{
        unsigned long flags;

        spin_lock_irqsave(&observed.lock, flags);
        WRITE_ONCE(observed.available, false);
        /* Re-enable console collection in probe_console(). */
        observed.ignore = false;
        spin_unlock_irqrestore(&observed.lock, flags);
}

/* Information we expect in a report. */
struct expect_report {
        const char *error_type; /* Error type. */
        /*
         * Kernel symbol from the error header, or NULL if no report is
         * expected.
         */
        const char *symbol;
};

/*
 * Check observed report matches information in @r.
 *
 * Returns true either when the observed report header contains the expected
 * "BUG: KMSAN: <error_type> in <symbol>" prefix, or when no report was
 * expected (@r->symbol == NULL) and none was observed.
 */
static bool report_matches(const struct expect_report *r)
{
        typeof(observed.header) expected_header;
        unsigned long flags;
        bool ret = false;
        const char *end;
        char *cur;

        /* Double-checked locking. */
        if (!report_available() || !r->symbol)
                return (!report_available() && !r->symbol);

        /* Generate expected report contents. */

        /* Title */
        cur = expected_header;
        end = ARRAY_END(expected_header);

        cur += scnprintf(cur, end - cur, "BUG: KMSAN: %s", r->error_type);

        scnprintf(cur, end - cur, " in %s", r->symbol);
        /* The exact offset won't match, remove it; also strip module name. */
        cur = strchr(expected_header, '+');
        if (cur)
                *cur = '\0';

        spin_lock_irqsave(&observed.lock, flags);
        if (!report_available())
                goto out; /* A new report is being captured. */

        /* Finally match expected output to what we actually observed. */
        ret = strstr(observed.header, expected_header);
out:
        spin_unlock_irqrestore(&observed.lock, flags);

        return ret;
}

/* ===== Test cases ===== */

/* Prevent replacing branch with select in LLVM.
 */
static noinline void check_true(char *arg)
{
        pr_info("%s is true\n", arg);
}

static noinline void check_false(char *arg)
{
        pr_info("%s is false\n", arg);
}

/*
 * Force a use of @x: the branch taken depends on the (possibly uninitialized)
 * value, which is what makes KMSAN emit a report.
 */
#define USE(x)                           \
        do {                             \
                if (x)                   \
                        check_true(#x);  \
                else                     \
                        check_false(#x); \
        } while (0)

/* Declare an expect_report named @e with the given error type and symbol. */
#define EXPECTATION_ETYPE_FN(e, reason, fn) \
        struct expect_report e = {          \
                .error_type = reason,       \
                .symbol = fn,               \
        }

#define EXPECTATION_NO_REPORT(e) EXPECTATION_ETYPE_FN(e, NULL, NULL)
#define EXPECTATION_UNINIT_VALUE_FN(e, fn) \
        EXPECTATION_ETYPE_FN(e, "uninit-value", fn)
#define EXPECTATION_UNINIT_VALUE(e) EXPECTATION_UNINIT_VALUE_FN(e, __func__)
#define EXPECTATION_USE_AFTER_FREE(e) \
        EXPECTATION_ETYPE_FN(e, "use-after-free", __func__)

/* Test case: ensure that kmalloc() returns uninitialized memory. */
static void test_uninit_kmalloc(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE(expect);
        int *ptr;

        kunit_info(test, "uninitialized kmalloc test (UMR report)\n");
        ptr = kmalloc_obj(*ptr);
        USE(*ptr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that kmalloc'ed memory becomes initialized after memset().
 */
static void test_init_kmalloc(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        int *ptr;

        kunit_info(test, "initialized kmalloc test (no reports)\n");
        ptr = kmalloc_obj(*ptr);
        memset(ptr, 0, sizeof(*ptr));
        USE(*ptr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that kzalloc() returns initialized memory.
 */
static void test_init_kzalloc(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        int *ptr;

        kunit_info(test, "initialized kzalloc test (no reports)\n");
        ptr = kzalloc_obj(*ptr);
        USE(*ptr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that local variables are uninitialized by default. */
static void test_uninit_stack_var(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE(expect);
        volatile int cond;

        kunit_info(test, "uninitialized stack variable (UMR report)\n");
        USE(cond);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that local variables with initializers are initialized. */
static void test_init_stack_var(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        volatile int cond = 1;

        kunit_info(test, "initialized stack variable (no reports)\n");
        USE(cond);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Helpers for test_params(): pass values through a chain of noinline
 * functions, so that shadow is propagated via function parameters.
 */
static noinline void two_param_fn_2(int arg1, int arg2)
{
        USE(arg1);
        USE(arg2);
}

static noinline void one_param_fn(int arg)
{
        two_param_fn_2(arg, arg);
        USE(arg);
}

static noinline void two_param_fn(int arg1, int arg2)
{
        int init = 0;

        one_param_fn(init);
        USE(arg1);
        USE(arg2);
}

/*
 * Test case: ensure that an uninitialized value passed as a function parameter
 * is reported; the reported symbol depends on CONFIG_KMSAN_CHECK_PARAM_RETVAL.
 */
static void test_params(struct kunit *test)
{
#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL
        /*
         * With eager param/retval checking enabled, KMSAN will report an error
         * before the call to two_param_fn().
         */
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_params");
#else
        EXPECTATION_UNINIT_VALUE_FN(expect, "two_param_fn");
#endif
        volatile int uninit, init = 1;

        kunit_info(test,
                   "uninit passed through a function parameter (UMR report)\n");
        two_param_fn(uninit, init);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static int signed_sum3(int a, int b, int c)
{
        return a + b + c;
}

/*
 * Test case: ensure that uninitialized values are tracked through function
 * arguments.
 */
static void test_uninit_multiple_params(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE(expect);
        volatile char b = 3, c;
        volatile int a;

        kunit_info(test, "uninitialized local passed to fn (UMR report)\n");
        USE(signed_sum3(a, b, c));
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Helper function to make an array uninitialized. */
static noinline void do_uninit_local_array(char *array, int start, int stop)
{
        volatile char uninit;

        for (int i = start; i < stop; i++)
                array[i] = uninit;
}

/*
 * Test case: ensure kmsan_check_memory() reports an error when checking
 * uninitialized memory.
 */
static void test_uninit_kmsan_check_memory(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_uninit_kmsan_check_memory");
        volatile char local_array[8];

        kunit_info(
                test,
                "kmsan_check_memory() called on uninit local (UMR report)\n");
        /* Poison bytes [5, 7) only; checking all 8 bytes must still report. */
        do_uninit_local_array((char *)local_array, 5, 7);

        kmsan_check_memory((char *)local_array, 8);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: check that a virtual memory range created with vmap() from
 * initialized pages is still considered as initialized.
 */
static void test_init_kmsan_vmap_vunmap(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        const int npages = 2;
        struct page **pages;
        void *vbuf;

        kunit_info(test, "pages initialized via vmap (no reports)\n");

        pages = kmalloc_objs(*pages, npages);
        for (int i = 0; i < npages; i++)
                pages[i] = alloc_page(GFP_KERNEL);
        vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
        /* Initialize through the vmap alias, then check the page addresses. */
        memset(vbuf, 0xfe, npages * PAGE_SIZE);
        for (int i = 0; i < npages; i++)
                kmsan_check_memory(page_address(pages[i]), PAGE_SIZE);

        if (vbuf)
                vunmap(vbuf);
        for (int i = 0; i < npages; i++) {
                if (pages[i])
                        __free_page(pages[i]);
        }
        kfree(pages);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that memset() can initialize a buffer allocated via
 * vmalloc().
 */
static void test_init_vmalloc(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        int npages = 8;
        char *buf;

        kunit_info(test, "vmalloc buffer can be initialized (no reports)\n");
        buf = vmalloc(PAGE_SIZE * npages);
        buf[0] = 1;
        memset(buf, 0xfe, PAGE_SIZE * npages);
        USE(buf[0]);
        for (int i = 0; i < npages; i++)
                kmsan_check_memory(&buf[PAGE_SIZE * i], PAGE_SIZE);
        vfree(buf);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that use-after-free reporting works for kmalloc. */
static void test_uaf(struct kunit *test)
{
        EXPECTATION_USE_AFTER_FREE(expect);
        volatile int value;
        volatile int *var;

        kunit_info(test, "use-after-free in kmalloc-ed buffer (UMR report)\n");
        var = kmalloc(80, GFP_KERNEL);
        var[3] = 0xfeedface;
        kfree((int *)var);
        /* Copy the invalid value before checking it.
         */
        value = var[3];
        USE(value);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that pages returned by alloc_pages() are uninitialized. */
static void test_uninit_page(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE(expect);
        struct page *page;
        int *ptr;

        kunit_info(test, "uninitialized page allocation (UMR report)\n");
        page = alloc_pages(GFP_KERNEL, 0);
        ptr = page_address(page);
        USE(*ptr);
        __free_pages(page, 0);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Allocate a zeroed page of the given @order, free it, and return a (dangling)
 * pointer @offset bytes into it.
 */
static volatile char *test_uaf_pages_helper(int order, int offset)
{
        struct page *page;
        volatile char *var;

        /* Memory is initialized up until __free_pages() thanks to __GFP_ZERO. */
        page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        var = page_address(page) + offset;
        __free_pages(page, order);

        return var;
}

/* Test case: ensure that use-after-free reporting works for a freed page. */
static void test_uaf_pages(struct kunit *test)
{
        EXPECTATION_USE_AFTER_FREE(expect);
        volatile char value;

        kunit_info(test, "use-after-free on a freed page (UMR report)\n");
        /* Allocate a single page, free it, then try to access it. */
        value = *test_uaf_pages_helper(0, 3);
        USE(value);

        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that UAF reporting works for high order pages. */
static void test_uaf_high_order_pages(struct kunit *test)
{
        EXPECTATION_USE_AFTER_FREE(expect);
        volatile char value;

        kunit_info(test,
                   "use-after-free on a freed high-order page (UMR report)\n");
        /*
         * Create a high-order non-compound page, free it, then try to access
         * its tail page.
         */
        value = *test_uaf_pages_helper(1, PAGE_SIZE + 3);
        USE(value);

        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that uninitialized values are propagated through per-CPU
 * memory.
 */
static void test_percpu_propagate(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE(expect);
        volatile int uninit, check;

        kunit_info(test,
                   "uninit local stored to per_cpu memory (UMR report)\n");

        /* Round-trip the uninit value through per-CPU storage. */
        this_cpu_write(per_cpu_var, uninit);
        check = this_cpu_read(per_cpu_var);
        USE(check);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that passing uninitialized values to printk() leads to an
 * error report.
 */
static void test_printk(struct kunit *test)
{
#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL
        /*
         * With eager param/retval checking enabled, KMSAN will report an error
         * before the call to pr_info().
         */
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_printk");
#else
        EXPECTATION_UNINIT_VALUE_FN(expect, "number");
#endif
        volatile int uninit;

        kunit_info(test, "uninit local passed to pr_info() (UMR report)\n");
        pr_info("%px contains %d\n", &uninit, uninit);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Prevent the compiler from inlining a memcpy() call. */
static noinline void *memcpy_noinline(volatile void *dst,
                                      const volatile void *src, size_t size)
{
        return memcpy((void *)dst, (const void *)src, size);
}

/* Test case: ensure that memcpy() correctly copies initialized values. */
static void test_init_memcpy(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        volatile long long src;
        volatile long long dst = 0;

        /* @src is fully initialized; copying it must keep @dst initialized. */
        src = 1;
        kunit_info(
                test,
                "memcpy()ing aligned initialized src to aligned dst (no reports)\n");
        memcpy_noinline((void *)&dst, (void *)&src, sizeof(src));
        kmsan_check_memory((void *)&dst, sizeof(dst));
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that memcpy() correctly copies uninitialized values between
 * aligned `src` and `dst`.
 */
static void test_memcpy_aligned_to_aligned(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_aligned");
        volatile int uninit_src;
        volatile int dst = 0;

        kunit_info(
                test,
                "memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
        memcpy_noinline((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
        kmsan_check_memory((void *)&dst, sizeof(dst));
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that memcpy() correctly copies uninitialized values between
 * aligned `src` and unaligned `dst`.
 *
 * Copying aligned 4-byte value to an unaligned one leads to touching two
 * aligned 4-byte values. This test case checks that KMSAN correctly reports an
 * error on the mentioned two values.
 */
static void test_memcpy_aligned_to_unaligned(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_unaligned");
        volatile int uninit_src;
        volatile char dst[8] = { 0 };

        kunit_info(
                test,
                "memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
        kmsan_check_memory((void *)&uninit_src, sizeof(uninit_src));
        /* Copy to dst[1] so the shadow lands in two aligned 4-byte words. */
        memcpy_noinline((void *)&dst[1], (void *)&uninit_src,
                        sizeof(uninit_src));
        kmsan_check_memory((void *)dst, 4);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        report_reset();
        kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that origin slots do not accidentally get overwritten with
 * zeroes during memcpy().
 *
 * Previously, when copying memory from an aligned buffer to an unaligned one,
 * if there were zero origins corresponding to zero shadow values in the source
 * buffer, they could have ended up being copied to nonzero shadow values in the
 * destination buffer:
 *
 *   memcpy(0xffff888080a00000, 0xffff888080900002, 8)
 *
 *   src (0xffff888080900002): ..xx .... xx..
 *   src origins:              o111 0000 o222
 *   dst (0xffff888080a00000): xx.. ..xx
 *   dst origins:              o111 0000
 *                         (or 0000 o222)
 *
 * (here . stands for an initialized byte, and x for an uninitialized one.
 *
 * Ensure that this does not happen anymore, and for both destination bytes
 * the origin is nonzero (i.e. KMSAN reports an error).
 */
static void test_memcpy_initialized_gap(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_initialized_gap");
        volatile char uninit_src[12];
        volatile char dst[8] = { 0 };

        kunit_info(
                test,
                "unaligned 4-byte initialized value gets a nonzero origin after memcpy() - (2 UMR reports)\n");

        /* Initialize bytes 0-1, 4-7, 10-11; leave 2-3 and 8-9 poisoned. */
        uninit_src[0] = 42;
        uninit_src[1] = 42;
        uninit_src[4] = 42;
        uninit_src[5] = 42;
        uninit_src[6] = 42;
        uninit_src[7] = 42;
        uninit_src[10] = 42;
        uninit_src[11] = 42;
        memcpy_noinline((void *)&dst[0], (void *)&uninit_src[2], 8);

        kmsan_check_memory((void *)&dst[0], 4);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        report_reset();
        /* dst[2..5] came from the initialized middle of src: no report. */
        kmsan_check_memory((void *)&dst[2], 4);
        KUNIT_EXPECT_FALSE(test, report_matches(&expect));
        report_reset();
        kmsan_check_memory((void *)&dst[4], 4);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Generate test cases for memset16(), memset32(), memset64().
 */
#define DEFINE_TEST_MEMSETXX(size)                                  \
        static void test_memset##size(struct kunit *test)           \
        {                                                           \
                EXPECTATION_NO_REPORT(expect);                      \
                volatile uint##size##_t uninit;                     \
                                                                    \
                kunit_info(test,                                    \
                           "memset" #size "() should initialize memory\n"); \
                memset##size((uint##size##_t *)&uninit, 0, 1);      \
                kmsan_check_memory((void *)&uninit, sizeof(uninit)); \
                KUNIT_EXPECT_TRUE(test, report_matches(&expect));   \
        }

DEFINE_TEST_MEMSETXX(16)
DEFINE_TEST_MEMSETXX(32)
DEFINE_TEST_MEMSETXX(64)

/*
 * Test case: ensure that KMSAN does not access shadow memory out of bounds.
 * memset() at both ends of a vmalloc'ed buffer, for sizes 0..128; an OOB
 * shadow access past the buffer would crash rather than fail an expectation.
 */
static void test_memset_on_guarded_buffer(struct kunit *test)
{
        void *buf = vmalloc(PAGE_SIZE);

        kunit_info(test,
                   "memset() on ends of guarded buffer should not crash\n");

        for (size_t size = 0; size <= 128; size++) {
                memset(buf, 0xff, size);
                memset(buf + PAGE_SIZE - size, 0xff, size);
        }
        vfree(buf);
}

/*
 * Recursively fill @array[start..size-1] with Fibonacci-style sums, so each
 * element's origin chain grows by one recursion level.
 */
static noinline void fibonacci(int *array, int size, int start)
{
        if (start < 2 || (start == size))
                return;
        array[start] = array[start - 1] + array[start - 2];
        fibonacci(array, size, start + 1);
}

static void test_long_origin_chain(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_long_origin_chain");
        /* (KMSAN_MAX_ORIGIN_DEPTH * 2) recursive calls to fibonacci(). */
        volatile int accum[KMSAN_MAX_ORIGIN_DEPTH * 2 + 2];
        int last = ARRAY_SIZE(accum) - 1;

        kunit_info(
                test,
                "origin chain exceeding KMSAN_MAX_ORIGIN_DEPTH (UMR report)\n");
        /*
         * We do not set accum[1] to 0, so the uninitializedness will be carried
         * over to accum[2..last].
         */
        accum[0] = 1;
        fibonacci((int *)accum, ARRAY_SIZE(accum), 2);
        kmsan_check_memory((void *)&accum[last], sizeof(int));
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that saving/restoring/printing stacks to/from stackdepot
 * does not trigger errors.
 *
 * KMSAN uses stackdepot to store origin stack traces, that's why we do not
 * instrument lib/stackdepot.c. Yet it must properly mark its outputs as
 * initialized because other kernel features (e.g. netdev tracker) may also
 * access stackdepot from instrumented code.
 */
static void test_stackdepot_roundtrip(struct kunit *test)
{
        unsigned long src_entries[16], *dst_entries;
        unsigned int src_nentries, dst_nentries;
        EXPECTATION_NO_REPORT(expect);
        depot_stack_handle_t handle;

        kunit_info(test, "testing stackdepot roundtrip (no reports)\n");

        src_nentries =
                stack_trace_save(src_entries, ARRAY_SIZE(src_entries), 1);
        handle = stack_depot_save(src_entries, src_nentries, GFP_KERNEL);
        stack_depot_print(handle);
        dst_nentries = stack_depot_fetch(handle, &dst_entries);
        KUNIT_EXPECT_TRUE(test, src_nentries == dst_nentries);

        /* The fetched entries must be considered fully initialized. */
        kmsan_check_memory((void *)dst_entries,
                           sizeof(*dst_entries) * dst_nentries);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that kmsan_unpoison_memory() and the instrumentation work
 * the same.
 */
static void test_unpoison_memory(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_unpoison_memory");
        volatile char a[4], b[4];

        kunit_info(
                test,
                "unpoisoning via the instrumentation vs. kmsan_unpoison_memory() (2 UMR reports)\n");

        /* Initialize a[0] and check a[1]--a[3].
         */
        a[0] = 0;
        kmsan_check_memory((char *)&a[1], 3);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));

        report_reset();

        /* Initialize b[0] and check b[1]--b[3]. */
        kmsan_unpoison_memory((char *)&b[0], 1);
        kmsan_check_memory((char *)&b[1], 3);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: copying an uninitialized source with copy_from_kernel_nofault()
 * must trigger a report attributed to copy_from_kernel_nofault().
 */
static void test_copy_from_kernel_nofault(struct kunit *test)
{
        long ret;
        char buf[4], src[4];
        size_t size = sizeof(buf);

        EXPECTATION_UNINIT_VALUE_FN(expect, "copy_from_kernel_nofault");
        kunit_info(
                test,
                "testing copy_from_kernel_nofault with uninitialized memory\n");

        ret = copy_from_kernel_nofault((char *)&buf[0], (char *)&src[0], size);
        USE(ret);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static struct kunit_case kmsan_test_cases[] = {
        KUNIT_CASE(test_uninit_kmalloc),
        KUNIT_CASE(test_init_kmalloc),
        KUNIT_CASE(test_init_kzalloc),
        KUNIT_CASE(test_uninit_stack_var),
        KUNIT_CASE(test_init_stack_var),
        KUNIT_CASE(test_params),
        KUNIT_CASE(test_uninit_multiple_params),
        KUNIT_CASE(test_uninit_kmsan_check_memory),
        KUNIT_CASE(test_init_kmsan_vmap_vunmap),
        KUNIT_CASE(test_init_vmalloc),
        KUNIT_CASE(test_uninit_page),
        KUNIT_CASE(test_uaf),
        KUNIT_CASE(test_uaf_pages),
        KUNIT_CASE(test_uaf_high_order_pages),
        KUNIT_CASE(test_percpu_propagate),
        KUNIT_CASE(test_printk),
        KUNIT_CASE(test_init_memcpy),
        KUNIT_CASE(test_memcpy_aligned_to_aligned),
        KUNIT_CASE(test_memcpy_aligned_to_unaligned),
        KUNIT_CASE(test_memcpy_initialized_gap),
        KUNIT_CASE(test_memset16),
        KUNIT_CASE(test_memset32),
        KUNIT_CASE(test_memset64),
        KUNIT_CASE(test_memset_on_guarded_buffer),
        KUNIT_CASE(test_long_origin_chain),
        KUNIT_CASE(test_stackdepot_roundtrip),
        KUNIT_CASE(test_unpoison_memory),
        KUNIT_CASE(test_copy_from_kernel_nofault),
        {},
};

/* ===== End test cases ===== */

/* Per-test setup: discard any report state left over from a previous test. */
static int test_init(struct kunit *test)
{
        unsigned long flags;

        spin_lock_irqsave(&observed.lock, flags);
        observed.header[0] = '\0';
        observed.ignore = false;
        observed.available = false;
        spin_unlock_irqrestore(&observed.lock, flags);

        return 0;
}

static void test_exit(struct kunit *test)
{
}

/* panic_on_kmsan value to restore when the suite finishes. */
static int orig_panic_on_kmsan;

/*
 * Suite setup: start capturing console output and make sure KMSAN reports do
 * not panic the kernel while the tests deliberately trigger them.
 */
static int kmsan_suite_init(struct kunit_suite *suite)
{
        register_trace_console(probe_console, NULL);
        orig_panic_on_kmsan = panic_on_kmsan;
        panic_on_kmsan = 0;
        return 0;
}

static void kmsan_suite_exit(struct kunit_suite *suite)
{
        unregister_trace_console(probe_console, NULL);
        /* Wait for in-flight probe_console() calls before restoring state. */
        tracepoint_synchronize_unregister();
        panic_on_kmsan = orig_panic_on_kmsan;
}

static struct kunit_suite kmsan_test_suite = {
        .name = "kmsan",
        .test_cases = kmsan_test_cases,
        .init = test_init,
        .exit = test_exit,
        .suite_init = kmsan_suite_init,
        .suite_exit = kmsan_suite_exit,
};
kunit_test_suites(&kmsan_test_suite);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>");
MODULE_DESCRIPTION("Test cases for KMSAN");