1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Test cases for KMSAN. 4 * For each test case checks the presence (or absence) of generated reports. 5 * Relies on 'console' tracepoint to capture reports as they appear in the 6 * kernel log. 7 * 8 * Copyright (C) 2021-2022, Google LLC. 9 * Author: Alexander Potapenko <glider@google.com> 10 * 11 */ 12 13 #include <kunit/test.h> 14 #include "kmsan.h" 15 16 #include <linux/jiffies.h> 17 #include <linux/kernel.h> 18 #include <linux/kmsan.h> 19 #include <linux/mm.h> 20 #include <linux/random.h> 21 #include <linux/slab.h> 22 #include <linux/spinlock.h> 23 #include <linux/string.h> 24 #include <linux/tracepoint.h> 25 #include <trace/events/printk.h> 26 27 static DEFINE_PER_CPU(int, per_cpu_var); 28 29 /* Report as observed from console. */ 30 static struct { 31 spinlock_t lock; 32 bool available; 33 bool ignore; /* Stop console output collection. */ 34 char header[256]; 35 } observed = { 36 .lock = __SPIN_LOCK_UNLOCKED(observed.lock), 37 }; 38 39 /* Probe for console output: obtains observed lines of interest. */ 40 static void probe_console(void *ignore, const char *buf, size_t len) 41 { 42 unsigned long flags; 43 44 if (observed.ignore) 45 return; 46 spin_lock_irqsave(&observed.lock, flags); 47 48 if (strnstr(buf, "BUG: KMSAN: ", len)) { 49 /* 50 * KMSAN report and related to the test. 51 * 52 * The provided @buf is not NUL-terminated; copy no more than 53 * @len bytes and let strscpy() add the missing NUL-terminator. 54 */ 55 strscpy(observed.header, buf, 56 min(len + 1, sizeof(observed.header))); 57 WRITE_ONCE(observed.available, true); 58 observed.ignore = true; 59 } 60 spin_unlock_irqrestore(&observed.lock, flags); 61 } 62 63 /* Check if a report related to the test exists. */ 64 static bool report_available(void) 65 { 66 return READ_ONCE(observed.available); 67 } 68 69 /* Information we expect in a report. */ 70 struct expect_report { 71 const char *error_type; /* Error type. */ 72 /* 73 * Kernel symbol from the error header, or NULL if no report is 74 * expected. 75 */ 76 const char *symbol; 77 }; 78 79 /* Check observed report matches information in @r. */ 80 static bool report_matches(const struct expect_report *r) 81 { 82 typeof(observed.header) expected_header; 83 unsigned long flags; 84 bool ret = false; 85 const char *end; 86 char *cur; 87 88 /* Doubled-checked locking. */ 89 if (!report_available() || !r->symbol) 90 return (!report_available() && !r->symbol); 91 92 /* Generate expected report contents. */ 93 94 /* Title */ 95 cur = expected_header; 96 end = &expected_header[sizeof(expected_header) - 1]; 97 98 cur += scnprintf(cur, end - cur, "BUG: KMSAN: %s", r->error_type); 99 100 scnprintf(cur, end - cur, " in %s", r->symbol); 101 /* The exact offset won't match, remove it; also strip module name. */ 102 cur = strchr(expected_header, '+'); 103 if (cur) 104 *cur = '\0'; 105 106 spin_lock_irqsave(&observed.lock, flags); 107 if (!report_available()) 108 goto out; /* A new report is being captured. */ 109 110 /* Finally match expected output to what we actually observed. */ 111 ret = strstr(observed.header, expected_header); 112 out: 113 spin_unlock_irqrestore(&observed.lock, flags); 114 115 return ret; 116 } 117 118 /* ===== Test cases ===== */ 119 120 /* Prevent replacing branch with select in LLVM. */ 121 static noinline void check_true(char *arg) 122 { 123 pr_info("%s is true\n", arg); 124 } 125 126 static noinline void check_false(char *arg) 127 { 128 pr_info("%s is false\n", arg); 129 } 130 131 #define USE(x) \ 132 do { \ 133 if (x) \ 134 check_true(#x); \ 135 else \ 136 check_false(#x); \ 137 } while (0) 138 139 #define EXPECTATION_ETYPE_FN(e, reason, fn) \ 140 struct expect_report e = { \ 141 .error_type = reason, \ 142 .symbol = fn, \ 143 } 144 145 #define EXPECTATION_NO_REPORT(e) EXPECTATION_ETYPE_FN(e, NULL, NULL) 146 #define EXPECTATION_UNINIT_VALUE_FN(e, fn) \ 147 EXPECTATION_ETYPE_FN(e, "uninit-value", fn) 148 #define EXPECTATION_UNINIT_VALUE(e) EXPECTATION_UNINIT_VALUE_FN(e, __func__) 149 #define EXPECTATION_USE_AFTER_FREE(e) \ 150 EXPECTATION_ETYPE_FN(e, "use-after-free", __func__) 151 152 /* Test case: ensure that kmalloc() returns uninitialized memory. */ 153 static void test_uninit_kmalloc(struct kunit *test) 154 { 155 EXPECTATION_UNINIT_VALUE(expect); 156 int *ptr; 157 158 kunit_info(test, "uninitialized kmalloc test (UMR report)\n"); 159 ptr = kmalloc(sizeof(*ptr), GFP_KERNEL); 160 USE(*ptr); 161 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 162 } 163 164 /* 165 * Test case: ensure that kmalloc'ed memory becomes initialized after memset(). 166 */ 167 static void test_init_kmalloc(struct kunit *test) 168 { 169 EXPECTATION_NO_REPORT(expect); 170 int *ptr; 171 172 kunit_info(test, "initialized kmalloc test (no reports)\n"); 173 ptr = kmalloc(sizeof(*ptr), GFP_KERNEL); 174 memset(ptr, 0, sizeof(*ptr)); 175 USE(*ptr); 176 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 177 } 178 179 /* Test case: ensure that kzalloc() returns initialized memory. */ 180 static void test_init_kzalloc(struct kunit *test) 181 { 182 EXPECTATION_NO_REPORT(expect); 183 int *ptr; 184 185 kunit_info(test, "initialized kzalloc test (no reports)\n"); 186 ptr = kzalloc(sizeof(*ptr), GFP_KERNEL); 187 USE(*ptr); 188 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 189 } 190 191 /* Test case: ensure that local variables are uninitialized by default. */ 192 static void test_uninit_stack_var(struct kunit *test) 193 { 194 EXPECTATION_UNINIT_VALUE(expect); 195 volatile int cond; 196 197 kunit_info(test, "uninitialized stack variable (UMR report)\n"); 198 USE(cond); 199 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 200 } 201 202 /* Test case: ensure that local variables with initializers are initialized. */ 203 static void test_init_stack_var(struct kunit *test) 204 { 205 EXPECTATION_NO_REPORT(expect); 206 volatile int cond = 1; 207 208 kunit_info(test, "initialized stack variable (no reports)\n"); 209 USE(cond); 210 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 211 } 212 213 static noinline void two_param_fn_2(int arg1, int arg2) 214 { 215 USE(arg1); 216 USE(arg2); 217 } 218 219 static noinline void one_param_fn(int arg) 220 { 221 two_param_fn_2(arg, arg); 222 USE(arg); 223 } 224 225 static noinline void two_param_fn(int arg1, int arg2) 226 { 227 int init = 0; 228 229 one_param_fn(init); 230 USE(arg1); 231 USE(arg2); 232 } 233 234 static void test_params(struct kunit *test) 235 { 236 #ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL 237 /* 238 * With eager param/retval checking enabled, KMSAN will report an error 239 * before the call to two_param_fn(). 240 */ 241 EXPECTATION_UNINIT_VALUE_FN(expect, "test_params"); 242 #else 243 EXPECTATION_UNINIT_VALUE_FN(expect, "two_param_fn"); 244 #endif 245 volatile int uninit, init = 1; 246 247 kunit_info(test, 248 "uninit passed through a function parameter (UMR report)\n"); 249 two_param_fn(uninit, init); 250 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 251 } 252 253 static int signed_sum3(int a, int b, int c) 254 { 255 return a + b + c; 256 } 257 258 /* 259 * Test case: ensure that uninitialized values are tracked through function 260 * arguments. 261 */ 262 static void test_uninit_multiple_params(struct kunit *test) 263 { 264 EXPECTATION_UNINIT_VALUE(expect); 265 volatile char b = 3, c; 266 volatile int a; 267 268 kunit_info(test, "uninitialized local passed to fn (UMR report)\n"); 269 USE(signed_sum3(a, b, c)); 270 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 271 } 272 273 /* Helper function to make an array uninitialized. */ 274 static noinline void do_uninit_local_array(char *array, int start, int stop) 275 { 276 volatile char uninit; 277 278 for (int i = start; i < stop; i++) 279 array[i] = uninit; 280 } 281 282 /* 283 * Test case: ensure kmsan_check_memory() reports an error when checking 284 * uninitialized memory. 285 */ 286 static void test_uninit_kmsan_check_memory(struct kunit *test) 287 { 288 EXPECTATION_UNINIT_VALUE_FN(expect, "test_uninit_kmsan_check_memory"); 289 volatile char local_array[8]; 290 291 kunit_info( 292 test, 293 "kmsan_check_memory() called on uninit local (UMR report)\n"); 294 do_uninit_local_array((char *)local_array, 5, 7); 295 296 kmsan_check_memory((char *)local_array, 8); 297 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 298 } 299 300 /* 301 * Test case: check that a virtual memory range created with vmap() from 302 * initialized pages is still considered as initialized. 303 */ 304 static void test_init_kmsan_vmap_vunmap(struct kunit *test) 305 { 306 EXPECTATION_NO_REPORT(expect); 307 const int npages = 2; 308 struct page **pages; 309 void *vbuf; 310 311 kunit_info(test, "pages initialized via vmap (no reports)\n"); 312 313 pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL); 314 for (int i = 0; i < npages; i++) 315 pages[i] = alloc_page(GFP_KERNEL); 316 vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL); 317 memset(vbuf, 0xfe, npages * PAGE_SIZE); 318 for (int i = 0; i < npages; i++) 319 kmsan_check_memory(page_address(pages[i]), PAGE_SIZE); 320 321 if (vbuf) 322 vunmap(vbuf); 323 for (int i = 0; i < npages; i++) { 324 if (pages[i]) 325 __free_page(pages[i]); 326 } 327 kfree(pages); 328 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 329 } 330 331 /* 332 * Test case: ensure that memset() can initialize a buffer allocated via 333 * vmalloc(). 334 */ 335 static void test_init_vmalloc(struct kunit *test) 336 { 337 EXPECTATION_NO_REPORT(expect); 338 int npages = 8; 339 char *buf; 340 341 kunit_info(test, "vmalloc buffer can be initialized (no reports)\n"); 342 buf = vmalloc(PAGE_SIZE * npages); 343 buf[0] = 1; 344 memset(buf, 0xfe, PAGE_SIZE * npages); 345 USE(buf[0]); 346 for (int i = 0; i < npages; i++) 347 kmsan_check_memory(&buf[PAGE_SIZE * i], PAGE_SIZE); 348 vfree(buf); 349 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 350 } 351 352 /* Test case: ensure that use-after-free reporting works. */ 353 static void test_uaf(struct kunit *test) 354 { 355 EXPECTATION_USE_AFTER_FREE(expect); 356 volatile int value; 357 volatile int *var; 358 359 kunit_info(test, "use-after-free in kmalloc-ed buffer (UMR report)\n"); 360 var = kmalloc(80, GFP_KERNEL); 361 var[3] = 0xfeedface; 362 kfree((int *)var); 363 /* Copy the invalid value before checking it. */ 364 value = var[3]; 365 USE(value); 366 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 367 } 368 369 /* 370 * Test case: ensure that uninitialized values are propagated through per-CPU 371 * memory. 372 */ 373 static void test_percpu_propagate(struct kunit *test) 374 { 375 EXPECTATION_UNINIT_VALUE(expect); 376 volatile int uninit, check; 377 378 kunit_info(test, 379 "uninit local stored to per_cpu memory (UMR report)\n"); 380 381 this_cpu_write(per_cpu_var, uninit); 382 check = this_cpu_read(per_cpu_var); 383 USE(check); 384 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 385 } 386 387 /* 388 * Test case: ensure that passing uninitialized values to printk() leads to an 389 * error report. 390 */ 391 static void test_printk(struct kunit *test) 392 { 393 #ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL 394 /* 395 * With eager param/retval checking enabled, KMSAN will report an error 396 * before the call to pr_info(). 397 */ 398 EXPECTATION_UNINIT_VALUE_FN(expect, "test_printk"); 399 #else 400 EXPECTATION_UNINIT_VALUE_FN(expect, "number"); 401 #endif 402 volatile int uninit; 403 404 kunit_info(test, "uninit local passed to pr_info() (UMR report)\n"); 405 pr_info("%px contains %d\n", &uninit, uninit); 406 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 407 } 408 409 /* 410 * Test case: ensure that memcpy() correctly copies uninitialized values between 411 * aligned `src` and `dst`. 412 */ 413 static void test_memcpy_aligned_to_aligned(struct kunit *test) 414 { 415 EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_aligned"); 416 volatile int uninit_src; 417 volatile int dst = 0; 418 419 kunit_info( 420 test, 421 "memcpy()ing aligned uninit src to aligned dst (UMR report)\n"); 422 memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src)); 423 kmsan_check_memory((void *)&dst, sizeof(dst)); 424 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 425 } 426 427 /* 428 * Test case: ensure that memcpy() correctly copies uninitialized values between 429 * aligned `src` and unaligned `dst`. 430 * 431 * Copying aligned 4-byte value to an unaligned one leads to touching two 432 * aligned 4-byte values. This test case checks that KMSAN correctly reports an 433 * error on the first of the two values. 434 */ 435 static void test_memcpy_aligned_to_unaligned(struct kunit *test) 436 { 437 EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_unaligned"); 438 volatile int uninit_src; 439 volatile char dst[8] = { 0 }; 440 441 kunit_info( 442 test, 443 "memcpy()ing aligned uninit src to unaligned dst (UMR report)\n"); 444 memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src)); 445 kmsan_check_memory((void *)dst, 4); 446 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 447 } 448 449 /* 450 * Test case: ensure that memcpy() correctly copies uninitialized values between 451 * aligned `src` and unaligned `dst`. 452 * 453 * Copying aligned 4-byte value to an unaligned one leads to touching two 454 * aligned 4-byte values. This test case checks that KMSAN correctly reports an 455 * error on the second of the two values. 456 */ 457 static void test_memcpy_aligned_to_unaligned2(struct kunit *test) 458 { 459 EXPECTATION_UNINIT_VALUE_FN(expect, 460 "test_memcpy_aligned_to_unaligned2"); 461 volatile int uninit_src; 462 volatile char dst[8] = { 0 }; 463 464 kunit_info( 465 test, 466 "memcpy()ing aligned uninit src to unaligned dst - part 2 (UMR report)\n"); 467 memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src)); 468 kmsan_check_memory((void *)&dst[4], sizeof(uninit_src)); 469 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 470 } 471 472 static noinline void fibonacci(int *array, int size, int start) { 473 if (start < 2 || (start == size)) 474 return; 475 array[start] = array[start - 1] + array[start - 2]; 476 fibonacci(array, size, start + 1); 477 } 478 479 static void test_long_origin_chain(struct kunit *test) 480 { 481 EXPECTATION_UNINIT_VALUE_FN(expect, 482 "test_long_origin_chain"); 483 /* (KMSAN_MAX_ORIGIN_DEPTH * 2) recursive calls to fibonacci(). */ 484 volatile int accum[KMSAN_MAX_ORIGIN_DEPTH * 2 + 2]; 485 int last = ARRAY_SIZE(accum) - 1; 486 487 kunit_info( 488 test, 489 "origin chain exceeding KMSAN_MAX_ORIGIN_DEPTH (UMR report)\n"); 490 /* 491 * We do not set accum[1] to 0, so the uninitializedness will be carried 492 * over to accum[2..last]. 493 */ 494 accum[0] = 1; 495 fibonacci((int *)accum, ARRAY_SIZE(accum), 2); 496 kmsan_check_memory((void *)&accum[last], sizeof(int)); 497 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); 498 } 499 500 static struct kunit_case kmsan_test_cases[] = { 501 KUNIT_CASE(test_uninit_kmalloc), 502 KUNIT_CASE(test_init_kmalloc), 503 KUNIT_CASE(test_init_kzalloc), 504 KUNIT_CASE(test_uninit_stack_var), 505 KUNIT_CASE(test_init_stack_var), 506 KUNIT_CASE(test_params), 507 KUNIT_CASE(test_uninit_multiple_params), 508 KUNIT_CASE(test_uninit_kmsan_check_memory), 509 KUNIT_CASE(test_init_kmsan_vmap_vunmap), 510 KUNIT_CASE(test_init_vmalloc), 511 KUNIT_CASE(test_uaf), 512 KUNIT_CASE(test_percpu_propagate), 513 KUNIT_CASE(test_printk), 514 KUNIT_CASE(test_memcpy_aligned_to_aligned), 515 KUNIT_CASE(test_memcpy_aligned_to_unaligned), 516 KUNIT_CASE(test_memcpy_aligned_to_unaligned2), 517 KUNIT_CASE(test_long_origin_chain), 518 {}, 519 }; 520 521 /* ===== End test cases ===== */ 522 523 static int test_init(struct kunit *test) 524 { 525 unsigned long flags; 526 527 spin_lock_irqsave(&observed.lock, flags); 528 observed.header[0] = '\0'; 529 observed.ignore = false; 530 observed.available = false; 531 spin_unlock_irqrestore(&observed.lock, flags); 532 533 return 0; 534 } 535 536 static void test_exit(struct kunit *test) 537 { 538 } 539 540 static void register_tracepoints(struct tracepoint *tp, void *ignore) 541 { 542 check_trace_callback_type_console(probe_console); 543 if (!strcmp(tp->name, "console")) 544 WARN_ON(tracepoint_probe_register(tp, probe_console, NULL)); 545 } 546 547 static void unregister_tracepoints(struct tracepoint *tp, void *ignore) 548 { 549 if (!strcmp(tp->name, "console")) 550 tracepoint_probe_unregister(tp, probe_console, NULL); 551 } 552 553 static int kmsan_suite_init(struct kunit_suite *suite) 554 { 555 /* 556 * Because we want to be able to build the test as a module, we need to 557 * iterate through all known tracepoints, since the static registration 558 * won't work here. 559 */ 560 for_each_kernel_tracepoint(register_tracepoints, NULL); 561 return 0; 562 } 563 564 static void kmsan_suite_exit(struct kunit_suite *suite) 565 { 566 for_each_kernel_tracepoint(unregister_tracepoints, NULL); 567 tracepoint_synchronize_unregister(); 568 } 569 570 static struct kunit_suite kmsan_test_suite = { 571 .name = "kmsan", 572 .test_cases = kmsan_test_cases, 573 .init = test_init, 574 .exit = test_exit, 575 .suite_init = kmsan_suite_init, 576 .suite_exit = kmsan_suite_exit, 577 }; 578 kunit_test_suites(&kmsan_test_suite); 579 580 MODULE_LICENSE("GPL"); 581 MODULE_AUTHOR("Alexander Potapenko <glider@google.com>"); 582