// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for KFENCE memory safety error detector. Since the interface with
 * which KFENCE's reports are obtained is via the console, this is the output we
 * should verify. Each test case checks the presence (or absence) of generated
 * reports. Relies on the 'console' tracepoint to capture reports as they appear
 * in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef arch_kfence_test_address
#define arch_kfence_test_address(addr) (addr)
#endif

#define KFENCE_TEST_REQUIRES(test, cond) do {			\
	if (!(cond))						\
		kunit_skip((test), "Test requires: " #cond);	\
} while (0)

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * KFENCE report and related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn; /* Function pointer to expected function where access occurred. */
	char *addr; /* Address at which the bad access occurred. */
	bool is_write; /* Is access a write. */
};

static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}

/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	unsigned long addr = (unsigned long)r->addr;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */
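	/*
	 * expect[0] holds the expected report title and expect[1] the expected
	 * access description; both are later matched as substrings of the
	 * console lines captured by probe_console().
	 */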

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}

/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NO_MERGE to prevent merging with existing caches.
	 * Use SLAB_ACCOUNT to allocate via memcg, if enabled.
	 */
	flags |= SLAB_NO_MERGE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}

static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}

static inline size_t kmalloc_cache_alignment(size_t size)
{
	return kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)]->align;
}

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}

/*
 * Whether this should be a KFENCE allocation, and if so, on which side of the
 * allocation the closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY, /* KFENCE, any side. */
	ALLOCATE_LEFT, /* KFENCE, left side of page. */
	ALLOCATE_RIGHT, /* KFENCE, right side of page. */
	ALLOCATE_NONE, /* No KFENCE allocation. */
};

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct slab *slab = virt_to_slab(alloc);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && PAGE_ALIGNED(alloc))
				return alloc;
			if (policy == ALLOCATE_RIGHT && !PAGE_ALIGNED(alloc))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}

static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */
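	/*
	 * With ALLOCATE_LEFT the object starts at a page boundary, so buf - 1
	 * falls on the guard page to its left; with ALLOCATE_RIGHT the object
	 * ends at the page boundary, so buf + size falls on the guard page to
	 * its right.
	 */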

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
	/* Every object has at least 8 bytes. */
	memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, get_random_u32_inclusive(8, 307),
						     0, (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}

/* Test init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
	/* Assume it hasn't been disabled on command line. */
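	/*
	 * Fill a KFENCE object with non-zero bytes, free it, and then read it
	 * back through the dangling pointer: init-on-free must have zeroed the
	 * contents, and the very first read should also produce a
	 * use-after-free report.
	 */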

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near impossible with a
		 * default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check first access to not fail test if page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	/* Skip if we think it'd take too long. */
	KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
		cond_resched();
	}

	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
		.is_write = false,
	};

	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* Above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the name. Set up 2 tests per test case, one using the
 * default allocator, and another using a custom memcache (suffix '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }

static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	if (!__kfence_pool)
		return -EINVAL;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;
	else
		test->priv = NULL;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}

static int kfence_suite_init(struct kunit_suite *suite)
{
	register_trace_console(probe_console, NULL);
	return 0;
}

static void kfence_suite_exit(struct kunit_suite *suite)
{
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}

static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kfence_suite_init,
	.suite_exit = kfence_suite_exit,
};

kunit_test_suites(&kfence_test_suite);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");