// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since the interface
 * through which KFENCE's reports are obtained is the console, that is the
 * output we verify. Each test case checks the presence (or absence) of
 * generated reports, relying on the 'console' tracepoint to capture reports as
 * they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef arch_kfence_test_address
#define arch_kfence_test_address(addr) (addr)
#endif

#define KFENCE_TEST_REQUIRES(test, cond) do {			\
	if (!(cond))						\
		kunit_skip((test), "Test requires: " #cond);	\
} while (0)
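
/*
 * Example usage (as in test_init_on_free() below): skip the test unless
 * init-on-free is enabled by default:
 *
 *   KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
 */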

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * A KFENCE report related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}
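
/*
 * Illustrative example of the two lines of interest captured above (function
 * offset, address, and object number vary between runs and are assumed here):
 *
 *   BUG: KFENCE: use-after-free read in test_use_after_free_read+0x8c/0x110
 *   Use-after-free read at 0x0000000012345678 (in kfence-#21):
 */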

/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn; /* Function pointer to expected function where access occurred. */
	char *addr; /* Address at which the bad access occurred. */
	bool is_write; /* Whether the access is a write. */
};

static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}

/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	unsigned long addr = (unsigned long)r->addr;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}
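
/*
 * For example, for a KFENCE_ERROR_OOB read in test_out_of_bounds_read, the
 * expected strings built above end up as (offset and module name already
 * stripped, address varies):
 *
 *   expect[0]: "BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read"
 *   expect[1]: "Out-of-bounds read at 0x<addr>"
 */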

/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NO_MERGE to prevent merging with existing caches.
	 * Use SLAB_ACCOUNT to allocate via memcg, if enabled.
	 */
	flags |= SLAB_NO_MERGE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}

static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}

static inline size_t kmalloc_cache_alignment(size_t size)
{
	/* Only used to get ->align, so no need to pass in the real caller. */
	enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, 0);
	return kmalloc_caches[type][__kmalloc_index(size, false)]->align;
}

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}

/*
 * Whether this should be a KFENCE allocation, and on which side of the
 * allocation the closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY, /* KFENCE, any side. */
	ALLOCATE_LEFT, /* KFENCE, left side of page. */
	ALLOCATE_RIGHT, /* KFENCE, right side of page. */
	ALLOCATE_NONE, /* No KFENCE allocation. */
};
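
/*
 * In test_alloc() terms: ALLOCATE_LEFT yields a PAGE_ALIGNED() object (the
 * guard page immediately precedes it), ALLOCATE_RIGHT yields an object that
 * is not page-aligned (its end abuts the guard page that follows), and
 * ALLOCATE_ANY accepts either placement.
 */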

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	/*
	 * Especially for non-preemptible kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct slab *slab = virt_to_slab(alloc);
			enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, _RET_IP_);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[type][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && PAGE_ALIGNED(alloc))
				return alloc;
			if (policy == ALLOCATE_RIGHT && !PAGE_ALIGNED(alloc))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}

static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_use_after_free_read_nofault(struct kunit *test)
{
	const size_t size = 32;
	char *addr;
	char dst;
	int ret;

	setup_test_cache(test, size, 0, NULL);
	addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(addr);
	/* Use after free with *_nofault() */
	ret = copy_from_kernel_nofault(&dst, addr, 1);
	KUNIT_EXPECT_EQ(test, ret, -EFAULT);
	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 bytes under SLUB (or 128 bytes
 * under SLAB), so it is impossible for the allocated object to contiguously
 * line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
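/*
 * Worked example (alignment value assumed): with size == 73 and align == 8,
 * a right-allocated object starts at page_end - 80 (73 rounded up to the
 * alignment), so bytes [buf + 73, page_end) form a 7-byte gap that is still
 * on the object's page. Reading buf + 73 therefore does not fault, while
 * buf + 73 + 8 lies past the page boundary and hits the guard page.
 */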
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
	/* Every object has at least 8 bytes. */
	memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, get_random_u32_inclusive(8, 307),
						     0, (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}

/* Test init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
	/* Assume it hasn't been disabled on the command line. */

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near impossible with a
		 * default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check the first access to not fail the test if the page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	/* Skip if we think it'd take too long. */
	KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get the same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
		cond_resched();
	}

	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
		.is_write = false,
	};

	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* Above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the name. Set up 2 tests per test case: one using the
 * default allocator, and another using a custom memcache (suffix '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }
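
/*
 * E.g. KFENCE_KUNIT_CASE(test_out_of_bounds_read) registers both
 * "test_out_of_bounds_read" and "test_out_of_bounds_read-memcache"; the
 * latter matches the strstr() check in test_init() below and therefore runs
 * against test_cache.
 */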

static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_use_after_free_read_nofault),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	if (!__kfence_pool)
		return -EINVAL;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;
	else
		test->priv = NULL;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}

static int kfence_suite_init(struct kunit_suite *suite)
{
	register_trace_console(probe_console, NULL);
	return 0;
}

static void kfence_suite_exit(struct kunit_suite *suite)
{
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}

static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kfence_suite_init,
	.suite_exit = kfence_suite_exit,
};

kunit_test_suites(&kfence_test_suite);
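
/*
 * Illustrative invocation (assumes an architecture with KFENCE support and a
 * kernel built with CONFIG_KFENCE=y and CONFIG_KFENCE_KUNIT_TEST=y; see
 * mm/kfence/Kconfig):
 *
 *   ./tools/testing/kunit/kunit.py run --arch=x86_64 'kfence*'
 */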

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");
MODULE_DESCRIPTION("kfence unit test suite");