// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since the interface
 * with which KFENCE's reports are obtained is via the console, this is the
 * output we should verify. Each test case checks for the presence (or absence)
 * of generated reports. Relies on the 'console' tracepoint to capture reports
 * as they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include "kfence.h"

/* Report as observed from console. */
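/* lines[0] holds the report title; lines[1] the line with the access address. */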
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * This is a KFENCE report, and it is related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn; /* Function pointer to expected function where access occurred. */
	char *addr; /* Address at which the bad access occurred. */
	bool is_write; /* Is the access a write. */
};

static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}

/* Check that the observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}

/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any
	 * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to
	 * allocate via memcg, if enabled.
	 */
	flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}

static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}

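/* Object alignment of the kmalloc cache that serves allocations of @size bytes. */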
static inline size_t kmalloc_cache_alignment(size_t size)
{
	return kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)]->align;
}

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}

/*
 * Whether this should be a KFENCE allocation, and if so, on which side of the
 * allocation the closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY, /* KFENCE, any side. */
	ALLOCATE_LEFT, /* KFENCE, left side of page. */
	ALLOCATE_RIGHT, /* KFENCE, right side of page. */
	ALLOCATE_NONE, /* No KFENCE allocation. */
};

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct page *page = virt_to_head_page(alloc);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1);

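			/*
			 * A KFENCE object is page-aligned on one side only: a
			 * page-aligned address implies the guard page is to
			 * the left; otherwise the object ends at the page
			 * boundary, with the guard page to the right.
			 */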
			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
			if (policy == ALLOCATE_RIGHT &&
			    !IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}

static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/* Test both sides. */

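	/*
	 * The writes below land inside the object's page but outside the
	 * object itself; KFENCE detects the clobbered canary bytes only when
	 * the object is freed.
	 */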
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned to 8 bytes under SLUB and 128 bytes
 * under SLAB. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
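	/* The redzone write is only caught by the canary check at free time. */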
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
	/* Every object is at least 8 bytes in size. */
	memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, 8 + prandom_u32_max(300), 0,
						     (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}

/* Test that init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	if (!IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON))
		return;
	/* Assume it hasn't been disabled on the command line. */

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is nearly impossible
		 * with a default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check first access to not fail test if page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	if (CONFIG_KFENCE_SAMPLE_INTERVAL > 100) {
		kunit_warn(test, "skipping ... would take too long\n");
		return;
	}

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get the same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (i == CONFIG_KFENCE_NUM_OBJECTS) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
	}

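	/* buf2 reuses buf1's object, which was dirtied before the free; __GFP_ZERO must have zeroed it. */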
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
		.is_write = false,
	};

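	/*
	 * The address lies within the KFENCE pool but belongs to no allocated
	 * object; KFENCE should report the access as "invalid".
	 */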
	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test that SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* The above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed the earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to the KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the name. Set up 2 tests per test case: one using the
 * default allocator, and another using a custom memcache (suffix '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }

static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;
	else
		test->priv = NULL;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}

static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
};
static struct kunit_suite *kfence_test_suites[] = { &kfence_test_suite, NULL };

static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
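	/* Compile-time check that probe_console matches the tracepoint's type. */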
	check_trace_callback_type_console(probe_console);
	if (!strcmp(tp->name, "console"))
		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
	if (!strcmp(tp->name, "console"))
		tracepoint_probe_unregister(tp, probe_console, NULL);
}

/*
 * We only want to do the tracepoint setup and teardown once, therefore we
 * have to customize the init and exit functions and cannot rely on
 * kunit_test_suite().
 */
static int __init kfence_test_init(void)
{
	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * won't work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	return __kunit_test_suites_init(kfence_test_suites);
}

static void kfence_test_exit(void)
{
	__kunit_test_suites_exit(kfence_test_suites);
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	tracepoint_synchronize_unregister();
}

late_initcall_sync(kfence_test_init);
module_exit(kfence_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");