xref: /linux/mm/kasan/kasan_test_c.c (revision cdd30ebb1b9f36159d66f088b61aee264e649d7a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5  * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
6  */
7 
8 #define pr_fmt(fmt) "kasan: test: " fmt
9 
10 #include <kunit/test.h>
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/io.h>
14 #include <linux/kasan.h>
15 #include <linux/kernel.h>
16 #include <linux/mempool.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/module.h>
20 #include <linux/printk.h>
21 #include <linux/random.h>
22 #include <linux/set_memory.h>
23 #include <linux/slab.h>
24 #include <linux/string.h>
25 #include <linux/tracepoint.h>
26 #include <linux/uaccess.h>
27 #include <linux/vmalloc.h>
28 #include <trace/events/printk.h>
29 
30 #include <asm/page.h>
31 
32 #include "kasan.h"
33 
34 #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
35 
36 MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
37 
38 static bool multishot;
39 
40 /* Fields set based on lines observed in the console. */
41 static struct {
42 	bool report_found;
43 	bool async_fault;
44 } test_status;
45 
46 /*
47  * Some tests use these global variables to store return values from function
48  * calls that could otherwise be eliminated by the compiler as dead code.
49  */
50 void *kasan_ptr_result;
51 int kasan_int_result;
52 
53 /* Probe for console output: obtains test_status lines of interest. */
54 static void probe_console(void *ignore, const char *buf, size_t len)
55 {
56 	if (strnstr(buf, "BUG: KASAN: ", len))
57 		WRITE_ONCE(test_status.report_found, true);
58 	else if (strnstr(buf, "Asynchronous fault: ", len))
59 		WRITE_ONCE(test_status.async_fault, true);
60 }
61 
62 static int kasan_suite_init(struct kunit_suite *suite)
63 {
64 	if (!kasan_enabled()) {
65 		pr_err("Can't run KASAN tests with KASAN disabled");
66 		return -1;
67 	}
68 
69 	/* Stop failing KUnit tests on KASAN reports. */
70 	kasan_kunit_test_suite_start();
71 
72 	/*
73 	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
74 	 * report the first detected bug and panic the kernel if panic_on_warn
75 	 * is enabled.
76 	 */
77 	multishot = kasan_save_enable_multi_shot();
78 
79 	register_trace_console(probe_console, NULL);
80 	return 0;
81 }
82 
83 static void kasan_suite_exit(struct kunit_suite *suite)
84 {
85 	kasan_kunit_test_suite_end();
86 	kasan_restore_multi_shot(multishot);
87 	unregister_trace_console(probe_console, NULL);
88 	tracepoint_synchronize_unregister();
89 }
90 
91 static void kasan_test_exit(struct kunit *test)
92 {
93 	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
94 }
95 
96 /**
97  * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
98  * KASAN report; causes a KUnit test failure otherwise.
99  *
100  * @test: Currently executing KUnit test.
101  * @expression: Expression that must produce a KASAN report.
102  *
103  * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
104  * checking is auto-disabled. When this happens, this test handler reenables
105  * tag checking. As tag checking can be only disabled or enabled per CPU,
106  * this handler disables migration (preemption).
107  *
108  * Since the compiler doesn't see that the expression can change the test_status
109  * fields, it can reorder or optimize away the accesses to those fields.
110  * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
111  * expression to prevent that.
112  *
113  * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
114  * as false. This allows detecting KASAN reports that happen outside of the
115  * checks by asserting !test_status.report_found at the start of
116  * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
117  */
118 #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
119 	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
120 	    kasan_sync_fault_possible())				\
121 		migrate_disable();					\
122 	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));	\
123 	barrier();							\
124 	expression;							\
125 	barrier();							\
126 	if (kasan_async_fault_possible())				\
127 		kasan_force_async_fault();				\
128 	if (!READ_ONCE(test_status.report_found)) {			\
129 		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
130 				"expected in \"" #expression		\
131 				 "\", but none occurred");		\
132 	}								\
133 	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
134 	    kasan_sync_fault_possible()) {				\
135 		if (READ_ONCE(test_status.report_found) &&		\
136 		    !READ_ONCE(test_status.async_fault))		\
137 			kasan_enable_hw_tags();				\
138 		migrate_enable();					\
139 	}								\
140 	WRITE_ONCE(test_status.report_found, false);			\
141 	WRITE_ONCE(test_status.async_fault, false);			\
142 } while (0)
143 
144 #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
145 	if (!IS_ENABLED(config))					\
146 		kunit_skip((test), "Test requires " #config "=y");	\
147 } while (0)
148 
149 #define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
150 	if (IS_ENABLED(config))						\
151 		kunit_skip((test), "Test requires " #config "=n");	\
152 } while (0)
153 
154 #define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do {		\
155 	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))				\
156 		break;  /* No compiler instrumentation. */		\
157 	if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX))	\
158 		break;  /* Should always be instrumented! */		\
159 	if (IS_ENABLED(CONFIG_GENERIC_ENTRY))				\
160 		kunit_skip((test), "Test requires checked mem*()");	\
161 } while (0)
162 
163 static void kmalloc_oob_right(struct kunit *test)
164 {
165 	char *ptr;
166 	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
167 
168 	ptr = kmalloc(size, GFP_KERNEL);
169 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
170 
171 	OPTIMIZER_HIDE_VAR(ptr);
172 	/*
173 	 * An unaligned access past the requested kmalloc size.
174 	 * Only generic KASAN can precisely detect these.
175 	 */
176 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
177 		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
178 
179 	/*
180 	 * An aligned access into the first out-of-bounds granule that falls
181 	 * within the aligned kmalloc object.
182 	 */
183 	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');
184 
185 	/* Out-of-bounds access past the aligned kmalloc object. */
186 	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
187 					ptr[size + KASAN_GRANULE_SIZE + 5]);
188 
189 	kfree(ptr);
190 }
191 
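/* Check that KASAN detects an out-of-bounds access to the left of a kmalloc object. */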
192 static void kmalloc_oob_left(struct kunit *test)
193 {
194 	char *ptr;
195 	size_t size = 15;
196 
197 	ptr = kmalloc(size, GFP_KERNEL);
198 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
199 
200 	OPTIMIZER_HIDE_VAR(ptr);
201 	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
202 	kfree(ptr);
203 }
204 
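/* Check that KASAN detects an out-of-bounds access past a kmalloc_node() object. */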
205 static void kmalloc_node_oob_right(struct kunit *test)
206 {
207 	char *ptr;
208 	size_t size = 4096;
209 
210 	ptr = kmalloc_node(size, GFP_KERNEL, 0);
211 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
212 
213 	OPTIMIZER_HIDE_VAR(ptr);
214 	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
215 	kfree(ptr);
216 }
217 
218 static void kmalloc_track_caller_oob_right(struct kunit *test)
219 {
220 	char *ptr;
221 	size_t size = 128 - KASAN_GRANULE_SIZE;
222 
223 	/*
224 	 * Check that KASAN detects an out-of-bounds access for an object allocated via
225 	 * kmalloc_track_caller().
226 	 */
227 	ptr = kmalloc_track_caller(size, GFP_KERNEL);
228 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
229 
230 	OPTIMIZER_HIDE_VAR(ptr);
231 	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
232 
233 	kfree(ptr);
234 
235 	/*
236 	 * Check that KASAN detects an out-of-bounds access for an object allocated via
237 	 * kmalloc_node_track_caller().
238 	 */
239 	ptr = kmalloc_node_track_caller(size, GFP_KERNEL, 0);
240 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
241 
242 	OPTIMIZER_HIDE_VAR(ptr);
243 	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
244 
245 	kfree(ptr);
246 }
247 
248 /*
249  * Check that KASAN detects an out-of-bounds access for a big object allocated
250  * via kmalloc(), but not one big enough to trigger the page_alloc fallback.
251  */
252 static void kmalloc_big_oob_right(struct kunit *test)
253 {
254 	char *ptr;
255 	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
256 
257 	ptr = kmalloc(size, GFP_KERNEL);
258 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
259 
260 	OPTIMIZER_HIDE_VAR(ptr);
261 	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
262 	kfree(ptr);
263 }
264 
265 /*
266  * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
267  * that does not fit into the largest slab cache and therefore is allocated via
268  * the page_alloc fallback.
269  */
270 
271 static void kmalloc_large_oob_right(struct kunit *test)
272 {
273 	char *ptr;
274 	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
275 
276 	ptr = kmalloc(size, GFP_KERNEL);
277 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
278 
279 	OPTIMIZER_HIDE_VAR(ptr);
280 	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
281 
282 	kfree(ptr);
283 }
284 
285 static void kmalloc_large_uaf(struct kunit *test)
286 {
287 	char *ptr;
288 	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
289 
290 	ptr = kmalloc(size, GFP_KERNEL);
291 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
292 	kfree(ptr);
293 
294 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
295 }
296 
297 static void kmalloc_large_invalid_free(struct kunit *test)
298 {
299 	char *ptr;
300 	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
301 
302 	ptr = kmalloc(size, GFP_KERNEL);
303 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
304 
305 	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
306 }
307 
308 static void page_alloc_oob_right(struct kunit *test)
309 {
310 	char *ptr;
311 	struct page *pages;
312 	size_t order = 4;
313 	size_t size = (1UL << (PAGE_SHIFT + order));
314 
315 	/*
316 	 * With generic KASAN page allocations have no redzones, thus
317 	 * out-of-bounds detection is not guaranteed.
318 	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
319 	 */
320 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
321 
322 	pages = alloc_pages(GFP_KERNEL, order);
323 	ptr = page_address(pages);
324 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
325 
326 	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
327 	free_pages((unsigned long)ptr, order);
328 }
329 
330 static void page_alloc_uaf(struct kunit *test)
331 {
332 	char *ptr;
333 	struct page *pages;
334 	size_t order = 4;
335 
336 	pages = alloc_pages(GFP_KERNEL, order);
337 	ptr = page_address(pages);
338 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
339 	free_pages((unsigned long)ptr, order);
340 
341 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
342 }
343 
344 static void krealloc_more_oob_helper(struct kunit *test,
345 					size_t size1, size_t size2)
346 {
347 	char *ptr1, *ptr2;
348 	size_t middle;
349 
350 	KUNIT_ASSERT_LT(test, size1, size2);
351 	middle = size1 + (size2 - size1) / 2;
352 
353 	ptr1 = kmalloc(size1, GFP_KERNEL);
354 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
355 
356 	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
357 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
358 
359 	/* Suppress -Warray-bounds warnings. */
360 	OPTIMIZER_HIDE_VAR(ptr2);
361 
362 	/* All offsets up to size2 must be accessible. */
363 	ptr2[size1 - 1] = 'x';
364 	ptr2[size1] = 'x';
365 	ptr2[middle] = 'x';
366 	ptr2[size2 - 1] = 'x';
367 
368 	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
369 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
370 		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
371 
372 	/* For all modes first aligned offset after size2 must be inaccessible. */
373 	/* For all modes, the first aligned offset after size2 must be inaccessible. */
374 		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
375 
376 	kfree(ptr2);
377 }
378 
379 static void krealloc_less_oob_helper(struct kunit *test,
380 					size_t size1, size_t size2)
381 {
382 	char *ptr1, *ptr2;
383 	size_t middle;
384 
385 	KUNIT_ASSERT_LT(test, size2, size1);
386 	middle = size2 + (size1 - size2) / 2;
387 
388 	ptr1 = kmalloc(size1, GFP_KERNEL);
389 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
390 
391 	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
392 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
393 
394 	/* Suppress -Warray-bounds warnings. */
395 	OPTIMIZER_HIDE_VAR(ptr2);
396 
397 	/* Must be accessible for all modes. */
398 	ptr2[size2 - 1] = 'x';
399 
400 	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
401 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
402 		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
403 
404 	/* For all modes first aligned offset after size2 must be inaccessible. */
405 	/* For all modes, the first aligned offset after size2 must be inaccessible. */
406 		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
407 
408 	/*
409 	 * For all modes, size2, middle, and size1 should all land in separate
410 	 * granules, and thus the latter two offsets should be inaccessible.
411 	 */
412 	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
413 				round_down(middle, KASAN_GRANULE_SIZE));
414 	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
415 				round_down(size1, KASAN_GRANULE_SIZE));
416 	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
417 	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
418 	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');
419 
420 	kfree(ptr2);
421 }
422 
423 static void krealloc_more_oob(struct kunit *test)
424 {
425 	krealloc_more_oob_helper(test, 201, 235);
426 }
427 
428 static void krealloc_less_oob(struct kunit *test)
429 {
430 	krealloc_less_oob_helper(test, 235, 201);
431 }
432 
433 static void krealloc_large_more_oob(struct kunit *test)
434 {
435 	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
436 					KMALLOC_MAX_CACHE_SIZE + 235);
437 }
438 
439 static void krealloc_large_less_oob(struct kunit *test)
440 {
441 	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
442 					KMALLOC_MAX_CACHE_SIZE + 201);
443 }
444 
445 /*
446  * Check that krealloc() detects a use-after-free, returns NULL,
447  * and doesn't unpoison the freed object.
448  */
449 static void krealloc_uaf(struct kunit *test)
450 {
451 	char *ptr1, *ptr2;
452 	int size1 = 201;
453 	int size2 = 235;
454 
455 	ptr1 = kmalloc(size1, GFP_KERNEL);
456 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
457 	kfree(ptr1);
458 
459 	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
460 	KUNIT_ASSERT_NULL(test, ptr2);
461 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
462 }
463 
464 static void kmalloc_oob_16(struct kunit *test)
465 {
466 	struct {
467 		u64 words[2];
468 	} *ptr1, *ptr2;
469 
470 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
471 
472 	/* This test is specifically crafted for the generic mode. */
473 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
474 
475 	/* RELOC_HIDE to prevent gcc from warning about short alloc */
476 	ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
477 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
478 
479 	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
480 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
481 
482 	OPTIMIZER_HIDE_VAR(ptr1);
483 	OPTIMIZER_HIDE_VAR(ptr2);
484 	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
485 	kfree(ptr1);
486 	kfree(ptr2);
487 }
488 
489 static void kmalloc_uaf_16(struct kunit *test)
490 {
491 	struct {
492 		u64 words[2];
493 	} *ptr1, *ptr2;
494 
495 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
496 
497 	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
498 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
499 
500 	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
501 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
502 	kfree(ptr2);
503 
504 	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
505 	kfree(ptr1);
506 }
507 
508 /*
509  * Note: in the memset tests below, the written range touches both valid and
510  * invalid memory. This makes sure that the instrumentation does not only check
511  * the starting address but the whole range.
512  */
513 
514 static void kmalloc_oob_memset_2(struct kunit *test)
515 {
516 	char *ptr;
517 	size_t size = 128 - KASAN_GRANULE_SIZE;
518 	size_t memset_size = 2;
519 
520 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
521 
522 	ptr = kmalloc(size, GFP_KERNEL);
523 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
524 
525 	OPTIMIZER_HIDE_VAR(ptr);
526 	OPTIMIZER_HIDE_VAR(size);
527 	OPTIMIZER_HIDE_VAR(memset_size);
528 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
529 	kfree(ptr);
530 }
531 
532 static void kmalloc_oob_memset_4(struct kunit *test)
533 {
534 	char *ptr;
535 	size_t size = 128 - KASAN_GRANULE_SIZE;
536 	size_t memset_size = 4;
537 
538 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
539 
540 	ptr = kmalloc(size, GFP_KERNEL);
541 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
542 
543 	OPTIMIZER_HIDE_VAR(ptr);
544 	OPTIMIZER_HIDE_VAR(size);
545 	OPTIMIZER_HIDE_VAR(memset_size);
546 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
547 	kfree(ptr);
548 }
549 
550 static void kmalloc_oob_memset_8(struct kunit *test)
551 {
552 	char *ptr;
553 	size_t size = 128 - KASAN_GRANULE_SIZE;
554 	size_t memset_size = 8;
555 
556 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
557 
558 	ptr = kmalloc(size, GFP_KERNEL);
559 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
560 
561 	OPTIMIZER_HIDE_VAR(ptr);
562 	OPTIMIZER_HIDE_VAR(size);
563 	OPTIMIZER_HIDE_VAR(memset_size);
564 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
565 	kfree(ptr);
566 }
567 
568 static void kmalloc_oob_memset_16(struct kunit *test)
569 {
570 	char *ptr;
571 	size_t size = 128 - KASAN_GRANULE_SIZE;
572 	size_t memset_size = 16;
573 
574 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
575 
576 	ptr = kmalloc(size, GFP_KERNEL);
577 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
578 
579 	OPTIMIZER_HIDE_VAR(ptr);
580 	OPTIMIZER_HIDE_VAR(size);
581 	OPTIMIZER_HIDE_VAR(memset_size);
582 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
583 	kfree(ptr);
584 }
585 
586 static void kmalloc_oob_in_memset(struct kunit *test)
587 {
588 	char *ptr;
589 	size_t size = 128 - KASAN_GRANULE_SIZE;
590 
591 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
592 
593 	ptr = kmalloc(size, GFP_KERNEL);
594 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
595 
596 	OPTIMIZER_HIDE_VAR(ptr);
597 	OPTIMIZER_HIDE_VAR(size);
598 	KUNIT_EXPECT_KASAN_FAIL(test,
599 				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
600 	kfree(ptr);
601 }
602 
603 static void kmalloc_memmove_negative_size(struct kunit *test)
604 {
605 	char *ptr;
606 	size_t size = 64;
607 	size_t invalid_size = -2;
608 
609 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
610 
611 	/*
612 	 * Hardware tag-based mode doesn't check memmove for negative size.
613 	 * As a result, this test introduces a side-effect memory corruption,
614 	 * which can result in a crash.
615 	 */
616 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
617 
618 	ptr = kmalloc(size, GFP_KERNEL);
619 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
620 
621 	memset((char *)ptr, 0, 64);
622 	OPTIMIZER_HIDE_VAR(ptr);
623 	OPTIMIZER_HIDE_VAR(invalid_size);
624 	KUNIT_EXPECT_KASAN_FAIL(test,
625 		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
626 	kfree(ptr);
627 }
628 
629 static void kmalloc_memmove_invalid_size(struct kunit *test)
630 {
631 	char *ptr;
632 	size_t size = 64;
633 	size_t invalid_size = size;
634 
635 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
636 
637 	ptr = kmalloc(size, GFP_KERNEL);
638 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
639 
640 	memset((char *)ptr, 0, 64);
641 	OPTIMIZER_HIDE_VAR(ptr);
642 	OPTIMIZER_HIDE_VAR(invalid_size);
643 	KUNIT_EXPECT_KASAN_FAIL(test,
644 		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
645 	kfree(ptr);
646 }
647 
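/* Check that KASAN detects a use-after-free access to a kmalloc object. */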
648 static void kmalloc_uaf(struct kunit *test)
649 {
650 	char *ptr;
651 	size_t size = 10;
652 
653 	ptr = kmalloc(size, GFP_KERNEL);
654 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
655 
656 	kfree(ptr);
657 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
658 }
659 
660 static void kmalloc_uaf_memset(struct kunit *test)
661 {
662 	char *ptr;
663 	size_t size = 33;
664 
665 	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
666 
667 	/*
668 	 * Only generic KASAN uses quarantine, which is required to avoid a
669 	 * kernel memory corruption this test causes.
670 	 */
671 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
672 
673 	ptr = kmalloc(size, GFP_KERNEL);
674 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
675 
676 	kfree(ptr);
677 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
678 }
679 
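/*
 * Check that a use-after-free is still detected after a new object of the
 * same size has been allocated (possibly reusing the freed slot).
 */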
680 static void kmalloc_uaf2(struct kunit *test)
681 {
682 	char *ptr1, *ptr2;
683 	size_t size = 43;
684 	int counter = 0;
685 
686 again:
687 	ptr1 = kmalloc(size, GFP_KERNEL);
688 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
689 
690 	kfree(ptr1);
691 
692 	ptr2 = kmalloc(size, GFP_KERNEL);
693 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
694 
695 	/*
696 	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
697 	 * Allow up to 16 attempts at generating different tags.
698 	 */
699 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
700 		kfree(ptr2);
701 		goto again;
702 	}
703 
704 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
705 	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
706 
707 	kfree(ptr2);
708 }
709 
710 /*
711  * Check that KASAN detects use-after-free when another object was allocated in
712  * the same slot. Relevant for the tag-based modes, which do not use quarantine.
713  */
714 static void kmalloc_uaf3(struct kunit *test)
715 {
716 	char *ptr1, *ptr2;
717 	size_t size = 100;
718 
719 	/* This test is specifically crafted for tag-based modes. */
720 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
721 
722 	ptr1 = kmalloc(size, GFP_KERNEL);
723 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
724 	kfree(ptr1);
725 
726 	ptr2 = kmalloc(size, GFP_KERNEL);
727 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
728 	kfree(ptr2);
729 
730 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
731 }
732 
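/*
 * Helper that performs atomic and atomic_long operations on an out-of-bounds
 * ("unsafe") address; every such access must produce a KASAN report.
 */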
733 static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
734 {
735 	int *i_unsafe = unsafe;
736 
737 	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
738 	KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
739 	KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
740 	KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));
741 
742 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
743 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
744 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
745 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
746 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
747 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
748 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
749 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
750 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
751 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
752 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
753 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
754 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
755 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
756 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
757 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
758 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
759 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
760 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
761 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
762 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
763 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
764 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
765 
766 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
767 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
768 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
769 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
770 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
771 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
772 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
773 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
774 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
775 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
776 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
777 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
778 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
779 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
780 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
781 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
782 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
783 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
784 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
785 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
786 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
787 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
788 	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
789 }
790 
791 static void kasan_atomics(struct kunit *test)
792 {
793 	void *a1, *a2;
794 
795 	/*
796 	 * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
797 	 * that the following 16 bytes will make up the redzone.
798 	 */
799 	a1 = kzalloc(48, GFP_KERNEL);
800 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
801 	a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
802 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);
803 
804 	/* Use atomics to access the redzone. */
805 	kasan_atomics_helper(test, a1 + 48, a2);
806 
807 	kfree(a1);
808 	kfree(a2);
809 }
810 
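/* Check that KASAN detects a double-free via kfree_sensitive(). */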
811 static void kmalloc_double_kzfree(struct kunit *test)
812 {
813 	char *ptr;
814 	size_t size = 16;
815 
816 	ptr = kmalloc(size, GFP_KERNEL);
817 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
818 
819 	kfree_sensitive(ptr);
820 	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
821 }
822 
823 /* Check that ksize() does NOT unpoison the whole object. */
824 static void ksize_unpoisons_memory(struct kunit *test)
825 {
826 	char *ptr;
827 	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
828 	size_t real_size;
829 
830 	ptr = kmalloc(size, GFP_KERNEL);
831 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
832 
833 	real_size = ksize(ptr);
834 	KUNIT_EXPECT_GT(test, real_size, size);
835 
836 	OPTIMIZER_HIDE_VAR(ptr);
837 
838 	/* These accesses shouldn't trigger a KASAN report. */
839 	ptr[0] = 'x';
840 	ptr[size - 1] = 'x';
841 
842 	/* These must trigger a KASAN report. */
843 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
844 		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
845 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
846 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
847 
848 	kfree(ptr);
849 }
850 
851 /*
852  * Check that a use-after-free is detected by ksize() and via normal accesses
853  * after it.
854  */
855 static void ksize_uaf(struct kunit *test)
856 {
857 	char *ptr;
858 	int size = 128 - KASAN_GRANULE_SIZE;
859 
860 	ptr = kmalloc(size, GFP_KERNEL);
861 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
862 	kfree(ptr);
863 
864 	OPTIMIZER_HIDE_VAR(ptr);
865 	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
866 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
867 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
868 }
869 
870 /*
871  * The two tests below check that Generic KASAN prints auxiliary stack traces
872  * for RCU callbacks and workqueues. The reports need to be inspected manually.
873  *
874  * These tests are still enabled for other KASAN modes to make sure that all
875  * modes report bad accesses in tested scenarios.
876  */
877 
878 static struct kasan_rcu_info {
879 	int i;
880 	struct rcu_head rcu;
881 } *global_rcu_ptr;
882 
883 static void rcu_uaf_reclaim(struct rcu_head *rp)
884 {
885 	struct kasan_rcu_info *fp =
886 		container_of(rp, struct kasan_rcu_info, rcu);
887 
888 	kfree(fp);
889 	((volatile struct kasan_rcu_info *)fp)->i;
890 }
891 
892 static void rcu_uaf(struct kunit *test)
893 {
894 	struct kasan_rcu_info *ptr;
895 
896 	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
897 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
898 
899 	global_rcu_ptr = rcu_dereference_protected(
900 				(struct kasan_rcu_info __rcu *)ptr, NULL);
901 
902 	KUNIT_EXPECT_KASAN_FAIL(test,
903 		call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
904 		rcu_barrier());
905 }
906 
907 static void workqueue_uaf_work(struct work_struct *work)
908 {
909 	kfree(work);
910 }
911 
912 static void workqueue_uaf(struct kunit *test)
913 {
914 	struct workqueue_struct *workqueue;
915 	struct work_struct *work;
916 
917 	workqueue = create_workqueue("kasan_workqueue_test");
918 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
919 
920 	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
921 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
922 
923 	INIT_WORK(work, workqueue_uaf_work);
924 	queue_work(workqueue, work);
925 	destroy_workqueue(workqueue);
926 
927 	KUNIT_EXPECT_KASAN_FAIL(test,
928 		((volatile struct work_struct *)work)->data);
929 }
930 
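/*
 * The two tests below check that kfree() does not produce false positives when
 * the pointer is recovered via page_address() or phys_to_virt().
 */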
931 static void kfree_via_page(struct kunit *test)
932 {
933 	char *ptr;
934 	size_t size = 8;
935 	struct page *page;
936 	unsigned long offset;
937 
938 	ptr = kmalloc(size, GFP_KERNEL);
939 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
940 
941 	page = virt_to_page(ptr);
942 	offset = offset_in_page(ptr);
943 	kfree(page_address(page) + offset);
944 }
945 
946 static void kfree_via_phys(struct kunit *test)
947 {
948 	char *ptr;
949 	size_t size = 8;
950 	phys_addr_t phys;
951 
952 	ptr = kmalloc(size, GFP_KERNEL);
953 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
954 
955 	phys = virt_to_phys(ptr);
956 	kfree(phys_to_virt(phys));
957 }
958 
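/* Check that KASAN detects an out-of-bounds access to a kmem_cache object. */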
959 static void kmem_cache_oob(struct kunit *test)
960 {
961 	char *p;
962 	size_t size = 200;
963 	struct kmem_cache *cache;
964 
965 	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
966 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
967 
968 	p = kmem_cache_alloc(cache, GFP_KERNEL);
969 	if (!p) {
970 		kunit_err(test, "Allocation failed: %s\n", __func__);
971 		kmem_cache_destroy(cache);
972 		return;
973 	}
974 
975 	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
976 
977 	kmem_cache_free(cache, p);
978 	kmem_cache_destroy(cache);
979 }
980 
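/* Check that KASAN detects a double-free of a kmem_cache object. */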
981 static void kmem_cache_double_free(struct kunit *test)
982 {
983 	char *p;
984 	size_t size = 200;
985 	struct kmem_cache *cache;
986 
987 	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
988 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
989 
990 	p = kmem_cache_alloc(cache, GFP_KERNEL);
991 	if (!p) {
992 		kunit_err(test, "Allocation failed: %s\n", __func__);
993 		kmem_cache_destroy(cache);
994 		return;
995 	}
996 
997 	kmem_cache_free(cache, p);
998 	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
999 	kmem_cache_destroy(cache);
1000 }
1001 
1002 static void kmem_cache_invalid_free(struct kunit *test)
1003 {
1004 	char *p;
1005 	size_t size = 200;
1006 	struct kmem_cache *cache;
1007 
1008 	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
1009 				  NULL);
1010 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1011 
1012 	p = kmem_cache_alloc(cache, GFP_KERNEL);
1013 	if (!p) {
1014 		kunit_err(test, "Allocation failed: %s\n", __func__);
1015 		kmem_cache_destroy(cache);
1016 		return;
1017 	}
1018 
1019 	/* Trigger an invalid free; the object doesn't get freed. */
1020 	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
1021 
1022 	/*
1023 	 * Properly free the object to prevent the "Objects remaining in
1024 	 * test_cache on __kmem_cache_shutdown" BUG failure.
1025 	 */
1026 	kmem_cache_free(cache, p);
1027 
1028 	kmem_cache_destroy(cache);
1029 }
1030 
1031 static void kmem_cache_rcu_uaf(struct kunit *test)
1032 {
1033 	char *p;
1034 	size_t size = 200;
1035 	struct kmem_cache *cache;
1036 
1037 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
1038 
1039 	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
1040 				  NULL);
1041 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1042 
1043 	p = kmem_cache_alloc(cache, GFP_KERNEL);
1044 	if (!p) {
1045 		kunit_err(test, "Allocation failed: %s\n", __func__);
1046 		kmem_cache_destroy(cache);
1047 		return;
1048 	}
1049 	*p = 1;
1050 
1051 	rcu_read_lock();
1052 
1053 	/* Free the object - this will internally schedule an RCU callback. */
1054 	kmem_cache_free(cache, p);
1055 
1056 	/*
1057 	 * We should still be allowed to access the object at this point because
1058 	 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
1059 	 * critical section since before the kmem_cache_free().
1060 	 */
1061 	READ_ONCE(*p);
1062 
1063 	rcu_read_unlock();
1064 
1065 	/*
1066 	 * Wait for the RCU callback to execute; after this, the object should
1067 	 * have actually been freed from KASAN's perspective.
1068 	 */
1069 	rcu_barrier();
1070 
1071 	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
1072 
1073 	kmem_cache_destroy(cache);
1074 }
1075 
1076 static void empty_cache_ctor(void *object) { }
1077 
1078 static void kmem_cache_double_destroy(struct kunit *test)
1079 {
1080 	struct kmem_cache *cache;
1081 
1082 	/* Provide a constructor to prevent cache merging. */
1083 	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
1084 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1085 	kmem_cache_destroy(cache);
1086 	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
1087 }
1088 
1089 static void kmem_cache_accounted(struct kunit *test)
1090 {
1091 	int i;
1092 	char *p;
1093 	size_t size = 200;
1094 	struct kmem_cache *cache;
1095 
1096 	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
1097 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1098 
1099 	/*
1100 	 * Several allocations with a delay to allow for lazy per memcg kmem
1101 	 * cache creation.
1102 	 */
1103 	for (i = 0; i < 5; i++) {
1104 		p = kmem_cache_alloc(cache, GFP_KERNEL);
1105 		if (!p)
1106 			goto free_cache;
1107 
1108 		kmem_cache_free(cache, p);
1109 		msleep(100);
1110 	}
1111 
1112 free_cache:
1113 	kmem_cache_destroy(cache);
1114 }
1115 
1116 static void kmem_cache_bulk(struct kunit *test)
1117 {
1118 	struct kmem_cache *cache;
1119 	size_t size = 200;
1120 	char *p[10];
1121 	bool ret;
1122 	int i;
1123 
1124 	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1125 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1126 
1127 	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
1128 	if (!ret) {
1129 		kunit_err(test, "Allocation failed: %s\n", __func__);
1130 		kmem_cache_destroy(cache);
1131 		return;
1132 	}
1133 
1134 	for (i = 0; i < ARRAY_SIZE(p); i++)
1135 		p[i][0] = p[i][size - 1] = 42;
1136 
1137 	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
1138 	kmem_cache_destroy(cache);
1139 }
1140 
1141 static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
1142 {
1143 	int pool_size = 4;
1144 	int ret;
1145 	void *elem;
1146 
1147 	memset(pool, 0, sizeof(*pool));
1148 	ret = mempool_init_kmalloc_pool(pool, pool_size, size);
1149 	KUNIT_ASSERT_EQ(test, ret, 0);
1150 
1151 	/*
1152 	 * Allocate one element to prevent mempool from freeing elements to the
1153 	 * underlying allocator and instead make it add them to the element
1154 	 * list when the tests trigger double-free and invalid-free bugs.
1155 	 * This allows testing KASAN annotations in add_element().
1156 	 */
1157 	elem = mempool_alloc_preallocated(pool);
1158 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1159 
1160 	return elem;
1161 }
1162 
1163 static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
1164 {
1165 	struct kmem_cache *cache;
1166 	int pool_size = 4;
1167 	int ret;
1168 
1169 	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1170 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1171 
1172 	memset(pool, 0, sizeof(*pool));
1173 	ret = mempool_init_slab_pool(pool, pool_size, cache);
1174 	KUNIT_ASSERT_EQ(test, ret, 0);
1175 
1176 	/*
1177 	 * Do not allocate one preallocated element, as we skip the double-free
1178 	 * and invalid-free tests for slab mempool for simplicity.
1179 	 */
1180 
1181 	return cache;
1182 }
1183 
1184 static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
1185 {
1186 	int pool_size = 4;
1187 	int ret;
1188 	void *elem;
1189 
1190 	memset(pool, 0, sizeof(*pool));
1191 	ret = mempool_init_page_pool(pool, pool_size, order);
1192 	KUNIT_ASSERT_EQ(test, ret, 0);
1193 
1194 	elem = mempool_alloc_preallocated(pool);
1195 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1196 
1197 	return elem;
1198 }
1199 
1200 static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
1201 {
1202 	char *elem;
1203 
1204 	elem = mempool_alloc_preallocated(pool);
1205 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1206 
1207 	OPTIMIZER_HIDE_VAR(elem);
1208 
1209 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1210 		KUNIT_EXPECT_KASAN_FAIL(test,
1211 			((volatile char *)&elem[size])[0]);
1212 	else
1213 		KUNIT_EXPECT_KASAN_FAIL(test,
1214 			((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);
1215 
1216 	mempool_free(elem, pool);
1217 }
1218 
1219 static void mempool_kmalloc_oob_right(struct kunit *test)
1220 {
1221 	mempool_t pool;
1222 	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
1223 	void *extra_elem;
1224 
1225 	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1226 
1227 	mempool_oob_right_helper(test, &pool, size);
1228 
1229 	mempool_free(extra_elem, &pool);
1230 	mempool_exit(&pool);
1231 }
1232 
1233 static void mempool_kmalloc_large_oob_right(struct kunit *test)
1234 {
1235 	mempool_t pool;
1236 	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1237 	void *extra_elem;
1238 
1239 	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1240 
1241 	mempool_oob_right_helper(test, &pool, size);
1242 
1243 	mempool_free(extra_elem, &pool);
1244 	mempool_exit(&pool);
1245 }
1246 
1247 static void mempool_slab_oob_right(struct kunit *test)
1248 {
1249 	mempool_t pool;
1250 	size_t size = 123;
1251 	struct kmem_cache *cache;
1252 
1253 	cache = mempool_prepare_slab(test, &pool, size);
1254 
1255 	mempool_oob_right_helper(test, &pool, size);
1256 
1257 	mempool_exit(&pool);
1258 	kmem_cache_destroy(cache);
1259 }
1260 
1261 /*
1262  * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
1263  * allocations have no redzones, and thus the out-of-bounds detection is not
1264  * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
1265  * the tag-based KASAN modes, the neighboring allocation might have the same
1266  * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
1267  */
1268 
1269 static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
1270 {
1271 	char *elem, *ptr;
1272 
1273 	elem = mempool_alloc_preallocated(pool);
1274 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1275 
1276 	mempool_free(elem, pool);
1277 
1278 	ptr = page ? page_address((struct page *)elem) : elem;
1279 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
1280 }
1281 
1282 static void mempool_kmalloc_uaf(struct kunit *test)
1283 {
1284 	mempool_t pool;
1285 	size_t size = 128;
1286 	void *extra_elem;
1287 
1288 	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1289 
1290 	mempool_uaf_helper(test, &pool, false);
1291 
1292 	mempool_free(extra_elem, &pool);
1293 	mempool_exit(&pool);
1294 }
1295 
1296 static void mempool_kmalloc_large_uaf(struct kunit *test)
1297 {
1298 	mempool_t pool;
1299 	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1300 	void *extra_elem;
1301 
1302 	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1303 
1304 	mempool_uaf_helper(test, &pool, false);
1305 
1306 	mempool_free(extra_elem, &pool);
1307 	mempool_exit(&pool);
1308 }
1309 
1310 static void mempool_slab_uaf(struct kunit *test)
1311 {
1312 	mempool_t pool;
1313 	size_t size = 123;
1314 	struct kmem_cache *cache;
1315 
1316 	cache = mempool_prepare_slab(test, &pool, size);
1317 
1318 	mempool_uaf_helper(test, &pool, false);
1319 
1320 	mempool_exit(&pool);
1321 	kmem_cache_destroy(cache);
1322 }
1323 
1324 static void mempool_page_alloc_uaf(struct kunit *test)
1325 {
1326 	mempool_t pool;
1327 	int order = 2;
1328 	void *extra_elem;
1329 
1330 	extra_elem = mempool_prepare_page(test, &pool, order);
1331 
1332 	mempool_uaf_helper(test, &pool, true);
1333 
1334 	mempool_free(extra_elem, &pool);
1335 	mempool_exit(&pool);
1336 }
1337 
1338 static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
1339 {
1340 	char *elem;
1341 
1342 	elem = mempool_alloc_preallocated(pool);
1343 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1344 
1345 	mempool_free(elem, pool);
1346 
1347 	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
1348 }
1349 
1350 static void mempool_kmalloc_double_free(struct kunit *test)
1351 {
1352 	mempool_t pool;
1353 	size_t size = 128;
1354 	char *extra_elem;
1355 
1356 	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1357 
1358 	mempool_double_free_helper(test, &pool);
1359 
1360 	mempool_free(extra_elem, &pool);
1361 	mempool_exit(&pool);
1362 }
1363 
1364 static void mempool_kmalloc_large_double_free(struct kunit *test)
1365 {
1366 	mempool_t pool;
1367 	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1368 	char *extra_elem;
1369 
1370 	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1371 
1372 	mempool_double_free_helper(test, &pool);
1373 
1374 	mempool_free(extra_elem, &pool);
1375 	mempool_exit(&pool);
1376 }
1377 
1378 static void mempool_page_alloc_double_free(struct kunit *test)
1379 {
1380 	mempool_t pool;
1381 	int order = 2;
1382 	char *extra_elem;
1383 
1384 	extra_elem = mempool_prepare_page(test, &pool, order);
1385 
1386 	mempool_double_free_helper(test, &pool);
1387 
1388 	mempool_free(extra_elem, &pool);
1389 	mempool_exit(&pool);
1390 }
1391 
1392 static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
1393 {
1394 	char *elem;
1395 
1396 	elem = mempool_alloc_preallocated(pool);
1397 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1398 
1399 	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));
1400 
1401 	mempool_free(elem, pool);
1402 }
1403 
1404 static void mempool_kmalloc_invalid_free(struct kunit *test)
1405 {
1406 	mempool_t pool;
1407 	size_t size = 128;
1408 	char *extra_elem;
1409 
1410 	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1411 
1412 	mempool_kmalloc_invalid_free_helper(test, &pool);
1413 
1414 	mempool_free(extra_elem, &pool);
1415 	mempool_exit(&pool);
1416 }
1417 
1418 static void mempool_kmalloc_large_invalid_free(struct kunit *test)
1419 {
1420 	mempool_t pool;
1421 	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1422 	char *extra_elem;
1423 
1424 	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1425 
1426 	mempool_kmalloc_invalid_free_helper(test, &pool);
1427 
1428 	mempool_free(extra_elem, &pool);
1429 	mempool_exit(&pool);
1430 }
1431 
1432 /*
1433  * Skip the invalid-free test for page mempool. The invalid-free detection only
1434  * works for compound pages and mempool preallocates all page elements without
1435  * the __GFP_COMP flag.
1436  */
1437 
1438 static char global_array[10];
1439 
1440 static void kasan_global_oob_right(struct kunit *test)
1441 {
1442 	/*
1443 	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
1444 	 * from failing here and panicking the kernel, access the array via a
1445 	 * volatile pointer, which will prevent the compiler from being able to
1446 	 * determine the array bounds.
1447 	 *
1448 	 * This access uses a volatile pointer to char (char *volatile) rather
1449 	 * than the more conventional pointer to volatile char (volatile char *)
1450 	 * because we want to prevent the compiler from making inferences about
1451 	 * the pointer itself (i.e. its array bounds), not the data that it
1452 	 * refers to.
1453 	 */
1454 	char *volatile array = global_array;
1455 	char *p = &array[ARRAY_SIZE(global_array) + 3];
1456 
1457 	/* Only generic mode instruments globals. */
1458 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1459 
1460 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1461 }
1462 
1463 static void kasan_global_oob_left(struct kunit *test)
1464 {
1465 	char *volatile array = global_array;
1466 	char *p = array - 3;
1467 
1468 	/*
1469 	 * GCC is known to fail this test; skip it.
1470 	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
1471 	 */
1472 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
1473 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1474 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1475 }
1476 
1477 static void kasan_stack_oob(struct kunit *test)
1478 {
1479 	char stack_array[10];
1480 	/* See comment in kasan_global_oob_right. */
1481 	char *volatile array = stack_array;
1482 	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
1483 
1484 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1485 
1486 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1487 }
1488 
1489 static void kasan_alloca_oob_left(struct kunit *test)
1490 {
1491 	volatile int i = 10;
1492 	char alloca_array[i];
1493 	/* See comment in kasan_global_oob_right. */
1494 	char *volatile array = alloca_array;
1495 	char *p = array - 1;
1496 
1497 	/* Only generic mode instruments dynamic allocas. */
1498 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1499 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1500 
1501 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1502 }
1503 
1504 static void kasan_alloca_oob_right(struct kunit *test)
1505 {
1506 	volatile int i = 10;
1507 	char alloca_array[i];
1508 	/* See comment in kasan_global_oob_right. */
1509 	char *volatile array = alloca_array;
1510 	char *p = array + i;
1511 
1512 	/* Only generic mode instruments dynamic allocas. */
1513 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1514 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1515 
1516 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1517 }
1518 
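/* Check that KASAN detects an out-of-bounds read performed by memchr(). */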
1519 static void kasan_memchr(struct kunit *test)
1520 {
1521 	char *ptr;
1522 	size_t size = 24;
1523 
1524 	/*
1525 	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1526 	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1527 	 */
1528 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1529 
1530 	if (OOB_TAG_OFF)
1531 		size = round_up(size, OOB_TAG_OFF);
1532 
1533 	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1534 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1535 
1536 	OPTIMIZER_HIDE_VAR(ptr);
1537 	OPTIMIZER_HIDE_VAR(size);
1538 	KUNIT_EXPECT_KASAN_FAIL(test,
1539 		kasan_ptr_result = memchr(ptr, '1', size + 1));
1540 
1541 	kfree(ptr);
1542 }
1543 
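/* Check that KASAN detects an out-of-bounds read performed by memcmp(). */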
1544 static void kasan_memcmp(struct kunit *test)
1545 {
1546 	char *ptr;
1547 	size_t size = 24;
1548 	int arr[9];
1549 
1550 	/*
1551 	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1552 	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1553 	 */
1554 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1555 
1556 	if (OOB_TAG_OFF)
1557 		size = round_up(size, OOB_TAG_OFF);
1558 
1559 	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1560 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1561 	memset(arr, 0, sizeof(arr));
1562 
1563 	OPTIMIZER_HIDE_VAR(ptr);
1564 	OPTIMIZER_HIDE_VAR(size);
1565 	KUNIT_EXPECT_KASAN_FAIL(test,
1566 		kasan_int_result = memcmp(ptr, arr, size+1));
1567 	kfree(ptr);
1568 }
1569 
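/*
 * Check that KASAN detects bad accesses performed by the str* functions on a
 * freed object.
 */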
1570 static void kasan_strings(struct kunit *test)
1571 {
1572 	char *ptr;
1573 	size_t size = 24;
1574 
1575 	/*
1576 	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1577 	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1578 	 */
1579 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1580 
1581 	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1582 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1583 
1584 	kfree(ptr);
1585 
1586 	/*
1587 	 * Try to cause only one invalid access (less spam in dmesg).
1588 	 * For that, ptr needs to point to a zeroed byte.
1589 	 * Skip the metadata that could be stored in the freed object so that
1590 	 * ptr likely points to a zeroed byte.
1591 	 */
1592 	ptr += 16;
1593 	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
1594 
1595 	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
1596 
1597 	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
1598 
1599 	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
1600 
1601 	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
1602 
1603 	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
1604 }
1605 
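/*
 * Helpers for the bitops tests below: each call operates on an out-of-bounds
 * bit address and must produce a KASAN report.
 */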
1606 static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
1607 {
1608 	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
1609 	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
1610 	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
1611 	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
1612 	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
1613 	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
1614 	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
1615 	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
1616 }
1617 
1618 static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
1619 {
1620 	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
1621 	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
1622 	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
1623 	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
1624 	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
1625 	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
1626 	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
1627 	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
1628 	if (nr < 7)
1629 		KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
1630 				xor_unlock_is_negative_byte(1 << nr, addr));
1631 }
1632 
1633 static void kasan_bitops_generic(struct kunit *test)
1634 {
1635 	long *bits;
1636 
1637 	/* This test is specifically crafted for the generic mode. */
1638 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1639 
1640 	/*
1641 	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
1642 	 * this way we do not actually corrupt other memory.
1643 	 */
1644 	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
1645 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1646 
1647 	/*
1648 	 * The calls below try to access a bit within the allocated memory;
1649 	 * however, the accesses are still out-of-bounds, since bitops are
1650 	 * defined to operate on the whole long the bit is in.
1651 	 */
1652 	kasan_bitops_modify(test, BITS_PER_LONG, bits);
1653 
1654 	/*
1655 	 * The calls below try to access a bit beyond the allocated memory.
1656 	 */
1657 	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
1658 
1659 	kfree(bits);
1660 }
1661 
1662 static void kasan_bitops_tags(struct kunit *test)
1663 {
1664 	long *bits;
1665 
1666 	/* This test is specifically crafted for tag-based modes. */
1667 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1668 
1669 	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
1670 	bits = kzalloc(48, GFP_KERNEL);
1671 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1672 
1673 	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
1674 	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
1675 	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
1676 
1677 	kfree(bits);
1678 }
1679 
1680 static void vmalloc_helpers_tags(struct kunit *test)
1681 {
1682 	void *ptr;
1683 
1684 	/* This test is intended for tag-based modes. */
1685 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1686 
1687 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1688 
1689 	if (!kasan_vmalloc_enabled())
1690 		kunit_skip(test, "Test requires kasan.vmalloc=on");
1691 
1692 	ptr = vmalloc(PAGE_SIZE);
1693 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1694 
1695 	/* Check that the returned pointer is tagged. */
1696 	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1697 	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1698 
1699 	/* Make sure exported vmalloc helpers handle tagged pointers. */
1700 	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
1701 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));
1702 
1703 #if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
1704 	{
1705 		int rv;
1706 
1707 		/* Make sure vmalloc'ed memory permissions can be changed. */
1708 		rv = set_memory_ro((unsigned long)ptr, 1);
1709 		KUNIT_ASSERT_GE(test, rv, 0);
1710 		rv = set_memory_rw((unsigned long)ptr, 1);
1711 		KUNIT_ASSERT_GE(test, rv, 0);
1712 	}
1713 #endif
1714 
1715 	vfree(ptr);
1716 }
1717 
1718 static void vmalloc_oob(struct kunit *test)
1719 {
1720 	char *v_ptr, *p_ptr;
1721 	struct page *page;
1722 	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;
1723 
1724 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1725 
1726 	if (!kasan_vmalloc_enabled())
1727 		kunit_skip(test, "Test requires kasan.vmalloc=on");
1728 
1729 	v_ptr = vmalloc(size);
1730 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1731 
1732 	OPTIMIZER_HIDE_VAR(v_ptr);
1733 
1734 	/*
1735 	 * We have to be careful not to hit the guard page in vmalloc tests.
1736 	 * The MMU will catch that and crash us.
1737 	 */
1738 
1739 	/* Make sure in-bounds accesses are valid. */
1740 	v_ptr[0] = 0;
1741 	v_ptr[size - 1] = 0;
1742 
1743 	/*
1744 	 * An unaligned access past the requested vmalloc size.
1745 	 * Only generic KASAN can precisely detect these.
1746 	 */
1747 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1748 		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
1749 
1750 	/* An aligned access into the first out-of-bounds granule. */
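	/*
	 * size was chosen so that size + 5 is granule-aligned: e.g. assuming
	 * PAGE_SIZE == 4096 and an 8-byte granule, size is 2048 - 8 - 5 = 2035
	 * and size + 5 is 2040, the first byte of a fully-poisoned granule.
	 */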
1751 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);
1752 
1753 	/* Check that in-bounds accesses to the physical page are valid. */
1754 	page = vmalloc_to_page(v_ptr);
1755 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
1756 	p_ptr = page_address(page);
1757 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1758 	p_ptr[0] = 0;
1759 
1760 	vfree(v_ptr);
1761 
1762 	/*
1763 	 * We can't check for use-after-unmap bugs in this or the following
1764 	 * vmalloc tests, as the page might be fully unmapped and accessing it
1765 	 * will crash the kernel.
1766 	 */
1767 }
1768 
1769 static void vmap_tags(struct kunit *test)
1770 {
1771 	char *p_ptr, *v_ptr;
1772 	struct page *p_page, *v_page;
1773 
1774 	/*
1775 	 * This test is specifically crafted for the software tag-based mode,
1776 	 * the only tag-based mode that poisons vmap mappings.
1777 	 */
1778 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1779 
1780 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1781 
1782 	if (!kasan_vmalloc_enabled())
1783 		kunit_skip(test, "Test requires kasan.vmalloc=on");
1784 
1785 	p_page = alloc_pages(GFP_KERNEL, 1);
1786 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
1787 	p_ptr = page_address(p_page);
1788 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1789 
1790 	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
1791 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1792 
1793 	/*
1794 	 * We can't check for out-of-bounds bugs in this or the following
1795 	 * vmalloc tests, as allocations have page granularity and accessing
1796 	 * the guard page will crash the kernel.
1797 	 */
1798 
1799 	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
1800 	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
1801 
1802 	/* Make sure that in-bounds accesses through both pointers work. */
1803 	*p_ptr = 0;
1804 	*v_ptr = 0;
1805 
1806 	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
1807 	v_page = vmalloc_to_page(v_ptr);
1808 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
1809 	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);
1810 
1811 	vunmap(v_ptr);
1812 	free_pages((unsigned long)p_ptr, 1);
1813 }
1814 
1815 static void vm_map_ram_tags(struct kunit *test)
1816 {
1817 	char *p_ptr, *v_ptr;
1818 	struct page *page;
1819 
1820 	/*
1821 	 * This test is specifically crafted for the software tag-based mode,
1822 	 * the only tag-based mode that poisons vm_map_ram mappings.
1823 	 */
1824 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1825 
1826 	page = alloc_pages(GFP_KERNEL, 1);
1827 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
1828 	p_ptr = page_address(page);
1829 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1830 
1831 	v_ptr = vm_map_ram(&page, 1, -1);
1832 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1833 
1834 	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
1835 	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
1836 
1837 	/* Make sure that in-bounds accesses through both pointers work. */
1838 	*p_ptr = 0;
1839 	*v_ptr = 0;
1840 
1841 	vm_unmap_ram(v_ptr, 1);
1842 	free_pages((unsigned long)p_ptr, 1);
1843 }
1844 
1845 /*
1846  * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
1847  * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
1848  * modes.
1849  */
1850 static void match_all_not_assigned(struct kunit *test)
1851 {
1852 	char *ptr;
1853 	struct page *pages;
1854 	int i, size, order;
1855 
1856 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1857 
1858 	for (i = 0; i < 256; i++) {
1859 		size = get_random_u32_inclusive(1, 1024);
1860 		ptr = kmalloc(size, GFP_KERNEL);
1861 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1862 		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1863 		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1864 		kfree(ptr);
1865 	}
1866 
1867 	for (i = 0; i < 256; i++) {
1868 		order = get_random_u32_inclusive(1, 4);
1869 		pages = alloc_pages(GFP_KERNEL, order);
1870 		ptr = page_address(pages);
1871 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1872 		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1873 		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1874 		free_pages((unsigned long)ptr, order);
1875 	}
1876 
1877 	if (!kasan_vmalloc_enabled())
1878 		return;
1879 
1880 	for (i = 0; i < 256; i++) {
1881 		size = get_random_u32_inclusive(1, 1024);
1882 		ptr = vmalloc(size);
1883 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1884 		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1885 		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1886 		vfree(ptr);
1887 	}
1888 }
1889 
1890 /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
1891 static void match_all_ptr_tag(struct kunit *test)
1892 {
1893 	char *ptr;
1894 	u8 tag;
1895 
1896 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1897 
1898 	ptr = kmalloc(128, GFP_KERNEL);
1899 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1900 
1901 	/* Back up the assigned tag. */
1902 	tag = get_tag(ptr);
1903 	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);
1904 
1905 	/* Reset the tag to 0xff. */
1906 	ptr = set_tag(ptr, KASAN_TAG_KERNEL);
1907 
1908 	/* This access shouldn't trigger a KASAN report. */
1909 	*ptr = 0;
1910 
1911 	/* Recover the pointer tag and free. */
1912 	ptr = set_tag(ptr, tag);
1913 	kfree(ptr);
1914 }
1915 
1916 /* Check that there are no match-all memory tags for tag-based modes. */
1917 static void match_all_mem_tag(struct kunit *test)
1918 {
1919 	char *ptr;
1920 	int tag;
1921 
1922 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1923 
1924 	ptr = kmalloc(128, GFP_KERNEL);
1925 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1926 	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1927 
1928 	/* For each possible tag value not matching the pointer tag. */
1929 	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
1930 		/*
1931 		 * For Software Tag-Based KASAN, skip the majority of tag
1932 		 * values to avoid the test printing too many reports.
1933 		 */
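		/*
		 * Software tag-based KASAN uses 8-bit tags, so without this
		 * skip the loop would emit a report for roughly 250 tag
		 * values.
		 */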
1934 		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
1935 		    tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
1936 			continue;
1937 
1938 		if (tag == get_tag(ptr))
1939 			continue;
1940 
1941 		/* Mark the first memory granule with the chosen memory tag. */
1942 		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
1943 
1944 		/* This access must cause a KASAN report. */
1945 		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
1946 	}
1947 
1948 	/* Recover the memory tag and free. */
1949 	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
1950 	kfree(ptr);
1951 }
1952 
1953 /*
1954  * Check that Rust performing a use-after-free using `unsafe` is detected.
1955  * This is a smoke test to make sure that Rust is being sanitized properly.
1956  */
1957 static void rust_uaf(struct kunit *test)
1958 {
1959 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
1960 	KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
1961 }
1962 
1963 static void copy_to_kernel_nofault_oob(struct kunit *test)
1964 {
1965 	char *ptr;
1966 	char buf[128];
1967 	size_t size = sizeof(buf);
1968 
1969 	/*
1970 	 * This test currently fails with the HW_TAGS mode. The reason is
1971 	 * unknown and needs to be investigated.
1972 	 */
1973 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
1974 
1975 	ptr = kmalloc(size - KASAN_GRANULE_SIZE, GFP_KERNEL);
1976 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1977 	OPTIMIZER_HIDE_VAR(ptr);
1978 
1979 	/*
1980 	 * We test copy_to_kernel_nofault() to detect corrupted memory that is
1981 	 * being written into the kernel. In contrast,
1982 	 * copy_from_kernel_nofault() is primarily used in kernel helper
1983 	 * functions where the source address might be random or uninitialized.
1984 	 * Applying KASAN instrumentation to copy_from_kernel_nofault() could
1985 	 * lead to false positives. By focusing KASAN checks only on
1986 	 * copy_to_kernel_nofault(), we ensure that only valid memory is
1987 	 * written to the kernel, minimizing the risk of kernel corruption
1988 	 * while avoiding false positives in the reverse case.
1989 	 */
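	/*
	 * ptr holds KASAN_GRANULE_SIZE bytes less than size, so both copies
	 * below run past the allocation, whether ptr is used as the source or
	 * as the destination.
	 */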
1990 	KUNIT_EXPECT_KASAN_FAIL(test,
1991 		copy_to_kernel_nofault(&buf[0], ptr, size));
1992 	KUNIT_EXPECT_KASAN_FAIL(test,
1993 		copy_to_kernel_nofault(ptr, &buf[0], size));
1994 
1995 	kfree(ptr);
1996 }
1997 
1998 static void copy_user_test_oob(struct kunit *test)
1999 {
2000 	char *kmem;
2001 	char __user *usermem;
2002 	unsigned long useraddr;
2003 	size_t size = 128 - KASAN_GRANULE_SIZE;
2004 	int __maybe_unused unused;
2005 
2006 	kmem = kunit_kmalloc(test, size, GFP_KERNEL);
2007 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, kmem);
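	/*
	 * kmem holds size bytes (e.g. 120 with an 8-byte granule), so each
	 * size + 1 copy below overruns the kernel buffer by a single byte
	 * into its KASAN redzone.
	 */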
2008 
2009 	useraddr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
2010 					PROT_READ | PROT_WRITE | PROT_EXEC,
2011 					MAP_ANONYMOUS | MAP_PRIVATE, 0);
2012 	KUNIT_ASSERT_NE_MSG(test, useraddr, 0,
2013 		"Could not create userspace mm");
2014 	KUNIT_ASSERT_LT_MSG(test, useraddr, (unsigned long)TASK_SIZE,
2015 		"Failed to allocate user memory");
2016 
2017 	OPTIMIZER_HIDE_VAR(size);
2018 	usermem = (char __user *)useraddr;
2019 
2020 	KUNIT_EXPECT_KASAN_FAIL(test,
2021 		unused = copy_from_user(kmem, usermem, size + 1));
2022 	KUNIT_EXPECT_KASAN_FAIL(test,
2023 		unused = copy_to_user(usermem, kmem, size + 1));
2024 	KUNIT_EXPECT_KASAN_FAIL(test,
2025 		unused = __copy_from_user(kmem, usermem, size + 1));
2026 	KUNIT_EXPECT_KASAN_FAIL(test,
2027 		unused = __copy_to_user(usermem, kmem, size + 1));
2028 	KUNIT_EXPECT_KASAN_FAIL(test,
2029 		unused = __copy_from_user_inatomic(kmem, usermem, size + 1));
2030 	KUNIT_EXPECT_KASAN_FAIL(test,
2031 		unused = __copy_to_user_inatomic(usermem, kmem, size + 1));
2032 
2033 	/*
2034 	 * Prepare a long string in usermem to avoid the strncpy_from_user test
2035 	 * bailing out on '\0' before it reaches out-of-bounds.
2036 	 */
2037 	memset(kmem, 'a', size);
2038 	KUNIT_EXPECT_EQ(test, copy_to_user(usermem, kmem, size), 0);
2039 
2040 	KUNIT_EXPECT_KASAN_FAIL(test,
2041 		unused = strncpy_from_user(kmem, usermem, size + 1));
2042 }
2043 
2044 static struct kunit_case kasan_kunit_test_cases[] = {
2045 	KUNIT_CASE(kmalloc_oob_right),
2046 	KUNIT_CASE(kmalloc_oob_left),
2047 	KUNIT_CASE(kmalloc_node_oob_right),
2048 	KUNIT_CASE(kmalloc_track_caller_oob_right),
2049 	KUNIT_CASE(kmalloc_big_oob_right),
2050 	KUNIT_CASE(kmalloc_large_oob_right),
2051 	KUNIT_CASE(kmalloc_large_uaf),
2052 	KUNIT_CASE(kmalloc_large_invalid_free),
2053 	KUNIT_CASE(page_alloc_oob_right),
2054 	KUNIT_CASE(page_alloc_uaf),
2055 	KUNIT_CASE(krealloc_more_oob),
2056 	KUNIT_CASE(krealloc_less_oob),
2057 	KUNIT_CASE(krealloc_large_more_oob),
2058 	KUNIT_CASE(krealloc_large_less_oob),
2059 	KUNIT_CASE(krealloc_uaf),
2060 	KUNIT_CASE(kmalloc_oob_16),
2061 	KUNIT_CASE(kmalloc_uaf_16),
2062 	KUNIT_CASE(kmalloc_oob_in_memset),
2063 	KUNIT_CASE(kmalloc_oob_memset_2),
2064 	KUNIT_CASE(kmalloc_oob_memset_4),
2065 	KUNIT_CASE(kmalloc_oob_memset_8),
2066 	KUNIT_CASE(kmalloc_oob_memset_16),
2067 	KUNIT_CASE(kmalloc_memmove_negative_size),
2068 	KUNIT_CASE(kmalloc_memmove_invalid_size),
2069 	KUNIT_CASE(kmalloc_uaf),
2070 	KUNIT_CASE(kmalloc_uaf_memset),
2071 	KUNIT_CASE(kmalloc_uaf2),
2072 	KUNIT_CASE(kmalloc_uaf3),
2073 	KUNIT_CASE(kmalloc_double_kzfree),
2074 	KUNIT_CASE(ksize_unpoisons_memory),
2075 	KUNIT_CASE(ksize_uaf),
2076 	KUNIT_CASE(rcu_uaf),
2077 	KUNIT_CASE(workqueue_uaf),
2078 	KUNIT_CASE(kfree_via_page),
2079 	KUNIT_CASE(kfree_via_phys),
2080 	KUNIT_CASE(kmem_cache_oob),
2081 	KUNIT_CASE(kmem_cache_double_free),
2082 	KUNIT_CASE(kmem_cache_invalid_free),
2083 	KUNIT_CASE(kmem_cache_rcu_uaf),
2084 	KUNIT_CASE(kmem_cache_double_destroy),
2085 	KUNIT_CASE(kmem_cache_accounted),
2086 	KUNIT_CASE(kmem_cache_bulk),
2087 	KUNIT_CASE(mempool_kmalloc_oob_right),
2088 	KUNIT_CASE(mempool_kmalloc_large_oob_right),
2089 	KUNIT_CASE(mempool_slab_oob_right),
2090 	KUNIT_CASE(mempool_kmalloc_uaf),
2091 	KUNIT_CASE(mempool_kmalloc_large_uaf),
2092 	KUNIT_CASE(mempool_slab_uaf),
2093 	KUNIT_CASE(mempool_page_alloc_uaf),
2094 	KUNIT_CASE(mempool_kmalloc_double_free),
2095 	KUNIT_CASE(mempool_kmalloc_large_double_free),
2096 	KUNIT_CASE(mempool_page_alloc_double_free),
2097 	KUNIT_CASE(mempool_kmalloc_invalid_free),
2098 	KUNIT_CASE(mempool_kmalloc_large_invalid_free),
2099 	KUNIT_CASE(kasan_global_oob_right),
2100 	KUNIT_CASE(kasan_global_oob_left),
2101 	KUNIT_CASE(kasan_stack_oob),
2102 	KUNIT_CASE(kasan_alloca_oob_left),
2103 	KUNIT_CASE(kasan_alloca_oob_right),
2104 	KUNIT_CASE(kasan_memchr),
2105 	KUNIT_CASE(kasan_memcmp),
2106 	KUNIT_CASE(kasan_strings),
2107 	KUNIT_CASE(kasan_bitops_generic),
2108 	KUNIT_CASE(kasan_bitops_tags),
2109 	KUNIT_CASE_SLOW(kasan_atomics),
2110 	KUNIT_CASE(vmalloc_helpers_tags),
2111 	KUNIT_CASE(vmalloc_oob),
2112 	KUNIT_CASE(vmap_tags),
2113 	KUNIT_CASE(vm_map_ram_tags),
2114 	KUNIT_CASE(match_all_not_assigned),
2115 	KUNIT_CASE(match_all_ptr_tag),
2116 	KUNIT_CASE(match_all_mem_tag),
2117 	KUNIT_CASE(copy_to_kernel_nofault_oob),
2118 	KUNIT_CASE(rust_uaf),
2119 	KUNIT_CASE(copy_user_test_oob),
2120 	{}
2121 };
2122 
2123 static struct kunit_suite kasan_kunit_test_suite = {
2124 	.name = "kasan",
2125 	.test_cases = kasan_kunit_test_cases,
2126 	.exit = kasan_test_exit,
2127 	.suite_init = kasan_suite_init,
2128 	.suite_exit = kasan_suite_exit,
2129 };
2130 
2131 kunit_test_suite(kasan_kunit_test_suite);
2132 
2133 MODULE_LICENSE("GPL");
2134