1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
6 */
7
8 #define pr_fmt(fmt) "kasan: test: " fmt
9
10 #include <kunit/test.h>
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/io.h>
14 #include <linux/kasan.h>
15 #include <linux/kernel.h>
16 #include <linux/mempool.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/module.h>
20 #include <linux/printk.h>
21 #include <linux/random.h>
22 #include <linux/set_memory.h>
23 #include <linux/slab.h>
24 #include <linux/string.h>
25 #include <linux/tracepoint.h>
26 #include <linux/uaccess.h>
27 #include <linux/vmalloc.h>
28 #include <trace/events/printk.h>
29
30 #include <asm/page.h>
31
32 #include "kasan.h"
33
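/*
 * The tag-based modes track memory state at KASAN_GRANULE_SIZE granularity,
 * so the out-of-bounds accesses in the tests below are shifted by one granule
 * to reliably land in the redzone; the generic mode is byte-precise and needs
 * no extra offset.
 */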
34 #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
35
36 MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
37
38 static bool multishot;
39
40 /* Fields set based on lines observed in the console. */
41 static struct {
42 bool report_found;
43 bool async_fault;
44 } test_status;
45
46 /*
47 * Some tests use these global variables to store return values from function
48 * calls that could otherwise be eliminated by the compiler as dead code.
49 */
50 static void *volatile kasan_ptr_result;
51 static volatile int kasan_int_result;
52
53 /* Probe for console output: obtains test_status lines of interest. */
54 static void probe_console(void *ignore, const char *buf, size_t len)
55 {
56 if (strnstr(buf, "BUG: KASAN: ", len))
57 WRITE_ONCE(test_status.report_found, true);
58 else if (strnstr(buf, "Asynchronous fault: ", len))
59 WRITE_ONCE(test_status.async_fault, true);
60 }
61
62 static int kasan_suite_init(struct kunit_suite *suite)
63 {
64 if (!kasan_enabled()) {
65 pr_err("Can't run KASAN tests with KASAN disabled");
66 return -1;
67 }
68
69 /* Stop failing KUnit tests on KASAN reports. */
70 kasan_kunit_test_suite_start();
71
72 /*
73 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
74 * report the first detected bug and panic the kernel if panic_on_warn
75 * is enabled.
76 */
77 multishot = kasan_save_enable_multi_shot();
78
79 register_trace_console(probe_console, NULL);
80 return 0;
81 }
82
83 static void kasan_suite_exit(struct kunit_suite *suite)
84 {
85 kasan_kunit_test_suite_end();
86 kasan_restore_multi_shot(multishot);
87 unregister_trace_console(probe_console, NULL);
88 tracepoint_synchronize_unregister();
89 }
90
91 static void kasan_test_exit(struct kunit *test)
92 {
93 KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
94 }
95
96 /**
97 * KUNIT_EXPECT_KASAN_RESULT - checks whether the executed expression
98 * produces a KASAN report; causes a KUnit test failure when the result
99 * is different from @fail.
100 *
101 * @test: Currently executing KUnit test.
102 * @expr: Expression to be tested.
103 * @expr_str: Expression to be tested encoded as a string.
104 * @fail: Whether expression should produce a KASAN report.
105 *
106 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
107 * checking is auto-disabled. When this happens, this test handler reenables
108 * tag checking. As tag checking can be only disabled or enabled per CPU,
109 * this handler disables migration (preemption).
110 *
111 * Since the compiler doesn't see that the expression can change the test_status
112 * fields, it can reorder or optimize away the accesses to those fields.
113 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
114 * expression to prevent that.
115 *
116 * In between KUNIT_EXPECT_KASAN_RESULT checks, test_status.report_found is kept
117 * as false. This allows detecting KASAN reports that happen outside of the
118 * checks by asserting !test_status.report_found at the start of
119 * KUNIT_EXPECT_KASAN_RESULT and in kasan_test_exit.
120 */
121 #define KUNIT_EXPECT_KASAN_RESULT(test, expr, expr_str, fail) \
122 do { \
123 if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
124 kasan_sync_fault_possible()) \
125 migrate_disable(); \
126 KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found)); \
127 barrier(); \
128 expr; \
129 barrier(); \
130 if (kasan_async_fault_possible()) \
131 kasan_force_async_fault(); \
132 if (READ_ONCE(test_status.report_found) != fail) { \
133 KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure" \
134 "%sexpected in \"" expr_str \
135 "\", but %soccurred", \
136 (fail ? " " : " not "), \
137 (test_status.report_found ? \
138 "" : "none ")); \
139 } \
140 if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
141 kasan_sync_fault_possible()) { \
142 if (READ_ONCE(test_status.report_found) && \
143 !READ_ONCE(test_status.async_fault)) \
144 kasan_enable_hw_tags(); \
145 migrate_enable(); \
146 } \
147 WRITE_ONCE(test_status.report_found, false); \
148 WRITE_ONCE(test_status.async_fault, false); \
149 } while (0)
150
151 /*
152 * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
153 * KASAN report; causes a KUnit test failure otherwise.
154 *
155 * @test: Currently executing KUnit test.
156 * @expr: Expression that must produce a KASAN report.
157 */
158 #define KUNIT_EXPECT_KASAN_FAIL(test, expr) \
159 KUNIT_EXPECT_KASAN_RESULT(test, expr, #expr, true)
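
/*
 * A minimal usage sketch (this mirrors the pattern used by the tests below):
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
 *	kfree(ptr);
 */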
160
161 /*
162 * KUNIT_EXPECT_KASAN_FAIL_READ - check that the executed expression
163 * produces a KASAN report when the write-only mode is not enabled;
164 * causes a KUnit test failure otherwise.
165 *
166 * Note: At the moment, this macro does not check whether the produced
167 * KASAN report is a report about a bad read access. It is only intended
168 * for checking the write-only KASAN mode functionality without failing
169 * KASAN tests.
170 *
171 * @test: Currently executing KUnit test.
172 * @expr: Expression that must only produce a KASAN report
173 * when the write-only mode is not enabled.
174 */
175 #define KUNIT_EXPECT_KASAN_FAIL_READ(test, expr) \
176 KUNIT_EXPECT_KASAN_RESULT(test, expr, #expr, \
177 !kasan_write_only_enabled())
178
179 #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
180 if (!IS_ENABLED(config)) \
181 kunit_skip((test), "Test requires " #config "=y"); \
182 } while (0)
183
184 #define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
185 if (IS_ENABLED(config)) \
186 kunit_skip((test), "Test requires " #config "=n"); \
187 } while (0)
188
189 #define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do { \
190 if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \
191 break; /* No compiler instrumentation. */ \
192 if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)) \
193 break; /* Should always be instrumented! */ \
194 if (IS_ENABLED(CONFIG_GENERIC_ENTRY)) \
195 kunit_skip((test), "Test requires checked mem*()"); \
196 } while (0)
197
198 static void kmalloc_oob_right(struct kunit *test)
199 {
200 char *ptr;
201 size_t size = 128 - KASAN_GRANULE_SIZE - 5;
202
203 ptr = kmalloc(size, GFP_KERNEL);
204 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
205
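/*
 * A worked layout sketch, assuming the generic mode (8-byte granules):
 * size is 115 and the object lands in a kmalloc-128 slot. ptr[115] shares
 * a granule with the last valid bytes, ptr[120] starts the first fully
 * out-of-bounds granule that still lies within the slot, and ptr[128] is
 * past the slot altogether.
 */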
206 OPTIMIZER_HIDE_VAR(ptr);
207 /*
208 * An unaligned access past the requested kmalloc size.
209 * Only generic KASAN can precisely detect these.
210 */
211 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
212 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
213
214 /*
215 * An aligned access into the first out-of-bounds granule that falls
216 * within the aligned kmalloc object.
217 */
218 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');
219
220 /* Out-of-bounds access past the aligned kmalloc object. */
221 KUNIT_EXPECT_KASAN_FAIL_READ(test, ptr[0] =
222 ptr[size + KASAN_GRANULE_SIZE + 5]);
223
224 kfree(ptr);
225 }
226
227 static void kmalloc_oob_left(struct kunit *test)
228 {
229 char *ptr;
230 size_t size = 15;
231
232 ptr = kmalloc(size, GFP_KERNEL);
233 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
234
235 OPTIMIZER_HIDE_VAR(ptr);
236 KUNIT_EXPECT_KASAN_FAIL_READ(test, *ptr = *(ptr - 1));
237 kfree(ptr);
238 }
239
240 static void kmalloc_node_oob_right(struct kunit *test)
241 {
242 char *ptr;
243 size_t size = 4096;
244
245 ptr = kmalloc_node(size, GFP_KERNEL, 0);
246 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
247
248 OPTIMIZER_HIDE_VAR(ptr);
249 KUNIT_EXPECT_KASAN_FAIL_READ(test, ptr[0] = ptr[size]);
250 kfree(ptr);
251 }
252
253 static void kmalloc_track_caller_oob_right(struct kunit *test)
254 {
255 char *ptr;
256 size_t size = 128 - KASAN_GRANULE_SIZE;
257
258 /*
259 * Check that KASAN detects an out-of-bounds access for an object allocated via
260 * kmalloc_track_caller().
261 */
262 ptr = kmalloc_track_caller(size, GFP_KERNEL);
263 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
264
265 OPTIMIZER_HIDE_VAR(ptr);
266 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
267
268 kfree(ptr);
269
270 /*
271 * Check that KASAN detects an out-of-bounds access for an object allocated via
272 * kmalloc_node_track_caller().
273 */
274 ptr = kmalloc_node_track_caller(size, GFP_KERNEL, 0);
275 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
276
277 OPTIMIZER_HIDE_VAR(ptr);
278 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
279
280 kfree(ptr);
281 }
282
283 /*
284 * Check that KASAN detects an out-of-bounds access for a big object allocated
285 * via kmalloc(), but not one big enough to trigger the page_alloc fallback.
286 */
287 static void kmalloc_big_oob_right(struct kunit *test)
288 {
289 char *ptr;
290 size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
291
292 ptr = kmalloc(size, GFP_KERNEL);
293 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
294
295 OPTIMIZER_HIDE_VAR(ptr);
296 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
297 kfree(ptr);
298 }
299
300 /*
301 * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
302 * that does not fit into the largest slab cache and therefore is allocated via
303 * the page_alloc fallback.
304 */
305
306 static void kmalloc_large_oob_right(struct kunit *test)
307 {
308 char *ptr;
309 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
310
311 ptr = kmalloc(size, GFP_KERNEL);
312 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
313
314 OPTIMIZER_HIDE_VAR(ptr);
315 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
316
317 kfree(ptr);
318 }
319
320 static void kmalloc_large_uaf(struct kunit *test)
321 {
322 char *ptr;
323 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
324
325 ptr = kmalloc(size, GFP_KERNEL);
326 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
327 kfree(ptr);
328
329 KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
330 }
331
332 static void kmalloc_large_invalid_free(struct kunit *test)
333 {
334 char *ptr;
335 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
336
337 ptr = kmalloc(size, GFP_KERNEL);
338 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
339
340 KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
341 }
342
343 static void page_alloc_oob_right(struct kunit *test)
344 {
345 char *ptr;
346 struct page *pages;
347 size_t order = 4;
348 size_t size = (1UL << (PAGE_SHIFT + order));
349
350 /*
351 * With generic KASAN, page allocations have no redzones, thus
352 * out-of-bounds detection is not guaranteed.
353 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
354 */
355 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
356
357 pages = alloc_pages(GFP_KERNEL, order);
358 ptr = page_address(pages);
359 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
360
361 KUNIT_EXPECT_KASAN_FAIL_READ(test, ptr[0] = ptr[size]);
362 free_pages((unsigned long)ptr, order);
363 }
364
365 static void page_alloc_uaf(struct kunit *test)
366 {
367 char *ptr;
368 struct page *pages;
369 size_t order = 4;
370
371 pages = alloc_pages(GFP_KERNEL, order);
372 ptr = page_address(pages);
373 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
374 free_pages((unsigned long)ptr, order);
375
376 KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
377 }
378
379 static void krealloc_more_oob_helper(struct kunit *test,
380 size_t size1, size_t size2)
381 {
382 char *ptr1, *ptr2;
383 size_t middle;
384
385 KUNIT_ASSERT_LT(test, size1, size2);
386 middle = size1 + (size2 - size1) / 2;
387
388 ptr1 = kmalloc(size1, GFP_KERNEL);
389 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
390
391 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
392 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
393
394 /* Suppress -Warray-bounds warnings. */
395 OPTIMIZER_HIDE_VAR(ptr2);
396
397 /* All offsets up to size2 must be accessible. */
398 ptr2[size1 - 1] = 'x';
399 ptr2[size1] = 'x';
400 ptr2[middle] = 'x';
401 ptr2[size2 - 1] = 'x';
402
403 /* Generic mode is precise, so unaligned size2 must be inaccessible. */
404 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
405 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
406
407 /* For all modes, the first aligned offset after size2 must be inaccessible. */
408 KUNIT_EXPECT_KASAN_FAIL(test,
409 ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
410
411 kfree(ptr2);
412 }
413
414 static void krealloc_less_oob_helper(struct kunit *test,
415 size_t size1, size_t size2)
416 {
417 char *ptr1, *ptr2;
418 size_t middle;
419
420 KUNIT_ASSERT_LT(test, size2, size1);
421 middle = size2 + (size1 - size2) / 2;
422
423 ptr1 = kmalloc(size1, GFP_KERNEL);
424 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
425
426 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
427 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
428
429 /* Suppress -Warray-bounds warnings. */
430 OPTIMIZER_HIDE_VAR(ptr2);
431
432 /* Must be accessible for all modes. */
433 ptr2[size2 - 1] = 'x';
434
435 /* Generic mode is precise, so unaligned size2 must be inaccessible. */
436 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
437 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
438
439 /* For all modes, the first aligned offset after size2 must be inaccessible. */
440 KUNIT_EXPECT_KASAN_FAIL(test,
441 ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
442
443 /*
444 * For all modes, size2, middle, and size1 should land in separate
445 * granules, and thus the latter two offsets should be inaccessible.
446 */
447 KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
448 round_down(middle, KASAN_GRANULE_SIZE));
449 KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
450 round_down(size1, KASAN_GRANULE_SIZE));
451 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
452 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
453 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');
454
455 kfree(ptr2);
456 }
457
458 static void krealloc_more_oob(struct kunit *test)
459 {
460 krealloc_more_oob_helper(test, 201, 235);
461 }
462
463 static void krealloc_less_oob(struct kunit *test)
464 {
465 krealloc_less_oob_helper(test, 235, 201);
466 }
467
468 static void krealloc_large_more_oob(struct kunit *test)
469 {
470 krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
471 KMALLOC_MAX_CACHE_SIZE + 235);
472 }
473
474 static void krealloc_large_less_oob(struct kunit *test)
475 {
476 krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
477 KMALLOC_MAX_CACHE_SIZE + 201);
478 }
479
480 /*
481 * Check that krealloc() detects a use-after-free, returns NULL,
482 * and doesn't unpoison the freed object.
483 */
484 static void krealloc_uaf(struct kunit *test)
485 {
486 char *ptr1, *ptr2;
487 int size1 = 201;
488 int size2 = 235;
489
490 ptr1 = kmalloc(size1, GFP_KERNEL);
491 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
492 kfree(ptr1);
493
494 KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
495 KUNIT_ASSERT_NULL(test, ptr2);
496 KUNIT_EXPECT_KASAN_FAIL_READ(test, *(volatile char *)ptr1);
497 }
498
499 static void kmalloc_oob_16(struct kunit *test)
500 {
501 struct {
502 u64 words[2];
503 } *ptr1, *ptr2;
504
505 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
506
507 /* This test is specifically crafted for the generic mode. */
508 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
509
510 /* RELOC_HIDE to prevent gcc from warning about short alloc */
511 ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
512 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
513
514 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
515 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
516
517 OPTIMIZER_HIDE_VAR(ptr1);
518 OPTIMIZER_HIDE_VAR(ptr2);
519 KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
520 kfree(ptr1);
521 kfree(ptr2);
522 }
523
524 static void kmalloc_uaf_16(struct kunit *test)
525 {
526 struct {
527 u64 words[2];
528 } *ptr1, *ptr2;
529
530 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
531
532 ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
533 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
534
535 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
536 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
537 kfree(ptr2);
538
539 KUNIT_EXPECT_KASAN_FAIL_READ(test, *ptr1 = *ptr2);
540 kfree(ptr1);
541 }
542
543 /*
544 * Note: in the memset tests below, the written range touches both valid and
545 * invalid memory. This makes sure that the instrumentation checks not only
546 * the starting address but also the whole range.
547 */
548
549 static void kmalloc_oob_memset_2(struct kunit *test)
550 {
551 char *ptr;
552 size_t size = 128 - KASAN_GRANULE_SIZE;
553 size_t memset_size = 2;
554
555 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
556
557 ptr = kmalloc(size, GFP_KERNEL);
558 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
559
560 OPTIMIZER_HIDE_VAR(ptr);
561 OPTIMIZER_HIDE_VAR(size);
562 OPTIMIZER_HIDE_VAR(memset_size);
563 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
564 kfree(ptr);
565 }
566
567 static void kmalloc_oob_memset_4(struct kunit *test)
568 {
569 char *ptr;
570 size_t size = 128 - KASAN_GRANULE_SIZE;
571 size_t memset_size = 4;
572
573 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
574
575 ptr = kmalloc(size, GFP_KERNEL);
576 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
577
578 OPTIMIZER_HIDE_VAR(ptr);
579 OPTIMIZER_HIDE_VAR(size);
580 OPTIMIZER_HIDE_VAR(memset_size);
581 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
582 kfree(ptr);
583 }
584
585 static void kmalloc_oob_memset_8(struct kunit *test)
586 {
587 char *ptr;
588 size_t size = 128 - KASAN_GRANULE_SIZE;
589 size_t memset_size = 8;
590
591 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
592
593 ptr = kmalloc(size, GFP_KERNEL);
594 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
595
596 OPTIMIZER_HIDE_VAR(ptr);
597 OPTIMIZER_HIDE_VAR(size);
598 OPTIMIZER_HIDE_VAR(memset_size);
599 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
600 kfree(ptr);
601 }
602
603 static void kmalloc_oob_memset_16(struct kunit *test)
604 {
605 char *ptr;
606 size_t size = 128 - KASAN_GRANULE_SIZE;
607 size_t memset_size = 16;
608
609 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
610
611 ptr = kmalloc(size, GFP_KERNEL);
612 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
613
614 OPTIMIZER_HIDE_VAR(ptr);
615 OPTIMIZER_HIDE_VAR(size);
616 OPTIMIZER_HIDE_VAR(memset_size);
617 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
618 kfree(ptr);
619 }
620
621 static void kmalloc_oob_in_memset(struct kunit *test)
622 {
623 char *ptr;
624 size_t size = 128 - KASAN_GRANULE_SIZE;
625
626 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
627
628 ptr = kmalloc(size, GFP_KERNEL);
629 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
630
631 OPTIMIZER_HIDE_VAR(ptr);
632 OPTIMIZER_HIDE_VAR(size);
633 KUNIT_EXPECT_KASAN_FAIL(test,
634 memset(ptr, 0, size + KASAN_GRANULE_SIZE));
635 kfree(ptr);
636 }
637
638 static void kmalloc_memmove_negative_size(struct kunit *test)
639 {
640 char *ptr;
641 size_t size = 64;
642 size_t invalid_size = -2;
643
644 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
645
646 /*
647 * Hardware tag-based mode doesn't check memmove for negative size.
648 * As a result, this test introduces a side-effect memory corruption,
649 * which can result in a crash.
650 */
651 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
652
653 ptr = kmalloc(size, GFP_KERNEL);
654 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
655
656 memset((char *)ptr, 0, 64);
657 OPTIMIZER_HIDE_VAR(ptr);
658 OPTIMIZER_HIDE_VAR(invalid_size);
659 KUNIT_EXPECT_KASAN_FAIL(test,
660 memmove((char *)ptr, (char *)ptr + 4, invalid_size));
661 kfree(ptr);
662 }
663
664 static void kmalloc_memmove_invalid_size(struct kunit *test)
665 {
666 char *ptr;
667 size_t size = 64;
668 size_t invalid_size = size;
669
670 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
671
672 ptr = kmalloc(size, GFP_KERNEL);
673 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
674
675 memset((char *)ptr, 0, 64);
676 OPTIMIZER_HIDE_VAR(ptr);
677 OPTIMIZER_HIDE_VAR(invalid_size);
678 KUNIT_EXPECT_KASAN_FAIL_READ(test,
679 memmove((char *)ptr, (char *)ptr + 4, invalid_size));
680 kfree(ptr);
681 }
682
683 static void kmalloc_uaf(struct kunit *test)
684 {
685 char *ptr;
686 size_t size = 10;
687
688 ptr = kmalloc(size, GFP_KERNEL);
689 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
690
691 kfree(ptr);
692 KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[8]);
693 }
694
695 static void kmalloc_uaf_memset(struct kunit *test)
696 {
697 char *ptr;
698 size_t size = 33;
699
700 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
701
702 /*
703 * Only generic KASAN uses quarantine, which is required to avoid a
704 * kernel memory corruption this test causes.
705 */
706 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
707
708 ptr = kmalloc(size, GFP_KERNEL);
709 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
710
711 kfree(ptr);
712 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
713 }
714
715 static void kmalloc_uaf2(struct kunit *test)
716 {
717 char *ptr1, *ptr2;
718 size_t size = 43;
719 int counter = 0;
720
721 again:
722 ptr1 = kmalloc(size, GFP_KERNEL);
723 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
724
725 kfree(ptr1);
726
727 ptr2 = kmalloc(size, GFP_KERNEL);
728 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
729
730 /*
731 * For tag-based KASAN, the ptr1 and ptr2 tags might happen to be the same.
732 * Allow up to 16 attempts at generating different tags.
733 */
734 if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
735 kfree(ptr2);
736 goto again;
737 }
738
739 KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr1)[40]);
740 KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
741
742 kfree(ptr2);
743 }
744
745 /*
746 * Check that KASAN detects use-after-free when another object was allocated in
747 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
748 */
749 static void kmalloc_uaf3(struct kunit *test)
750 {
751 char *ptr1, *ptr2;
752 size_t size = 100;
753
754 /* This test is specifically crafted for tag-based modes. */
755 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
756
757 ptr1 = kmalloc(size, GFP_KERNEL);
758 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
759 kfree(ptr1);
760
761 ptr2 = kmalloc(size, GFP_KERNEL);
762 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
763 kfree(ptr2);
764
765 KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr1)[8]);
766 }
767
768 static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
769 {
770 int *i_unsafe = unsafe;
771
772 KUNIT_EXPECT_KASAN_FAIL_READ(test, READ_ONCE(*i_unsafe));
773 KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
774 KUNIT_EXPECT_KASAN_FAIL_READ(test, smp_load_acquire(i_unsafe));
775 KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));
776
777 KUNIT_EXPECT_KASAN_FAIL_READ(test, atomic_read(unsafe));
778 KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
779 KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
780 KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
781 KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
782 KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
783 KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
784 KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
785 KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
786 KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
787 KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
788 KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
789 KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
790 /*
791 * The result of the test below may vary due to garbage values of
792 * unsafe in write-only mode.
793 * Therefore, skip this test when KASAN is configured in write-only mode.
794 */
795 if (!kasan_write_only_enabled())
796 KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
797 KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
798 KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
799 KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
800 KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
801 /*
802 * The results of the tests below may vary due to garbage values of
803 * unsafe in write-only mode.
804 * Therefore, skip these tests when KASAN is configured in write-only mode.
805 */
806 if (!kasan_write_only_enabled()) {
807 KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
808 KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
809 KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
810 KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
811 KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
812 }
813
814 KUNIT_EXPECT_KASAN_FAIL_READ(test, atomic_long_read(unsafe));
815 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
816 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
817 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
818 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
819 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
820 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
821 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
822 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
823 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
824 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
825 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
826 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
827 /*
828 * The result of the test below may vary due to garbage values of
829 * unsafe in write-only mode.
830 * Therefore, skip this test when KASAN is configured in write-only mode.
831 */
832 if (!kasan_write_only_enabled())
833 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
834 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
835 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
836 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
837 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
838 /*
839 * The results of the tests below may vary due to garbage values of
840 * unsafe in write-only mode.
841 * Therefore, skip these tests when KASAN is configured in write-only mode.
842 */
843 if (!kasan_write_only_enabled()) {
844 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
845 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
846 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
847 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
848 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
849 }
850 }
851
852 static void kasan_atomics(struct kunit *test)
853 {
854 void *a1, *a2;
855
856 /*
857 * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
858 * that the following 16 bytes will make up the redzone.
859 */
860 a1 = kzalloc(48, GFP_KERNEL);
861 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
862 a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
863 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);
864
865 /* Use atomics to access the redzone. */
866 kasan_atomics_helper(test, a1 + 48, a2);
867
868 kfree(a1);
869 kfree(a2);
870 }
871
872 static void kmalloc_double_kzfree(struct kunit *test)
873 {
874 char *ptr;
875 size_t size = 16;
876
877 ptr = kmalloc(size, GFP_KERNEL);
878 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
879
880 kfree_sensitive(ptr);
881 KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
882 }
883
884 /* Check that ksize() does NOT unpoison the whole object. */
885 static void ksize_unpoisons_memory(struct kunit *test)
886 {
887 char *ptr;
888 size_t size = 128 - KASAN_GRANULE_SIZE - 5;
889 size_t real_size;
890
891 ptr = kmalloc(size, GFP_KERNEL);
892 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
893
894 real_size = ksize(ptr);
895 KUNIT_EXPECT_GT(test, real_size, size);
896
897 OPTIMIZER_HIDE_VAR(ptr);
898
899 /* These accesses shouldn't trigger a KASAN report. */
900 ptr[0] = 'x';
901 ptr[size - 1] = 'x';
902
903 /* These must trigger a KASAN report. */
904 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
905 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
906 KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[size + 5]);
907 KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[real_size - 1]);
908
909 kfree(ptr);
910 }
911
912 /*
913 * Check that a use-after-free is detected by ksize() and via normal accesses
914 * after it.
915 */
916 static void ksize_uaf(struct kunit *test)
917 {
918 char *ptr;
919 int size = 128 - KASAN_GRANULE_SIZE;
920
921 ptr = kmalloc(size, GFP_KERNEL);
922 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
923 kfree(ptr);
924
925 OPTIMIZER_HIDE_VAR(ptr);
926 KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
927 KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
928 KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[size]);
929 }
930
931 /*
932 * The two tests below check that Generic KASAN prints auxiliary stack traces
933 * for RCU callbacks and workqueues. The reports need to be inspected manually.
934 *
935 * These tests are still enabled for other KASAN modes to make sure that all
936 * modes report bad accesses in tested scenarios.
937 */
938
939 static struct kasan_rcu_info {
940 int i;
941 struct rcu_head rcu;
942 } *global_rcu_ptr;
943
944 static void rcu_uaf_reclaim(struct rcu_head *rp)
945 {
946 struct kasan_rcu_info *fp =
947 container_of(rp, struct kasan_rcu_info, rcu);
948
949 kfree(fp);
950 ((volatile struct kasan_rcu_info *)fp)->i;
951 }
952
953 static void rcu_uaf(struct kunit *test)
954 {
955 struct kasan_rcu_info *ptr;
956
957 ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
958 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
959
960 global_rcu_ptr = rcu_dereference_protected(
961 (struct kasan_rcu_info __rcu *)ptr, NULL);
962
963 KUNIT_EXPECT_KASAN_FAIL_READ(test,
964 call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
965 rcu_barrier());
966 }
967
968 static void workqueue_uaf_work(struct work_struct *work)
969 {
970 kfree(work);
971 }
972
973 static void workqueue_uaf(struct kunit *test)
974 {
975 struct workqueue_struct *workqueue;
976 struct work_struct *work;
977
978 workqueue = create_workqueue("kasan_workqueue_test");
979 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
980
981 work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
982 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
983
984 INIT_WORK(work, workqueue_uaf_work);
985 queue_work(workqueue, work);
986 destroy_workqueue(workqueue);
987
988 KUNIT_EXPECT_KASAN_FAIL_READ(test,
989 ((volatile struct work_struct *)work)->data);
990 }
991
992 static void kfree_via_page(struct kunit *test)
993 {
994 char *ptr;
995 size_t size = 8;
996 struct page *page;
997 unsigned long offset;
998
999 ptr = kmalloc(size, GFP_KERNEL);
1000 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1001
1002 page = virt_to_page(ptr);
1003 offset = offset_in_page(ptr);
1004 kfree(page_address(page) + offset);
1005 }
1006
1007 static void kfree_via_phys(struct kunit *test)
1008 {
1009 char *ptr;
1010 size_t size = 8;
1011 phys_addr_t phys;
1012
1013 ptr = kmalloc(size, GFP_KERNEL);
1014 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1015
1016 phys = virt_to_phys(ptr);
1017 kfree(phys_to_virt(phys));
1018 }
1019
1020 static void kmem_cache_oob(struct kunit *test)
1021 {
1022 char *p;
1023 size_t size = 200;
1024 struct kmem_cache *cache;
1025
1026 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1027 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1028
1029 p = kmem_cache_alloc(cache, GFP_KERNEL);
1030 if (!p) {
1031 kunit_err(test, "Allocation failed: %s\n", __func__);
1032 kmem_cache_destroy(cache);
1033 return;
1034 }
1035
1036 KUNIT_EXPECT_KASAN_FAIL_READ(test, *p = p[size + OOB_TAG_OFF]);
1037
1038 kmem_cache_free(cache, p);
1039 kmem_cache_destroy(cache);
1040 }
1041
1042 static void kmem_cache_double_free(struct kunit *test)
1043 {
1044 char *p;
1045 size_t size = 200;
1046 struct kmem_cache *cache;
1047
1048 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1049 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1050
1051 p = kmem_cache_alloc(cache, GFP_KERNEL);
1052 if (!p) {
1053 kunit_err(test, "Allocation failed: %s\n", __func__);
1054 kmem_cache_destroy(cache);
1055 return;
1056 }
1057
1058 kmem_cache_free(cache, p);
1059 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
1060 kmem_cache_destroy(cache);
1061 }
1062
1063 static void kmem_cache_invalid_free(struct kunit *test)
1064 {
1065 char *p;
1066 size_t size = 200;
1067 struct kmem_cache *cache;
1068
1069 cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
1070 NULL);
1071 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1072
1073 p = kmem_cache_alloc(cache, GFP_KERNEL);
1074 if (!p) {
1075 kunit_err(test, "Allocation failed: %s\n", __func__);
1076 kmem_cache_destroy(cache);
1077 return;
1078 }
1079
1080 /* Trigger an invalid free; the object doesn't actually get freed. */
1081 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
1082
1083 /*
1084 * Properly free the object to prevent the "Objects remaining in
1085 * test_cache on __kmem_cache_shutdown" BUG failure.
1086 */
1087 kmem_cache_free(cache, p);
1088
1089 kmem_cache_destroy(cache);
1090 }
1091
1092 static void kmem_cache_rcu_uaf(struct kunit *test)
1093 {
1094 char *p;
1095 size_t size = 200;
1096 struct kmem_cache *cache;
1097
1098 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
1099
1100 cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
1101 NULL);
1102 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1103
1104 p = kmem_cache_alloc(cache, GFP_KERNEL);
1105 if (!p) {
1106 kunit_err(test, "Allocation failed: %s\n", __func__);
1107 kmem_cache_destroy(cache);
1108 return;
1109 }
1110 *p = 1;
1111
1112 rcu_read_lock();
1113
1114 /* Free the object - this will internally schedule an RCU callback. */
1115 kmem_cache_free(cache, p);
1116
1117 /*
1118 * We should still be allowed to access the object at this point because
1119 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
1120 * critical section since before the kmem_cache_free().
1121 */
1122 READ_ONCE(*p);
1123
1124 rcu_read_unlock();
1125
1126 /*
1127 * Wait for the RCU callback to execute; after this, the object should
1128 * have actually been freed from KASAN's perspective.
1129 */
1130 rcu_barrier();
1131
1132 KUNIT_EXPECT_KASAN_FAIL_READ(test, READ_ONCE(*p));
1133
1134 kmem_cache_destroy(cache);
1135 }
1136
1137 /*
1138 * Check that SLAB_TYPESAFE_BY_RCU objects are immediately reused when
1139 * CONFIG_SLUB_RCU_DEBUG is off, and stay at the same address.
1140 * Without this, KASAN builds would be unable to trigger bugs caused by
1141 * SLAB_TYPESAFE_BY_RCU users handling recycled objects improperly.
1142 */
1143 static void kmem_cache_rcu_reuse(struct kunit *test)
1144 {
1145 char *p, *p2;
1146 struct kmem_cache *cache;
1147
1148 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_SLUB_RCU_DEBUG);
1149
1150 cache = kmem_cache_create("test_cache", 16, 0, SLAB_TYPESAFE_BY_RCU,
1151 NULL);
1152 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1153
1154 migrate_disable();
1155 p = kmem_cache_alloc(cache, GFP_KERNEL);
1156 if (!p) {
1157 kunit_err(test, "Allocation failed: %s\n", __func__);
1158 goto out;
1159 }
1160
1161 kmem_cache_free(cache, p);
1162 p2 = kmem_cache_alloc(cache, GFP_KERNEL);
1163 if (!p2) {
1164 kunit_err(test, "Allocation failed: %s\n", __func__);
1165 goto out;
1166 }
1167 KUNIT_EXPECT_PTR_EQ(test, p, p2);
1168
1169 kmem_cache_free(cache, p2);
1170
1171 out:
1172 migrate_enable();
1173 kmem_cache_destroy(cache);
1174 }
1175
1176 static void kmem_cache_double_destroy(struct kunit *test)
1177 {
1178 struct kmem_cache *cache;
1179
1180 cache = kmem_cache_create("test_cache", 200, 0, SLAB_NO_MERGE, NULL);
1181 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1182 kmem_cache_destroy(cache);
1183 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
1184 }
1185
1186 static void kmem_cache_accounted(struct kunit *test)
1187 {
1188 int i;
1189 char *p;
1190 size_t size = 200;
1191 struct kmem_cache *cache;
1192
1193 cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
1194 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1195
1196 /*
1197 * Several allocations with a delay to allow for lazy per-memcg kmem
1198 * cache creation.
1199 */
1200 for (i = 0; i < 5; i++) {
1201 p = kmem_cache_alloc(cache, GFP_KERNEL);
1202 if (!p)
1203 goto free_cache;
1204
1205 kmem_cache_free(cache, p);
1206 msleep(100);
1207 }
1208
1209 free_cache:
1210 kmem_cache_destroy(cache);
1211 }
1212
1213 static void kmem_cache_bulk(struct kunit *test)
1214 {
1215 struct kmem_cache *cache;
1216 size_t size = 200;
1217 char *p[10];
1218 bool ret;
1219 int i;
1220
1221 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1222 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1223
1224 ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
1225 if (!ret) {
1226 kunit_err(test, "Allocation failed: %s\n", __func__);
1227 kmem_cache_destroy(cache);
1228 return;
1229 }
1230
1231 for (i = 0; i < ARRAY_SIZE(p); i++)
1232 p[i][0] = p[i][size - 1] = 42;
1233
1234 kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
1235 kmem_cache_destroy(cache);
1236 }
1237
1238 static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
1239 {
1240 int pool_size = 4;
1241 int ret;
1242 void *elem;
1243
1244 memset(pool, 0, sizeof(*pool));
1245 ret = mempool_init_kmalloc_pool(pool, pool_size, size);
1246 KUNIT_ASSERT_EQ(test, ret, 0);
1247
1248 /*
1249 * Allocate one element to prevent mempool from freeing elements to the
1250 * underlying allocator and instead make it add them to the element
1251 * list when the tests trigger double-free and invalid-free bugs.
1252 * This allows testing KASAN annotations in add_element().
1253 */
1254 elem = mempool_alloc_preallocated(pool);
1255 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1256
1257 return elem;
1258 }
1259
1260 static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
1261 {
1262 struct kmem_cache *cache;
1263 int pool_size = 4;
1264 int ret;
1265
1266 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1267 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1268
1269 memset(pool, 0, sizeof(*pool));
1270 ret = mempool_init_slab_pool(pool, pool_size, cache);
1271 KUNIT_ASSERT_EQ(test, ret, 0);
1272
1273 /*
1274 * Do not allocate one preallocated element, as we skip the double-free
1275 * and invalid-free tests for slab mempool for simplicity.
1276 */
1277
1278 return cache;
1279 }
1280
1281 static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
1282 {
1283 int pool_size = 4;
1284 int ret;
1285 void *elem;
1286
1287 memset(pool, 0, sizeof(*pool));
1288 ret = mempool_init_page_pool(pool, pool_size, order);
1289 KUNIT_ASSERT_EQ(test, ret, 0);
1290
1291 elem = mempool_alloc_preallocated(pool);
1292 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1293
1294 return elem;
1295 }
1296
1297 static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
1298 {
1299 char *elem;
1300
1301 elem = mempool_alloc_preallocated(pool);
1302 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1303
1304 OPTIMIZER_HIDE_VAR(elem);
1305
1306 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1307 KUNIT_EXPECT_KASAN_FAIL(test,
1308 ((volatile char *)&elem[size])[0]);
1309 else
1310 KUNIT_EXPECT_KASAN_FAIL_READ(test,
1311 ((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);
1312
1313 mempool_free(elem, pool);
1314 }
1315
1316 static void mempool_kmalloc_oob_right(struct kunit *test)
1317 {
1318 mempool_t pool;
1319 size_t size = 128 - KASAN_GRANULE_SIZE - 5;
1320 void *extra_elem;
1321
1322 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1323
1324 mempool_oob_right_helper(test, &pool, size);
1325
1326 mempool_free(extra_elem, &pool);
1327 mempool_exit(&pool);
1328 }
1329
1330 static void mempool_kmalloc_large_oob_right(struct kunit *test)
1331 {
1332 mempool_t pool;
1333 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1334 void *extra_elem;
1335
1336 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1337
1338 mempool_oob_right_helper(test, &pool, size);
1339
1340 mempool_free(extra_elem, &pool);
1341 mempool_exit(&pool);
1342 }
1343
1344 static void mempool_slab_oob_right(struct kunit *test)
1345 {
1346 mempool_t pool;
1347 size_t size = 123;
1348 struct kmem_cache *cache;
1349
1350 cache = mempool_prepare_slab(test, &pool, size);
1351
1352 mempool_oob_right_helper(test, &pool, size);
1353
1354 mempool_exit(&pool);
1355 kmem_cache_destroy(cache);
1356 }
1357
1358 /*
1359 * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
1360 * allocations have no redzones, and thus the out-of-bounds detection is not
1361 * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
1362 * the tag-based KASAN modes, the neighboring allocation might have the same
1363 * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
1364 */
1365
1366 static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
1367 {
1368 char *elem, *ptr;
1369
1370 elem = mempool_alloc_preallocated(pool);
1371 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1372
1373 mempool_free(elem, pool);
1374
1375 ptr = page ? page_address((struct page *)elem) : elem;
1376 KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
1377 }
1378
1379 static void mempool_kmalloc_uaf(struct kunit *test)
1380 {
1381 mempool_t pool;
1382 size_t size = 128;
1383 void *extra_elem;
1384
1385 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1386
1387 mempool_uaf_helper(test, &pool, false);
1388
1389 mempool_free(extra_elem, &pool);
1390 mempool_exit(&pool);
1391 }
1392
1393 static void mempool_kmalloc_large_uaf(struct kunit *test)
1394 {
1395 mempool_t pool;
1396 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1397 void *extra_elem;
1398
1399 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1400
1401 mempool_uaf_helper(test, &pool, false);
1402
1403 mempool_free(extra_elem, &pool);
1404 mempool_exit(&pool);
1405 }
1406
1407 static void mempool_slab_uaf(struct kunit *test)
1408 {
1409 mempool_t pool;
1410 size_t size = 123;
1411 struct kmem_cache *cache;
1412
1413 cache = mempool_prepare_slab(test, &pool, size);
1414
1415 mempool_uaf_helper(test, &pool, false);
1416
1417 mempool_exit(&pool);
1418 kmem_cache_destroy(cache);
1419 }
1420
1421 static void mempool_page_alloc_uaf(struct kunit *test)
1422 {
1423 mempool_t pool;
1424 int order = 2;
1425 void *extra_elem;
1426
1427 extra_elem = mempool_prepare_page(test, &pool, order);
1428
1429 mempool_uaf_helper(test, &pool, true);
1430
1431 mempool_free(extra_elem, &pool);
1432 mempool_exit(&pool);
1433 }
1434
1435 static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
1436 {
1437 char *elem;
1438
1439 elem = mempool_alloc_preallocated(pool);
1440 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1441
1442 mempool_free(elem, pool);
1443
1444 KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
1445 }
1446
1447 static void mempool_kmalloc_double_free(struct kunit *test)
1448 {
1449 mempool_t pool;
1450 size_t size = 128;
1451 char *extra_elem;
1452
1453 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1454
1455 mempool_double_free_helper(test, &pool);
1456
1457 mempool_free(extra_elem, &pool);
1458 mempool_exit(&pool);
1459 }
1460
1461 static void mempool_kmalloc_large_double_free(struct kunit *test)
1462 {
1463 mempool_t pool;
1464 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1465 char *extra_elem;
1466
1467 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1468
1469 mempool_double_free_helper(test, &pool);
1470
1471 mempool_free(extra_elem, &pool);
1472 mempool_exit(&pool);
1473 }
1474
1475 static void mempool_page_alloc_double_free(struct kunit *test)
1476 {
1477 mempool_t pool;
1478 int order = 2;
1479 char *extra_elem;
1480
1481 extra_elem = mempool_prepare_page(test, &pool, order);
1482
1483 mempool_double_free_helper(test, &pool);
1484
1485 mempool_free(extra_elem, &pool);
1486 mempool_exit(&pool);
1487 }
1488
1489 static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
1490 {
1491 char *elem;
1492
1493 elem = mempool_alloc_preallocated(pool);
1494 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1495
1496 KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));
1497
1498 mempool_free(elem, pool);
1499 }
1500
1501 static void mempool_kmalloc_invalid_free(struct kunit *test)
1502 {
1503 mempool_t pool;
1504 size_t size = 128;
1505 char *extra_elem;
1506
1507 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1508
1509 mempool_kmalloc_invalid_free_helper(test, &pool);
1510
1511 mempool_free(extra_elem, &pool);
1512 mempool_exit(&pool);
1513 }
1514
1515 static void mempool_kmalloc_large_invalid_free(struct kunit *test)
1516 {
1517 mempool_t pool;
1518 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1519 char *extra_elem;
1520
1521 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1522
1523 mempool_kmalloc_invalid_free_helper(test, &pool);
1524
1525 mempool_free(extra_elem, &pool);
1526 mempool_exit(&pool);
1527 }
1528
1529 /*
1530 * Skip the invalid-free test for page mempool. The invalid-free detection only
1531 * works for compound pages, and mempool preallocates all page elements without
1532 * the __GFP_COMP flag.
1533 */
1534
1535 static char global_array[10];
1536
1537 static void kasan_global_oob_right(struct kunit *test)
1538 {
1539 /*
1540 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
1541 * from failing here and panicking the kernel, access the array via a
1542 * volatile pointer, which will prevent the compiler from being able to
1543 * determine the array bounds.
1544 *
1545 * This access uses a volatile pointer to char (char *volatile) rather
1546 * than the more conventional pointer to volatile char (volatile char *)
1547 * because we want to prevent the compiler from making inferences about
1548 * the pointer itself (i.e. its array bounds), not the data that it
1549 * refers to.
1550 */
1551 char *volatile array = global_array;
1552 char *p = &array[ARRAY_SIZE(global_array) + 3];
1553
1554 /* Only generic mode instruments globals. */
1555 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1556
1557 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1558 }
1559
1560 static void kasan_global_oob_left(struct kunit *test)
1561 {
1562 char *volatile array = global_array;
1563 char *p = array - 3;
1564
1565 /*
1566 * GCC is known to fail this test, skip it.
1567 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
1568 */
1569 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
1570 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1571 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1572 }
1573
1574 static void kasan_stack_oob(struct kunit *test)
1575 {
1576 char stack_array[10];
1577 /* See comment in kasan_global_oob_right. */
1578 char *volatile array = stack_array;
1579 char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
1580
1581 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1582
1583 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1584 }
1585
1586 static void kasan_alloca_oob_left(struct kunit *test)
1587 {
1588 volatile int i = 10;
1589 char alloca_array[i];
1590 /* See comment in kasan_global_oob_right. */
1591 char *volatile array = alloca_array;
1592 char *p = array - 1;
1593
1594 /* Only generic mode instruments dynamic allocas. */
1595 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1596 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1597
1598 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1599 }
1600
1601 static void kasan_alloca_oob_right(struct kunit *test)
1602 {
1603 volatile int i = 10;
1604 char alloca_array[i];
1605 /* See comment in kasan_global_oob_right. */
1606 char *volatile array = alloca_array;
1607 char *p = array + i;
1608
1609 /* Only generic mode instruments dynamic allocas. */
1610 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1611 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1612
1613 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1614 }
1615
1616 static void kasan_memchr(struct kunit *test)
1617 {
1618 char *ptr;
1619 size_t size = 24;
1620
1621 /*
1622 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1623 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1624 */
1625 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1626
1627 if (OOB_TAG_OFF)
1628 size = round_up(size, OOB_TAG_OFF);
1629
1630 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1631 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1632
1633 OPTIMIZER_HIDE_VAR(ptr);
1634 OPTIMIZER_HIDE_VAR(size);
1635 KUNIT_EXPECT_KASAN_FAIL_READ(test,
1636 kasan_ptr_result = memchr(ptr, '1', size + 1));
1637
1638 kfree(ptr);
1639 }
1640
1641 static void kasan_memcmp(struct kunit *test)
1642 {
1643 char *ptr;
1644 size_t size = 24;
1645 int arr[9];
1646
1647 /*
1648 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1649 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1650 */
1651 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1652
1653 if (OOB_TAG_OFF)
1654 size = round_up(size, OOB_TAG_OFF);
1655
1656 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1657 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1658 memset(arr, 0, sizeof(arr));
1659
1660 OPTIMIZER_HIDE_VAR(ptr);
1661 OPTIMIZER_HIDE_VAR(size);
1662 KUNIT_EXPECT_KASAN_FAIL_READ(test,
1663 kasan_int_result = memcmp(ptr, arr, size+1));
1664 kfree(ptr);
1665 }
1666
1667 static void kasan_strings(struct kunit *test)
1668 {
1669 char *ptr;
1670 char *src;
1671 size_t size = 24;
1672
1673 /*
1674 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1675 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1676 */
1677 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1678
1679 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1680 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1681 OPTIMIZER_HIDE_VAR(ptr);
1682
1683 src = kmalloc(KASAN_GRANULE_SIZE, GFP_KERNEL | __GFP_ZERO);
1684 strscpy(src, "f0cacc1a0000000", KASAN_GRANULE_SIZE);
1685 OPTIMIZER_HIDE_VAR(src);
1686
1687 /*
1688 * Make sure that strscpy() does not trigger KASAN if it overreads into
1689 * poisoned memory.
1690 *
1691 * The expected return value does not include the terminating '\0',
1692 * so it is (KASAN_GRANULE_SIZE - 2) ==
1693 * KASAN_GRANULE_SIZE - ("initial skipped character" + "\0").
1694 */
1695 KUNIT_EXPECT_EQ(test, KASAN_GRANULE_SIZE - 2,
1696 strscpy(ptr, src + 1, KASAN_GRANULE_SIZE));
1697
1698 /* strscpy should fail if the first byte is unreadable. */
1699 KUNIT_EXPECT_KASAN_FAIL_READ(test, strscpy(ptr, src + KASAN_GRANULE_SIZE,
1700 KASAN_GRANULE_SIZE));
1701
1702 kfree(src);
1703 kfree(ptr);
1704
1705 /*
1706 * Try to cause only 1 invalid access (less spam in dmesg).
1707 * For that, we need ptr to point to a zeroed byte.
1708 * Skip the metadata that could be stored in the freed object, so that
1709 * ptr will likely point to a zeroed byte.
1710 */
1711 ptr += 16;
1712 KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_ptr_result = strchr(ptr, '1'));
1713
1714 KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_ptr_result = strrchr(ptr, '1'));
1715
1716 KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strcmp(ptr, "2"));
1717
1718 KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strncmp(ptr, "2", 1));
1719
1720 KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strlen(ptr));
1721
1722 KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strnlen(ptr, 1));
1723 }
1724
1725 static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
1726 {
1727 KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
1728 KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
1729 KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
1730 KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
1731 KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
1732 KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
1733 KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
1734 KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
1735 }
1736
1737 static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
1738 {
1739 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
1740 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
1741 /*
1742 * In write-only mode, test_and_set_bit_lock() does not perform the write
1743 * (and thus does not fault) if the garbage bit is already set.
1744 * Therefore, skip the test_and_set_bit_lock test in write-only mode.
1745 */
1746 if (!kasan_write_only_enabled())
1747 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
1748 KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
1749 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
1750 KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
1751 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
1752 KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = test_bit(nr, addr));
1753 if (nr < 7)
1754 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
1755 xor_unlock_is_negative_byte(1 << nr, addr));
1756 }
1757
1758 static void kasan_bitops_generic(struct kunit *test)
1759 {
1760 long *bits;
1761
1762 /* This test is specifically crafted for the generic mode. */
1763 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1764
1765 /*
1766 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
1767 * this way we do not actually corrupt other memory.
1768 */
1769 bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
1770 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1771
1772 /*
1773 * The calls below try to access a bit within the allocated memory; the
1774 * accesses are nevertheless out-of-bounds, since bitops are defined to
1775 * operate on the whole long the bit is in.
1776 */
1777 kasan_bitops_modify(test, BITS_PER_LONG, bits);
1778
1779 /*
1780 * The calls below try to access a bit beyond the allocated memory.
1781 */
1782 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
1783
1784 kfree(bits);
1785 }
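/*
 * For reference, a sketch of the object layout exercised by the test above,
 * assuming a 64-bit kernel with generic KASAN (KASAN_GRANULE_SIZE == 8); the
 * exact slab geometry is an illustration, not something the test asserts:
 *
 *	bytes 0..7	bits[0], requested and in-bounds
 *	byte  8		the extra requested byte, in-bounds
 *	bytes 9..15	slack up to the kmalloc-16 object size, poisoned
 *
 * Bit BITS_PER_LONG (bit 64) nominally lives in byte 8, but bitops touch the
 * whole second long (bytes 8..15), which overlaps the poisoned slack, so
 * KASAN reports an out-of-bounds access. Bit BITS_PER_LONG + BITS_PER_BYTE
 * (bit 72) lives in byte 9, which is itself beyond the requested size.
 * Either way the access stays inside the 16-byte object, so no neighboring
 * allocation is corrupted.
 */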
1786
1787 static void kasan_bitops_tags(struct kunit *test)
1788 {
1789 long *bits;
1790
1791 /* This test is specifically crafted for tag-based modes. */
1792 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1793
1794 /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
1795 bits = kzalloc(48, GFP_KERNEL);
1796 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1797
1798 /* Do the accesses past the 48 allocated bytes, but within the redzone. */
1799 kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
1800 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
1801
1802 kfree(bits);
1803 }
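/*
 * Sketch of the layout for the tag-based variant above (illustrative; the
 * kmalloc-64 sizing is stated in the comment inside the test, not asserted
 * here):
 *
 *	bytes  0..47	requested, carrying the allocation's tag
 *	bytes 48..63	unrequested remainder of the kmalloc-64 object,
 *			retagged as redzone so that accesses to it fault
 *
 * The bitops are pointed at bits + 48, so every access lands in the redzone
 * and is reported, while still staying inside the 64-byte object and
 * corrupting no neighboring allocation.
 */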
1804
1805 static void vmalloc_helpers_tags(struct kunit *test)
1806 {
1807 void *ptr;
1808
1809 /* This test is intended for tag-based modes. */
1810 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1811
1812 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1813
1814 if (!kasan_vmalloc_enabled())
1815 kunit_skip(test, "Test requires kasan.vmalloc=on");
1816
1817 ptr = vmalloc(PAGE_SIZE);
1818 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1819
1820 /* Check that the returned pointer is tagged. */
1821 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1822 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1823
1824 /* Make sure exported vmalloc helpers handle tagged pointers. */
1825 KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
1826 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));
1827
1828 #if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
1829 {
1830 int rv;
1831
1832 /* Make sure vmalloc'ed memory permissions can be changed. */
1833 rv = set_memory_ro((unsigned long)ptr, 1);
1834 KUNIT_ASSERT_GE(test, rv, 0);
1835 rv = set_memory_rw((unsigned long)ptr, 1);
1836 KUNIT_ASSERT_GE(test, rv, 0);
1837 }
1838 #endif
1839
1840 vfree(ptr);
1841 }
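/*
 * For context on the tag checks above: under the tag-based modes the tag is
 * stored in the top byte of the pointer. A minimal sketch of what get_tag()
 * conceptually does (the real helper lives in mm/kasan/kasan.h; the explicit
 * shift below is an illustration for a 64-bit kernel, not the exact
 * implementation):
 *
 *	u8 tag = (u8)((u64)ptr >> 56);
 *
 * A tag in [KASAN_TAG_MIN, KASAN_TAG_KERNEL) is a real, per-allocation tag;
 * KASAN_TAG_KERNEL (0xff) is the match-all tag used for untagged pointers.
 */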
1842
1843 static void vmalloc_oob(struct kunit *test)
1844 {
1845 char *v_ptr, *p_ptr;
1846 struct page *page;
1847 size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;
1848
1849 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1850
1851 if (!kasan_vmalloc_enabled())
1852 kunit_skip(test, "Test requires kasan.vmalloc=on");
1853
1854 v_ptr = vmalloc(size);
1855 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1856
1857 OPTIMIZER_HIDE_VAR(v_ptr);
1858
1859 /*
1860 * We have to be careful not to hit the guard page in vmalloc tests.
1861 * The MMU will catch that and crash us.
1862 */
1863
1864 /* Make sure in-bounds accesses are valid. */
1865 v_ptr[0] = 0;
1866 v_ptr[size - 1] = 0;
1867
1868 /*
1869 * An unaligned access past the requested vmalloc size.
1870 * Only generic KASAN can precisely detect these.
1871 */
1872 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1873 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
1874
1875 /* An aligned access into the first out-of-bounds granule. */
1876 KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)v_ptr)[size + 5]);
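/*
 * A worked instance of the two accesses above, assuming 4 KB pages and
 * generic KASAN (KASAN_GRANULE_SIZE == 8); the numbers are illustrative and
 * not asserted by the test:
 *
 *	size            = 2048 - 8 - 5 = 2035, so offsets 0..2034 are valid;
 *	v_ptr[size]     touches offset 2035 in a partially used granule,
 *	                which only generic KASAN tracks byte-precisely;
 *	v_ptr[size + 5] touches offset 2040 = 2048 - 8, the start of a fully
 *	                poisoned granule, detectable by all modes.
 */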
1877
1878 /* Check that in-bounds accesses to the physical page are valid. */
1879 page = vmalloc_to_page(v_ptr);
1880 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
1881 p_ptr = page_address(page);
1882 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1883 p_ptr[0] = 0;
1884
1885 vfree(v_ptr);
1886
1887 /*
1888 * We can't check for use-after-unmap bugs in this nor in the following
1889 * vmalloc tests, as the page might be fully unmapped and accessing it
1890 * will crash the kernel.
1891 */
1892 }
1893
1894 static void vmap_tags(struct kunit *test)
1895 {
1896 char *p_ptr, *v_ptr;
1897 struct page *p_page, *v_page;
1898
1899 /*
1900 * This test is specifically crafted for the software tag-based mode,
1901 * the only tag-based mode that poisons vmap mappings.
1902 */
1903 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1904
1905 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1906
1907 if (!kasan_vmalloc_enabled())
1908 kunit_skip(test, "Test requires kasan.vmalloc=on");
1909
1910 p_page = alloc_pages(GFP_KERNEL, 1);
1911 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
1912 p_ptr = page_address(p_page);
1913 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1914
1915 v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
1916 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1917
1918 /*
1919 * We can't check for out-of-bounds bugs in this nor in the following
1920 * vmalloc tests, as allocations have page granularity and accessing
1921 * the guard page will crash the kernel.
1922 */
1923
1924 KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
1925 KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
1926
1927 /* Make sure that in-bounds accesses through both pointers work. */
1928 *p_ptr = 0;
1929 *v_ptr = 0;
1930
1931 /* Make sure vmalloc_to_page() correctly recovers the page pointer. */
1932 v_page = vmalloc_to_page(v_ptr);
1933 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
1934 KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);
1935
1936 vunmap(v_ptr);
1937 free_pages((unsigned long)p_ptr, 1);
1938 }
1939
1940 static void vm_map_ram_tags(struct kunit *test)
1941 {
1942 char *p_ptr, *v_ptr;
1943 struct page *page;
1944
1945 /*
1946 * This test is specifically crafted for the software tag-based mode,
1947 * the only tag-based mode that poisons vm_map_ram mappings.
1948 */
1949 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1950
1951 page = alloc_pages(GFP_KERNEL, 1);
1952 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
1953 p_ptr = page_address(page);
1954 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1955
1956 v_ptr = vm_map_ram(&page, 1, -1);
1957 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1958
1959 KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
1960 KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
1961
1962 /* Make sure that in-bounds accesses through both pointers work. */
1963 *p_ptr = 0;
1964 *v_ptr = 0;
1965
1966 vm_unmap_ram(v_ptr, 1);
1967 free_pages((unsigned long)p_ptr, 1);
1968 }
1969
1970 /*
1971 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
1972 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
1973 * modes.
1974 */
1975 static void match_all_not_assigned(struct kunit *test)
1976 {
1977 char *ptr;
1978 struct page *pages;
1979 int i, size, order;
1980
1981 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1982
1983 for (i = 0; i < 256; i++) {
1984 size = get_random_u32_inclusive(1, 1024);
1985 ptr = kmalloc(size, GFP_KERNEL);
1986 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1987 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1988 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1989 kfree(ptr);
1990 }
1991
1992 for (i = 0; i < 256; i++) {
1993 order = get_random_u32_inclusive(1, 4);
1994 pages = alloc_pages(GFP_KERNEL, order);
1995 ptr = page_address(pages);
1996 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1997 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1998 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1999 free_pages((unsigned long)ptr, order);
2000 }
2001
2002 if (!kasan_vmalloc_enabled())
2003 return;
2004
2005 for (i = 0; i < 256; i++) {
2006 size = get_random_u32_inclusive(1, 1024);
2007 ptr = vmalloc(size);
2008 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
2009 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
2010 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
2011 vfree(ptr);
2012 }
2013 }
2014
2015 /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
2016 static void match_all_ptr_tag(struct kunit *test)
2017 {
2018 char *ptr;
2019 u8 tag;
2020
2021 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
2022
2023 ptr = kmalloc(128, GFP_KERNEL);
2024 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
2025
2026 /* Backup the assigned tag. */
2027 tag = get_tag(ptr);
2028 KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);
2029
2030 /* Reset the tag to 0xff. */
2031 ptr = set_tag(ptr, KASAN_TAG_KERNEL);
2032
2033 /* This access shouldn't trigger a KASAN report. */
2034 *ptr = 0;
2035
2036 /* Recover the pointer tag and free. */
2037 ptr = set_tag(ptr, tag);
2038 kfree(ptr);
2039 }
2040
2041 /* Check that there are no match-all memory tags for tag-based modes. */
2042 static void match_all_mem_tag(struct kunit *test)
2043 {
2044 char *ptr;
2045 int tag;
2046
2047 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
2048
2049 ptr = kmalloc(128, GFP_KERNEL);
2050 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
2051 KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
2052
2053 /* For each possible tag value not matching the pointer tag. */
2054 for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
2055 /*
2056 * For Software Tag-Based KASAN, skip the majority of tag
2057 * values to avoid the test printing too many reports.
2058 */
2059 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
2060 tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
2061 continue;
2062
2063 if (tag == get_tag(ptr))
2064 continue;
2065
2066 /* Mark the first memory granule with the chosen memory tag. */
2067 kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
2068
2069 /* This access must cause a KASAN report. */
2070 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
2071 }
2072
2073 /* Recover the memory tag and free. */
2074 kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
2075 kfree(ptr);
2076 }
2077
2078 /*
2079 * Check that Rust performing a use-after-free using `unsafe` is detected.
2080 * This is a smoke test to make sure that Rust is being sanitized properly.
2081 */
2082 static void rust_uaf(struct kunit *test)
2083 {
2084 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
2085 KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
2086 }
2087
2088 /*
2089 * copy_to_kernel_nofault() is an internal helper that is not exported to
2090 * modules, so this test is built only when kasan_test is built-in.
2091 */
2092 #ifndef MODULE
2093 static void copy_to_kernel_nofault_oob(struct kunit *test)
2094 {
2095 char *ptr;
2096 char buf[128];
2097 size_t size = sizeof(buf);
2098
2099 /*
2100 * This test currently fails with the HW_TAGS mode. The reason is
2101 * unknown and needs to be investigated.
2102 */
2103 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
2104
2105 ptr = kmalloc(size - KASAN_GRANULE_SIZE, GFP_KERNEL);
2106 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
2107 OPTIMIZER_HIDE_VAR(ptr);
2108
2109 /*
2110 * We test copy_to_kernel_nofault() to detect corrupted memory that is
2111 * being written into the kernel. In contrast,
2112 * copy_from_kernel_nofault() is primarily used in kernel helper
2113 * functions where the source address might be random or uninitialized.
2114 * Applying KASAN instrumentation to copy_from_kernel_nofault() could
2115 * lead to false positives. By focusing KASAN checks only on
2116 * copy_to_kernel_nofault(), we ensure that only valid memory is
2117 * written to the kernel, minimizing the risk of kernel corruption
2118 * while avoiding false positives in the reverse case.
2119 */
2120 KUNIT_EXPECT_KASAN_FAIL(test,
2121 copy_to_kernel_nofault(&buf[0], ptr, size));
2122 KUNIT_EXPECT_KASAN_FAIL(test,
2123 copy_to_kernel_nofault(ptr, &buf[0], size));
2124
2125 kfree(ptr);
2126 }
2127 #endif /* !MODULE */
2128
2129 static void copy_user_test_oob(struct kunit *test)
2130 {
2131 char *kmem;
2132 char __user *usermem;
2133 unsigned long useraddr;
2134 size_t size = 128 - KASAN_GRANULE_SIZE;
2135 int __maybe_unused unused;
2136
2137 kmem = kunit_kmalloc(test, size, GFP_KERNEL);
2138 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, kmem);
2139
2140 useraddr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
2141 PROT_READ | PROT_WRITE | PROT_EXEC,
2142 MAP_ANONYMOUS | MAP_PRIVATE, 0);
2143 KUNIT_ASSERT_NE_MSG(test, useraddr, 0,
2144 "Could not create userspace mm");
2145 KUNIT_ASSERT_LT_MSG(test, useraddr, (unsigned long)TASK_SIZE,
2146 "Failed to allocate user memory");
2147
2148 OPTIMIZER_HIDE_VAR(size);
2149 usermem = (char __user *)useraddr;
2150
2151 KUNIT_EXPECT_KASAN_FAIL(test,
2152 unused = copy_from_user(kmem, usermem, size + 1));
2153 KUNIT_EXPECT_KASAN_FAIL_READ(test,
2154 unused = copy_to_user(usermem, kmem, size + 1));
2155 KUNIT_EXPECT_KASAN_FAIL(test,
2156 unused = __copy_from_user(kmem, usermem, size + 1));
2157 KUNIT_EXPECT_KASAN_FAIL_READ(test,
2158 unused = __copy_to_user(usermem, kmem, size + 1));
2159 KUNIT_EXPECT_KASAN_FAIL(test,
2160 unused = __copy_from_user_inatomic(kmem, usermem, size + 1));
2161 KUNIT_EXPECT_KASAN_FAIL_READ(test,
2162 unused = __copy_to_user_inatomic(usermem, kmem, size + 1));
2163
2164 /*
2165 * Prepare a long string in usermem to avoid the strncpy_from_user test
2166 * bailing out on '\0' before it reaches the out-of-bounds memory.
2167 */
2168 memset(kmem, 'a', size);
2169 KUNIT_EXPECT_EQ(test, copy_to_user(usermem, kmem, size), 0);
2170
2171 KUNIT_EXPECT_KASAN_FAIL(test,
2172 unused = strncpy_from_user(kmem, usermem, size + 1));
2173 }
2174
2175 static struct kunit_case kasan_kunit_test_cases[] = {
2176 KUNIT_CASE(kmalloc_oob_right),
2177 KUNIT_CASE(kmalloc_oob_left),
2178 KUNIT_CASE(kmalloc_node_oob_right),
2179 KUNIT_CASE(kmalloc_track_caller_oob_right),
2180 KUNIT_CASE(kmalloc_big_oob_right),
2181 KUNIT_CASE(kmalloc_large_oob_right),
2182 KUNIT_CASE(kmalloc_large_uaf),
2183 KUNIT_CASE(kmalloc_large_invalid_free),
2184 KUNIT_CASE(page_alloc_oob_right),
2185 KUNIT_CASE(page_alloc_uaf),
2186 KUNIT_CASE(krealloc_more_oob),
2187 KUNIT_CASE(krealloc_less_oob),
2188 KUNIT_CASE(krealloc_large_more_oob),
2189 KUNIT_CASE(krealloc_large_less_oob),
2190 KUNIT_CASE(krealloc_uaf),
2191 KUNIT_CASE(kmalloc_oob_16),
2192 KUNIT_CASE(kmalloc_uaf_16),
2193 KUNIT_CASE(kmalloc_oob_in_memset),
2194 KUNIT_CASE(kmalloc_oob_memset_2),
2195 KUNIT_CASE(kmalloc_oob_memset_4),
2196 KUNIT_CASE(kmalloc_oob_memset_8),
2197 KUNIT_CASE(kmalloc_oob_memset_16),
2198 KUNIT_CASE(kmalloc_memmove_negative_size),
2199 KUNIT_CASE(kmalloc_memmove_invalid_size),
2200 KUNIT_CASE(kmalloc_uaf),
2201 KUNIT_CASE(kmalloc_uaf_memset),
2202 KUNIT_CASE(kmalloc_uaf2),
2203 KUNIT_CASE(kmalloc_uaf3),
2204 KUNIT_CASE(kmalloc_double_kzfree),
2205 KUNIT_CASE(ksize_unpoisons_memory),
2206 KUNIT_CASE(ksize_uaf),
2207 KUNIT_CASE(rcu_uaf),
2208 KUNIT_CASE(workqueue_uaf),
2209 KUNIT_CASE(kfree_via_page),
2210 KUNIT_CASE(kfree_via_phys),
2211 KUNIT_CASE(kmem_cache_oob),
2212 KUNIT_CASE(kmem_cache_double_free),
2213 KUNIT_CASE(kmem_cache_invalid_free),
2214 KUNIT_CASE(kmem_cache_rcu_uaf),
2215 KUNIT_CASE(kmem_cache_rcu_reuse),
2216 KUNIT_CASE(kmem_cache_double_destroy),
2217 KUNIT_CASE(kmem_cache_accounted),
2218 KUNIT_CASE(kmem_cache_bulk),
2219 KUNIT_CASE(mempool_kmalloc_oob_right),
2220 KUNIT_CASE(mempool_kmalloc_large_oob_right),
2221 KUNIT_CASE(mempool_slab_oob_right),
2222 KUNIT_CASE(mempool_kmalloc_uaf),
2223 KUNIT_CASE(mempool_kmalloc_large_uaf),
2224 KUNIT_CASE(mempool_slab_uaf),
2225 KUNIT_CASE(mempool_page_alloc_uaf),
2226 KUNIT_CASE(mempool_kmalloc_double_free),
2227 KUNIT_CASE(mempool_kmalloc_large_double_free),
2228 KUNIT_CASE(mempool_page_alloc_double_free),
2229 KUNIT_CASE(mempool_kmalloc_invalid_free),
2230 KUNIT_CASE(mempool_kmalloc_large_invalid_free),
2231 KUNIT_CASE(kasan_global_oob_right),
2232 KUNIT_CASE(kasan_global_oob_left),
2233 KUNIT_CASE(kasan_stack_oob),
2234 KUNIT_CASE(kasan_alloca_oob_left),
2235 KUNIT_CASE(kasan_alloca_oob_right),
2236 KUNIT_CASE(kasan_memchr),
2237 KUNIT_CASE(kasan_memcmp),
2238 KUNIT_CASE(kasan_strings),
2239 KUNIT_CASE(kasan_bitops_generic),
2240 KUNIT_CASE(kasan_bitops_tags),
2241 KUNIT_CASE_SLOW(kasan_atomics),
2242 KUNIT_CASE(vmalloc_helpers_tags),
2243 KUNIT_CASE(vmalloc_oob),
2244 KUNIT_CASE(vmap_tags),
2245 KUNIT_CASE(vm_map_ram_tags),
2246 KUNIT_CASE(match_all_not_assigned),
2247 KUNIT_CASE(match_all_ptr_tag),
2248 KUNIT_CASE(match_all_mem_tag),
2249 #ifndef MODULE
2250 KUNIT_CASE(copy_to_kernel_nofault_oob),
2251 #endif
2252 KUNIT_CASE(rust_uaf),
2253 KUNIT_CASE(copy_user_test_oob),
2254 {}
2255 };
2256
2257 static struct kunit_suite kasan_kunit_test_suite = {
2258 .name = "kasan",
2259 .test_cases = kasan_kunit_test_cases,
2260 .exit = kasan_test_exit,
2261 .suite_init = kasan_suite_init,
2262 .suite_exit = kasan_suite_exit,
2263 };
2264
2265 kunit_test_suite(kasan_kunit_test_suite);
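/*
 * One way to run this suite with the KUnit wrapper (a sketch; the exact
 * kconfig options depend on which KASAN mode is being exercised and are an
 * assumption here, not something this file dictates):
 *
 *	./tools/testing/kunit/kunit.py run --arch=x86_64 \
 *		--kconfig_add CONFIG_KASAN=y \
 *		--kconfig_add CONFIG_KASAN_GENERIC=y \
 *		--kconfig_add CONFIG_KASAN_KUNIT_TEST=y \
 *		'kasan.*'
 */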
2266
2267 MODULE_DESCRIPTION("KUnit tests for checking KASAN bug-detection capabilities");
2268 MODULE_LICENSE("GPL");
2269