1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
6 */
7
8 #define pr_fmt(fmt) "kasan: test: " fmt
9
10 #include <kunit/test.h>
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/io.h>
14 #include <linux/kasan.h>
15 #include <linux/kernel.h>
16 #include <linux/mempool.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/module.h>
20 #include <linux/printk.h>
21 #include <linux/random.h>
22 #include <linux/set_memory.h>
23 #include <linux/slab.h>
24 #include <linux/string.h>
25 #include <linux/tracepoint.h>
26 #include <linux/uaccess.h>
27 #include <linux/vmalloc.h>
28 #include <trace/events/printk.h>
29
30 #include <asm/page.h>
31
32 #include "kasan.h"
33
34 #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
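/*
 * Note: the tag-based modes detect out-of-bounds accesses only with granule
 * granularity, so several tests below shift the accessed offset by
 * OOB_TAG_OFF to land in the next granule; for the generic mode the offset
 * is zero, as detection there is byte-precise.
 */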
35
36 static bool multishot;
37
38 /* Fields set based on lines observed in the console. */
39 static struct {
40 bool report_found;
41 bool async_fault;
42 } test_status;
43
44 /*
45 * Some tests use these global variables to store return values from function
46 * calls that could otherwise be eliminated by the compiler as dead code.
47 */
48 void *kasan_ptr_result;
49 int kasan_int_result;
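/*
 * For example, a test assigns the result of a checked call to one of these
 * variables inside the expectation (see kasan_strings() below):
 *
 *	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
 */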
50
51 /* Probe for console output: obtains test_status lines of interest. */
52 static void probe_console(void *ignore, const char *buf, size_t len)
53 {
54 if (strnstr(buf, "BUG: KASAN: ", len))
55 WRITE_ONCE(test_status.report_found, true);
56 else if (strnstr(buf, "Asynchronous fault: ", len))
57 WRITE_ONCE(test_status.async_fault, true);
58 }
59
60 static int kasan_suite_init(struct kunit_suite *suite)
61 {
62 if (!kasan_enabled()) {
63 pr_err("Can't run KASAN tests with KASAN disabled");
64 return -1;
65 }
66
67 /* Stop failing KUnit tests on KASAN reports. */
68 kasan_kunit_test_suite_start();
69
70 /*
71 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
72 * report the first detected bug and panic the kernel if panic_on_warn
73 * is enabled.
74 */
75 multishot = kasan_save_enable_multi_shot();
76
77 register_trace_console(probe_console, NULL);
78 return 0;
79 }
80
81 static void kasan_suite_exit(struct kunit_suite *suite)
82 {
83 kasan_kunit_test_suite_end();
84 kasan_restore_multi_shot(multishot);
85 unregister_trace_console(probe_console, NULL);
86 tracepoint_synchronize_unregister();
87 }
88
89 static void kasan_test_exit(struct kunit *test)
90 {
91 KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
92 }
93
94 /**
95 * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
96 * KASAN report; causes a KUnit test failure otherwise.
97 *
98 * @test: Currently executing KUnit test.
99 * @expression: Expression that must produce a KASAN report.
100 *
101 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
102 * checking is auto-disabled. When this happens, this test handler reenables
103 * tag checking. As tag checking can only be disabled or enabled per CPU,
104 * this handler disables migration (preemption).
105 *
106 * Since the compiler doesn't see that the expression can change the test_status
107 * fields, it can reorder or optimize away the accesses to those fields.
108 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
109 * expression to prevent that.
110 *
111 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
112 * as false. This allows detecting KASAN reports that happen outside of the
113 * checks by asserting !test_status.report_found at the start of
114 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
115 */
116 #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
117 if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
118 kasan_sync_fault_possible()) \
119 migrate_disable(); \
120 KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found)); \
121 barrier(); \
122 expression; \
123 barrier(); \
124 if (kasan_async_fault_possible()) \
125 kasan_force_async_fault(); \
126 if (!READ_ONCE(test_status.report_found)) { \
127 KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \
128 "expected in \"" #expression \
129 "\", but none occurred"); \
130 } \
131 if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
132 kasan_sync_fault_possible()) { \
133 if (READ_ONCE(test_status.report_found) && \
134 !READ_ONCE(test_status.async_fault)) \
135 kasan_enable_hw_tags(); \
136 migrate_enable(); \
137 } \
138 WRITE_ONCE(test_status.report_found, false); \
139 WRITE_ONCE(test_status.async_fault, false); \
140 } while (0)
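/*
 * Typical usage (an illustrative sketch; the actual tests below follow the
 * same pattern):
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
 *	kfree(ptr);
 */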
141
142 #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
143 if (!IS_ENABLED(config)) \
144 kunit_skip((test), "Test requires " #config "=y"); \
145 } while (0)
146
147 #define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
148 if (IS_ENABLED(config)) \
149 kunit_skip((test), "Test requires " #config "=n"); \
150 } while (0)
151
152 #define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do { \
153 if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \
154 break; /* No compiler instrumentation. */ \
155 if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)) \
156 break; /* Should always be instrumented! */ \
157 if (IS_ENABLED(CONFIG_GENERIC_ENTRY)) \
158 kunit_skip((test), "Test requires checked mem*()"); \
159 } while (0)
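/*
 * For example, the memset/memmove tests below call this macro first so that
 * they are skipped on builds where the mem*() calls are not instrumented.
 */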
160
161 static void kmalloc_oob_right(struct kunit *test)
162 {
163 char *ptr;
164 size_t size = 128 - KASAN_GRANULE_SIZE - 5;
165
166 ptr = kmalloc(size, GFP_KERNEL);
167 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
168
169 OPTIMIZER_HIDE_VAR(ptr);
170 /*
171 * An unaligned access past the requested kmalloc size.
172 * Only generic KASAN can precisely detect these.
173 */
174 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
175 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
176
177 /*
178 * An aligned access into the first out-of-bounds granule that falls
179 * within the aligned kmalloc object.
180 */
181 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');
182
183 /* Out-of-bounds access past the aligned kmalloc object. */
184 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
185 ptr[size + KASAN_GRANULE_SIZE + 5]);
186
187 kfree(ptr);
188 }
189
190 static void kmalloc_oob_left(struct kunit *test)
191 {
192 char *ptr;
193 size_t size = 15;
194
195 ptr = kmalloc(size, GFP_KERNEL);
196 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
197
198 OPTIMIZER_HIDE_VAR(ptr);
199 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
200 kfree(ptr);
201 }
202
203 static void kmalloc_node_oob_right(struct kunit *test)
204 {
205 char *ptr;
206 size_t size = 4096;
207
208 ptr = kmalloc_node(size, GFP_KERNEL, 0);
209 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
210
211 OPTIMIZER_HIDE_VAR(ptr);
212 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
213 kfree(ptr);
214 }
215
216 /*
217 * Check that KASAN detects an out-of-bounds access for a big object allocated
218 * via kmalloc(), but one not big enough to trigger the page_alloc fallback.
219 */
220 static void kmalloc_big_oob_right(struct kunit *test)
221 {
222 char *ptr;
223 size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
224
225 ptr = kmalloc(size, GFP_KERNEL);
226 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
227
228 OPTIMIZER_HIDE_VAR(ptr);
229 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
230 kfree(ptr);
231 }
232
233 /*
234 * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
235 * that does not fit into the largest slab cache and therefore is allocated via
236 * the page_alloc fallback.
237 */
238
239 static void kmalloc_large_oob_right(struct kunit *test)
240 {
241 char *ptr;
242 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
243
244 ptr = kmalloc(size, GFP_KERNEL);
245 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
246
247 OPTIMIZER_HIDE_VAR(ptr);
248 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
249
250 kfree(ptr);
251 }
252
253 static void kmalloc_large_uaf(struct kunit *test)
254 {
255 char *ptr;
256 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
257
258 ptr = kmalloc(size, GFP_KERNEL);
259 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
260 kfree(ptr);
261
262 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
263 }
264
265 static void kmalloc_large_invalid_free(struct kunit *test)
266 {
267 char *ptr;
268 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
269
270 ptr = kmalloc(size, GFP_KERNEL);
271 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
272
273 KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
274 }
275
276 static void page_alloc_oob_right(struct kunit *test)
277 {
278 char *ptr;
279 struct page *pages;
280 size_t order = 4;
281 size_t size = (1UL << (PAGE_SHIFT + order));
282
283 /*
284 * With generic KASAN, page allocations have no redzones, thus
285 * out-of-bounds detection is not guaranteed.
286 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
287 */
288 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
289
290 pages = alloc_pages(GFP_KERNEL, order);
291 ptr = page_address(pages);
292 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
293
294 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
295 free_pages((unsigned long)ptr, order);
296 }
297
298 static void page_alloc_uaf(struct kunit *test)
299 {
300 char *ptr;
301 struct page *pages;
302 size_t order = 4;
303
304 pages = alloc_pages(GFP_KERNEL, order);
305 ptr = page_address(pages);
306 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
307 free_pages((unsigned long)ptr, order);
308
309 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
310 }
311
312 static void krealloc_more_oob_helper(struct kunit *test,
313 size_t size1, size_t size2)
314 {
315 char *ptr1, *ptr2;
316 size_t middle;
317
318 KUNIT_ASSERT_LT(test, size1, size2);
319 middle = size1 + (size2 - size1) / 2;
320
321 ptr1 = kmalloc(size1, GFP_KERNEL);
322 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
323
324 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
325 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
326
327 /* Suppress -Warray-bounds warnings. */
328 OPTIMIZER_HIDE_VAR(ptr2);
329
330 /* All offsets up to size2 must be accessible. */
331 ptr2[size1 - 1] = 'x';
332 ptr2[size1] = 'x';
333 ptr2[middle] = 'x';
334 ptr2[size2 - 1] = 'x';
335
336 /* Generic mode is precise, so unaligned size2 must be inaccessible. */
337 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
338 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
339
340 /* For all modes, the first aligned offset after size2 must be inaccessible. */
341 KUNIT_EXPECT_KASAN_FAIL(test,
342 ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
343
344 kfree(ptr2);
345 }
346
347 static void krealloc_less_oob_helper(struct kunit *test,
348 size_t size1, size_t size2)
349 {
350 char *ptr1, *ptr2;
351 size_t middle;
352
353 KUNIT_ASSERT_LT(test, size2, size1);
354 middle = size2 + (size1 - size2) / 2;
355
356 ptr1 = kmalloc(size1, GFP_KERNEL);
357 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
358
359 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
360 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
361
362 /* Suppress -Warray-bounds warnings. */
363 OPTIMIZER_HIDE_VAR(ptr2);
364
365 /* Must be accessible for all modes. */
366 ptr2[size2 - 1] = 'x';
367
368 /* Generic mode is precise, so unaligned size2 must be inaccessible. */
369 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
370 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
371
372 /* For all modes, the first aligned offset after size2 must be inaccessible. */
373 KUNIT_EXPECT_KASAN_FAIL(test,
374 ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
375
376 /*
377 * For all modes, size2, middle, and size1 should land in separate
378 * granules and thus the latter two offsets should be inaccessible.
379 */
380 KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
381 round_down(middle, KASAN_GRANULE_SIZE));
382 KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
383 round_down(size1, KASAN_GRANULE_SIZE));
384 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
385 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
386 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');
387
388 kfree(ptr2);
389 }
390
391 static void krealloc_more_oob(struct kunit *test)
392 {
393 krealloc_more_oob_helper(test, 201, 235);
394 }
395
396 static void krealloc_less_oob(struct kunit *test)
397 {
398 krealloc_less_oob_helper(test, 235, 201);
399 }
400
401 static void krealloc_large_more_oob(struct kunit *test)
402 {
403 krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
404 KMALLOC_MAX_CACHE_SIZE + 235);
405 }
406
407 static void krealloc_large_less_oob(struct kunit *test)
408 {
409 krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
410 KMALLOC_MAX_CACHE_SIZE + 201);
411 }
412
413 /*
414 * Check that krealloc() detects a use-after-free, returns NULL,
415 * and doesn't unpoison the freed object.
416 */
417 static void krealloc_uaf(struct kunit *test)
418 {
419 char *ptr1, *ptr2;
420 int size1 = 201;
421 int size2 = 235;
422
423 ptr1 = kmalloc(size1, GFP_KERNEL);
424 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
425 kfree(ptr1);
426
427 KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
428 KUNIT_ASSERT_NULL(test, ptr2);
429 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
430 }
431
432 static void kmalloc_oob_16(struct kunit *test)
433 {
434 struct {
435 u64 words[2];
436 } *ptr1, *ptr2;
437
438 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
439
440 /* This test is specifically crafted for the generic mode. */
441 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
442
443 /* RELOC_HIDE to prevent gcc from warning about short alloc */
444 ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
445 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
446
447 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
448 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
449
450 OPTIMIZER_HIDE_VAR(ptr1);
451 OPTIMIZER_HIDE_VAR(ptr2);
452 KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
453 kfree(ptr1);
454 kfree(ptr2);
455 }
456
457 static void kmalloc_uaf_16(struct kunit *test)
458 {
459 struct {
460 u64 words[2];
461 } *ptr1, *ptr2;
462
463 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
464
465 ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
466 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
467
468 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
469 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
470 kfree(ptr2);
471
472 KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
473 kfree(ptr1);
474 }
475
476 /*
477 * Note: in the memset tests below, the written range touches both valid and
478 * invalid memory. This makes sure that the instrumentation checks not only
479 * the starting address but also the whole range.
480 */
481
482 static void kmalloc_oob_memset_2(struct kunit *test)
483 {
484 char *ptr;
485 size_t size = 128 - KASAN_GRANULE_SIZE;
486 size_t memset_size = 2;
487
488 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
489
490 ptr = kmalloc(size, GFP_KERNEL);
491 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
492
493 OPTIMIZER_HIDE_VAR(ptr);
494 OPTIMIZER_HIDE_VAR(size);
495 OPTIMIZER_HIDE_VAR(memset_size);
496 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
497 kfree(ptr);
498 }
499
500 static void kmalloc_oob_memset_4(struct kunit *test)
501 {
502 char *ptr;
503 size_t size = 128 - KASAN_GRANULE_SIZE;
504 size_t memset_size = 4;
505
506 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
507
508 ptr = kmalloc(size, GFP_KERNEL);
509 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
510
511 OPTIMIZER_HIDE_VAR(ptr);
512 OPTIMIZER_HIDE_VAR(size);
513 OPTIMIZER_HIDE_VAR(memset_size);
514 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
515 kfree(ptr);
516 }
517
518 static void kmalloc_oob_memset_8(struct kunit *test)
519 {
520 char *ptr;
521 size_t size = 128 - KASAN_GRANULE_SIZE;
522 size_t memset_size = 8;
523
524 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
525
526 ptr = kmalloc(size, GFP_KERNEL);
527 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
528
529 OPTIMIZER_HIDE_VAR(ptr);
530 OPTIMIZER_HIDE_VAR(size);
531 OPTIMIZER_HIDE_VAR(memset_size);
532 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
533 kfree(ptr);
534 }
535
536 static void kmalloc_oob_memset_16(struct kunit *test)
537 {
538 char *ptr;
539 size_t size = 128 - KASAN_GRANULE_SIZE;
540 size_t memset_size = 16;
541
542 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
543
544 ptr = kmalloc(size, GFP_KERNEL);
545 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
546
547 OPTIMIZER_HIDE_VAR(ptr);
548 OPTIMIZER_HIDE_VAR(size);
549 OPTIMIZER_HIDE_VAR(memset_size);
550 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
551 kfree(ptr);
552 }
553
554 static void kmalloc_oob_in_memset(struct kunit *test)
555 {
556 char *ptr;
557 size_t size = 128 - KASAN_GRANULE_SIZE;
558
559 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
560
561 ptr = kmalloc(size, GFP_KERNEL);
562 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
563
564 OPTIMIZER_HIDE_VAR(ptr);
565 OPTIMIZER_HIDE_VAR(size);
566 KUNIT_EXPECT_KASAN_FAIL(test,
567 memset(ptr, 0, size + KASAN_GRANULE_SIZE));
568 kfree(ptr);
569 }
570
571 static void kmalloc_memmove_negative_size(struct kunit *test)
572 {
573 char *ptr;
574 size_t size = 64;
575 size_t invalid_size = -2;
576
577 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
578
579 /*
580 * Hardware tag-based mode doesn't check memmove for negative size.
581 * As a result, this test introduces a side-effect memory corruption,
582 * which can result in a crash.
583 */
584 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
585
586 ptr = kmalloc(size, GFP_KERNEL);
587 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
588
589 memset((char *)ptr, 0, 64);
590 OPTIMIZER_HIDE_VAR(ptr);
591 OPTIMIZER_HIDE_VAR(invalid_size);
592 KUNIT_EXPECT_KASAN_FAIL(test,
593 memmove((char *)ptr, (char *)ptr + 4, invalid_size));
594 kfree(ptr);
595 }
596
597 static void kmalloc_memmove_invalid_size(struct kunit *test)
598 {
599 char *ptr;
600 size_t size = 64;
601 size_t invalid_size = size;
602
603 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
604
605 ptr = kmalloc(size, GFP_KERNEL);
606 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
607
608 memset((char *)ptr, 0, 64);
609 OPTIMIZER_HIDE_VAR(ptr);
610 OPTIMIZER_HIDE_VAR(invalid_size);
611 KUNIT_EXPECT_KASAN_FAIL(test,
612 memmove((char *)ptr, (char *)ptr + 4, invalid_size));
613 kfree(ptr);
614 }
615
616 static void kmalloc_uaf(struct kunit *test)
617 {
618 char *ptr;
619 size_t size = 10;
620
621 ptr = kmalloc(size, GFP_KERNEL);
622 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
623
624 kfree(ptr);
625 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
626 }
627
628 static void kmalloc_uaf_memset(struct kunit *test)
629 {
630 char *ptr;
631 size_t size = 33;
632
633 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
634
635 /*
636 * Only generic KASAN uses quarantine, which is required to avoid a
637 * kernel memory corruption this test causes.
638 */
639 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
640
641 ptr = kmalloc(size, GFP_KERNEL);
642 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
643
644 kfree(ptr);
645 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
646 }
647
648 static void kmalloc_uaf2(struct kunit *test)
649 {
650 char *ptr1, *ptr2;
651 size_t size = 43;
652 int counter = 0;
653
654 again:
655 ptr1 = kmalloc(size, GFP_KERNEL);
656 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
657
658 kfree(ptr1);
659
660 ptr2 = kmalloc(size, GFP_KERNEL);
661 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
662
663 /*
664 * For tag-based KASAN, the ptr1 and ptr2 tags might happen to be the same.
665 * Allow up to 16 attempts at generating different tags.
666 */
667 if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
668 kfree(ptr2);
669 goto again;
670 }
671
672 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
673 KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
674
675 kfree(ptr2);
676 }
677
678 /*
679 * Check that KASAN detects use-after-free when another object was allocated in
680 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
681 */
682 static void kmalloc_uaf3(struct kunit *test)
683 {
684 char *ptr1, *ptr2;
685 size_t size = 100;
686
687 /* This test is specifically crafted for tag-based modes. */
688 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
689
690 ptr1 = kmalloc(size, GFP_KERNEL);
691 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
692 kfree(ptr1);
693
694 ptr2 = kmalloc(size, GFP_KERNEL);
695 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
696 kfree(ptr2);
697
698 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
699 }
700
701 static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
702 {
703 int *i_unsafe = unsafe;
704
705 KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
706 KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
707 KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
708 KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));
709
710 KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
711 KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
712 KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
713 KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
714 KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
715 KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
716 KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
717 KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
718 KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
719 KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
720 KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
721 KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
722 KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
723 KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
724 KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
725 KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
726 KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
727 KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
728 KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
729 KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
730 KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
731 KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
732 KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
733
734 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
735 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
736 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
737 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
738 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
739 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
740 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
741 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
742 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
743 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
744 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
745 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
746 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
747 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
748 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
749 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
750 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
751 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
752 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
753 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
754 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
755 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
756 KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
757 }
758
759 static void kasan_atomics(struct kunit *test)
760 {
761 void *a1, *a2;
762
763 /*
764 * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
765 * that the following 16 bytes will make up the redzone.
766 */
767 a1 = kzalloc(48, GFP_KERNEL);
768 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
769 a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
770 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);
771
772 /* Use atomics to access the redzone. */
773 kasan_atomics_helper(test, a1 + 48, a2);
774
775 kfree(a1);
776 kfree(a2);
777 }
778
779 static void kmalloc_double_kzfree(struct kunit *test)
780 {
781 char *ptr;
782 size_t size = 16;
783
784 ptr = kmalloc(size, GFP_KERNEL);
785 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
786
787 kfree_sensitive(ptr);
788 KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
789 }
790
791 /* Check that ksize() does NOT unpoison whole object. */
792 static void ksize_unpoisons_memory(struct kunit *test)
793 {
794 char *ptr;
795 size_t size = 128 - KASAN_GRANULE_SIZE - 5;
796 size_t real_size;
797
798 ptr = kmalloc(size, GFP_KERNEL);
799 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
800
801 real_size = ksize(ptr);
802 KUNIT_EXPECT_GT(test, real_size, size);
803
804 OPTIMIZER_HIDE_VAR(ptr);
805
806 /* These accesses shouldn't trigger a KASAN report. */
807 ptr[0] = 'x';
808 ptr[size - 1] = 'x';
809
810 /* These must trigger a KASAN report. */
811 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
812 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
813 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
814 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
815
816 kfree(ptr);
817 }
818
819 /*
820 * Check that a use-after-free is detected by ksize() and via normal accesses
821 * after it.
822 */
823 static void ksize_uaf(struct kunit *test)
824 {
825 char *ptr;
826 int size = 128 - KASAN_GRANULE_SIZE;
827
828 ptr = kmalloc(size, GFP_KERNEL);
829 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
830 kfree(ptr);
831
832 OPTIMIZER_HIDE_VAR(ptr);
833 KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
834 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
835 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
836 }
837
838 /*
839 * The two tests below check that Generic KASAN prints auxiliary stack traces
840 * for RCU callbacks and workqueues. The reports need to be inspected manually.
841 *
842 * These tests are still enabled for other KASAN modes to make sure that all
843 * modes report bad accesses in tested scenarios.
844 */
845
846 static struct kasan_rcu_info {
847 int i;
848 struct rcu_head rcu;
849 } *global_rcu_ptr;
850
851 static void rcu_uaf_reclaim(struct rcu_head *rp)
852 {
853 struct kasan_rcu_info *fp =
854 container_of(rp, struct kasan_rcu_info, rcu);
855
856 kfree(fp);
857 ((volatile struct kasan_rcu_info *)fp)->i;
858 }
859
860 static void rcu_uaf(struct kunit *test)
861 {
862 struct kasan_rcu_info *ptr;
863
864 ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
865 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
866
867 global_rcu_ptr = rcu_dereference_protected(
868 (struct kasan_rcu_info __rcu *)ptr, NULL);
869
870 KUNIT_EXPECT_KASAN_FAIL(test,
871 call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
872 rcu_barrier());
873 }
874
875 static void workqueue_uaf_work(struct work_struct *work)
876 {
877 kfree(work);
878 }
879
880 static void workqueue_uaf(struct kunit *test)
881 {
882 struct workqueue_struct *workqueue;
883 struct work_struct *work;
884
885 workqueue = create_workqueue("kasan_workqueue_test");
886 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
887
888 work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
889 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
890
891 INIT_WORK(work, workqueue_uaf_work);
892 queue_work(workqueue, work);
893 destroy_workqueue(workqueue);
894
895 KUNIT_EXPECT_KASAN_FAIL(test,
896 ((volatile struct work_struct *)work)->data);
897 }
898
899 static void kfree_via_page(struct kunit *test)
900 {
901 char *ptr;
902 size_t size = 8;
903 struct page *page;
904 unsigned long offset;
905
906 ptr = kmalloc(size, GFP_KERNEL);
907 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
908
909 page = virt_to_page(ptr);
910 offset = offset_in_page(ptr);
911 kfree(page_address(page) + offset);
912 }
913
914 static void kfree_via_phys(struct kunit *test)
915 {
916 char *ptr;
917 size_t size = 8;
918 phys_addr_t phys;
919
920 ptr = kmalloc(size, GFP_KERNEL);
921 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
922
923 phys = virt_to_phys(ptr);
924 kfree(phys_to_virt(phys));
925 }
926
927 static void kmem_cache_oob(struct kunit *test)
928 {
929 char *p;
930 size_t size = 200;
931 struct kmem_cache *cache;
932
933 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
934 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
935
936 p = kmem_cache_alloc(cache, GFP_KERNEL);
937 if (!p) {
938 kunit_err(test, "Allocation failed: %s\n", __func__);
939 kmem_cache_destroy(cache);
940 return;
941 }
942
943 KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
944
945 kmem_cache_free(cache, p);
946 kmem_cache_destroy(cache);
947 }
948
949 static void kmem_cache_double_free(struct kunit *test)
950 {
951 char *p;
952 size_t size = 200;
953 struct kmem_cache *cache;
954
955 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
956 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
957
958 p = kmem_cache_alloc(cache, GFP_KERNEL);
959 if (!p) {
960 kunit_err(test, "Allocation failed: %s\n", __func__);
961 kmem_cache_destroy(cache);
962 return;
963 }
964
965 kmem_cache_free(cache, p);
966 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
967 kmem_cache_destroy(cache);
968 }
969
970 static void kmem_cache_invalid_free(struct kunit *test)
971 {
972 char *p;
973 size_t size = 200;
974 struct kmem_cache *cache;
975
976 cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
977 NULL);
978 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
979
980 p = kmem_cache_alloc(cache, GFP_KERNEL);
981 if (!p) {
982 kunit_err(test, "Allocation failed: %s\n", __func__);
983 kmem_cache_destroy(cache);
984 return;
985 }
986
987 /* Trigger an invalid free; the object doesn't get freed. */
988 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
989
990 /*
991 * Properly free the object to prevent the "Objects remaining in
992 * test_cache on __kmem_cache_shutdown" BUG failure.
993 */
994 kmem_cache_free(cache, p);
995
996 kmem_cache_destroy(cache);
997 }
998
999 static void kmem_cache_rcu_uaf(struct kunit *test)
1000 {
1001 char *p;
1002 size_t size = 200;
1003 struct kmem_cache *cache;
1004
1005 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
1006
1007 cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
1008 NULL);
1009 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1010
1011 p = kmem_cache_alloc(cache, GFP_KERNEL);
1012 if (!p) {
1013 kunit_err(test, "Allocation failed: %s\n", __func__);
1014 kmem_cache_destroy(cache);
1015 return;
1016 }
1017 *p = 1;
1018
1019 rcu_read_lock();
1020
1021 /* Free the object - this will internally schedule an RCU callback. */
1022 kmem_cache_free(cache, p);
1023
1024 /*
1025 * We should still be allowed to access the object at this point because
1026 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
1027 * critical section since before the kmem_cache_free().
1028 */
1029 READ_ONCE(*p);
1030
1031 rcu_read_unlock();
1032
1033 /*
1034 * Wait for the RCU callback to execute; after this, the object should
1035 * have actually been freed from KASAN's perspective.
1036 */
1037 rcu_barrier();
1038
1039 KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
1040
1041 kmem_cache_destroy(cache);
1042 }
1043
1044 static void empty_cache_ctor(void *object) { }
1045
1046 static void kmem_cache_double_destroy(struct kunit *test)
1047 {
1048 struct kmem_cache *cache;
1049
1050 /* Provide a constructor to prevent cache merging. */
1051 cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
1052 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1053 kmem_cache_destroy(cache);
1054 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
1055 }
1056
1057 static void kmem_cache_accounted(struct kunit *test)
1058 {
1059 int i;
1060 char *p;
1061 size_t size = 200;
1062 struct kmem_cache *cache;
1063
1064 cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
1065 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1066
1067 /*
1068 * Several allocations with a delay to allow for lazy per memcg kmem
1069 * cache creation.
1070 */
1071 for (i = 0; i < 5; i++) {
1072 p = kmem_cache_alloc(cache, GFP_KERNEL);
1073 if (!p)
1074 goto free_cache;
1075
1076 kmem_cache_free(cache, p);
1077 msleep(100);
1078 }
1079
1080 free_cache:
1081 kmem_cache_destroy(cache);
1082 }
1083
1084 static void kmem_cache_bulk(struct kunit *test)
1085 {
1086 struct kmem_cache *cache;
1087 size_t size = 200;
1088 char *p[10];
1089 bool ret;
1090 int i;
1091
1092 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1093 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1094
1095 ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
1096 if (!ret) {
1097 kunit_err(test, "Allocation failed: %s\n", __func__);
1098 kmem_cache_destroy(cache);
1099 return;
1100 }
1101
1102 for (i = 0; i < ARRAY_SIZE(p); i++)
1103 p[i][0] = p[i][size - 1] = 42;
1104
1105 kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
1106 kmem_cache_destroy(cache);
1107 }
1108
1109 static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
1110 {
1111 int pool_size = 4;
1112 int ret;
1113 void *elem;
1114
1115 memset(pool, 0, sizeof(*pool));
1116 ret = mempool_init_kmalloc_pool(pool, pool_size, size);
1117 KUNIT_ASSERT_EQ(test, ret, 0);
1118
1119 /*
1120 * Allocate one element to prevent mempool from freeing elements to the
1121 * underlying allocator and instead make it add them to the element
1122 * list when the tests trigger double-free and invalid-free bugs.
1123 * This allows testing KASAN annotations in add_element().
1124 */
1125 elem = mempool_alloc_preallocated(pool);
1126 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1127
1128 return elem;
1129 }
1130
1131 static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
1132 {
1133 struct kmem_cache *cache;
1134 int pool_size = 4;
1135 int ret;
1136
1137 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1138 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1139
1140 memset(pool, 0, sizeof(*pool));
1141 ret = mempool_init_slab_pool(pool, pool_size, cache);
1142 KUNIT_ASSERT_EQ(test, ret, 0);
1143
1144 /*
1145 * Do not allocate one preallocated element, as we skip the double-free
1146 * and invalid-free tests for slab mempool for simplicity.
1147 */
1148
1149 return cache;
1150 }
1151
1152 static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
1153 {
1154 int pool_size = 4;
1155 int ret;
1156 void *elem;
1157
1158 memset(pool, 0, sizeof(*pool));
1159 ret = mempool_init_page_pool(pool, pool_size, order);
1160 KUNIT_ASSERT_EQ(test, ret, 0);
1161
1162 elem = mempool_alloc_preallocated(pool);
1163 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1164
1165 return elem;
1166 }
1167
1168 static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
1169 {
1170 char *elem;
1171
1172 elem = mempool_alloc_preallocated(pool);
1173 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1174
1175 OPTIMIZER_HIDE_VAR(elem);
1176
1177 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1178 KUNIT_EXPECT_KASAN_FAIL(test,
1179 ((volatile char *)&elem[size])[0]);
1180 else
1181 KUNIT_EXPECT_KASAN_FAIL(test,
1182 ((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);
1183
1184 mempool_free(elem, pool);
1185 }
1186
1187 static void mempool_kmalloc_oob_right(struct kunit *test)
1188 {
1189 mempool_t pool;
1190 size_t size = 128 - KASAN_GRANULE_SIZE - 5;
1191 void *extra_elem;
1192
1193 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1194
1195 mempool_oob_right_helper(test, &pool, size);
1196
1197 mempool_free(extra_elem, &pool);
1198 mempool_exit(&pool);
1199 }
1200
1201 static void mempool_kmalloc_large_oob_right(struct kunit *test)
1202 {
1203 mempool_t pool;
1204 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1205 void *extra_elem;
1206
1207 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1208
1209 mempool_oob_right_helper(test, &pool, size);
1210
1211 mempool_free(extra_elem, &pool);
1212 mempool_exit(&pool);
1213 }
1214
1215 static void mempool_slab_oob_right(struct kunit *test)
1216 {
1217 mempool_t pool;
1218 size_t size = 123;
1219 struct kmem_cache *cache;
1220
1221 cache = mempool_prepare_slab(test, &pool, size);
1222
1223 mempool_oob_right_helper(test, &pool, size);
1224
1225 mempool_exit(&pool);
1226 kmem_cache_destroy(cache);
1227 }
1228
1229 /*
1230 * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
1231 * allocations have no redzones, and thus the out-of-bounds detection is not
1232 * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
1233 * the tag-based KASAN modes, the neighboring allocation might have the same
1234 * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
1235 */
1236
1237 static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
1238 {
1239 char *elem, *ptr;
1240
1241 elem = mempool_alloc_preallocated(pool);
1242 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1243
1244 mempool_free(elem, pool);
1245
1246 ptr = page ? page_address((struct page *)elem) : elem;
1247 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
1248 }
1249
1250 static void mempool_kmalloc_uaf(struct kunit *test)
1251 {
1252 mempool_t pool;
1253 size_t size = 128;
1254 void *extra_elem;
1255
1256 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1257
1258 mempool_uaf_helper(test, &pool, false);
1259
1260 mempool_free(extra_elem, &pool);
1261 mempool_exit(&pool);
1262 }
1263
1264 static void mempool_kmalloc_large_uaf(struct kunit *test)
1265 {
1266 mempool_t pool;
1267 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1268 void *extra_elem;
1269
1270 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1271
1272 mempool_uaf_helper(test, &pool, false);
1273
1274 mempool_free(extra_elem, &pool);
1275 mempool_exit(&pool);
1276 }
1277
1278 static void mempool_slab_uaf(struct kunit *test)
1279 {
1280 mempool_t pool;
1281 size_t size = 123;
1282 struct kmem_cache *cache;
1283
1284 cache = mempool_prepare_slab(test, &pool, size);
1285
1286 mempool_uaf_helper(test, &pool, false);
1287
1288 mempool_exit(&pool);
1289 kmem_cache_destroy(cache);
1290 }
1291
1292 static void mempool_page_alloc_uaf(struct kunit *test)
1293 {
1294 mempool_t pool;
1295 int order = 2;
1296 void *extra_elem;
1297
1298 extra_elem = mempool_prepare_page(test, &pool, order);
1299
1300 mempool_uaf_helper(test, &pool, true);
1301
1302 mempool_free(extra_elem, &pool);
1303 mempool_exit(&pool);
1304 }
1305
1306 static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
1307 {
1308 char *elem;
1309
1310 elem = mempool_alloc_preallocated(pool);
1311 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1312
1313 mempool_free(elem, pool);
1314
1315 KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
1316 }
1317
1318 static void mempool_kmalloc_double_free(struct kunit *test)
1319 {
1320 mempool_t pool;
1321 size_t size = 128;
1322 char *extra_elem;
1323
1324 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1325
1326 mempool_double_free_helper(test, &pool);
1327
1328 mempool_free(extra_elem, &pool);
1329 mempool_exit(&pool);
1330 }
1331
1332 static void mempool_kmalloc_large_double_free(struct kunit *test)
1333 {
1334 mempool_t pool;
1335 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1336 char *extra_elem;
1337
1338 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1339
1340 mempool_double_free_helper(test, &pool);
1341
1342 mempool_free(extra_elem, &pool);
1343 mempool_exit(&pool);
1344 }
1345
1346 static void mempool_page_alloc_double_free(struct kunit *test)
1347 {
1348 mempool_t pool;
1349 int order = 2;
1350 char *extra_elem;
1351
1352 extra_elem = mempool_prepare_page(test, &pool, order);
1353
1354 mempool_double_free_helper(test, &pool);
1355
1356 mempool_free(extra_elem, &pool);
1357 mempool_exit(&pool);
1358 }
1359
1360 static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
1361 {
1362 char *elem;
1363
1364 elem = mempool_alloc_preallocated(pool);
1365 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1366
1367 KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));
1368
1369 mempool_free(elem, pool);
1370 }
1371
1372 static void mempool_kmalloc_invalid_free(struct kunit *test)
1373 {
1374 mempool_t pool;
1375 size_t size = 128;
1376 char *extra_elem;
1377
1378 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1379
1380 mempool_kmalloc_invalid_free_helper(test, &pool);
1381
1382 mempool_free(extra_elem, &pool);
1383 mempool_exit(&pool);
1384 }
1385
1386 static void mempool_kmalloc_large_invalid_free(struct kunit *test)
1387 {
1388 mempool_t pool;
1389 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1390 char *extra_elem;
1391
1392 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1393
1394 mempool_kmalloc_invalid_free_helper(test, &pool);
1395
1396 mempool_free(extra_elem, &pool);
1397 mempool_exit(&pool);
1398 }
1399
1400 /*
1401 * Skip the invalid-free test for page mempool. The invalid-free detection only
1402 * works for compound pages and mempool preallocates all page elements without
1403 * the __GFP_COMP flag.
1404 */
1405
1406 static char global_array[10];
1407
1408 static void kasan_global_oob_right(struct kunit *test)
1409 {
1410 /*
1411 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
1412 * from failing here and panicking the kernel, access the array via a
1413 * volatile pointer, which will prevent the compiler from being able to
1414 * determine the array bounds.
1415 *
1416 * This access uses a volatile pointer to char (char *volatile) rather
1417 * than the more conventional pointer to volatile char (volatile char *)
1418 * because we want to prevent the compiler from making inferences about
1419 * the pointer itself (i.e. its array bounds), not the data that it
1420 * refers to.
1421 */
1422 char *volatile array = global_array;
1423 char *p = &array[ARRAY_SIZE(global_array) + 3];
1424
1425 /* Only generic mode instruments globals. */
1426 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1427
1428 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1429 }
1430
1431 static void kasan_global_oob_left(struct kunit *test)
1432 {
1433 char *volatile array = global_array;
1434 char *p = array - 3;
1435
1436 /*
1437 * GCC is known to fail this test; skip it.
1438 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
1439 */
1440 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
1441 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1442 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1443 }
1444
1445 static void kasan_stack_oob(struct kunit *test)
1446 {
1447 char stack_array[10];
1448 /* See comment in kasan_global_oob_right. */
1449 char *volatile array = stack_array;
1450 char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
1451
1452 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1453
1454 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1455 }
1456
1457 static void kasan_alloca_oob_left(struct kunit *test)
1458 {
1459 volatile int i = 10;
1460 char alloca_array[i];
1461 /* See comment in kasan_global_oob_right. */
1462 char *volatile array = alloca_array;
1463 char *p = array - 1;
1464
1465 /* Only generic mode instruments dynamic allocas. */
1466 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1467 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1468
1469 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1470 }
1471
1472 static void kasan_alloca_oob_right(struct kunit *test)
1473 {
1474 volatile int i = 10;
1475 char alloca_array[i];
1476 /* See comment in kasan_global_oob_right. */
1477 char *volatile array = alloca_array;
1478 char *p = array + i;
1479
1480 /* Only generic mode instruments dynamic allocas. */
1481 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1482 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1483
1484 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1485 }
1486
1487 static void kasan_memchr(struct kunit *test)
1488 {
1489 char *ptr;
1490 size_t size = 24;
1491
1492 /*
1493 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1494 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1495 */
1496 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1497
1498 if (OOB_TAG_OFF)
1499 size = round_up(size, OOB_TAG_OFF);
1500
1501 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1502 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1503
1504 OPTIMIZER_HIDE_VAR(ptr);
1505 OPTIMIZER_HIDE_VAR(size);
1506 KUNIT_EXPECT_KASAN_FAIL(test,
1507 kasan_ptr_result = memchr(ptr, '1', size + 1));
1508
1509 kfree(ptr);
1510 }
1511
1512 static void kasan_memcmp(struct kunit *test)
1513 {
1514 char *ptr;
1515 size_t size = 24;
1516 int arr[9];
1517
1518 /*
1519 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1520 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1521 */
1522 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1523
1524 if (OOB_TAG_OFF)
1525 size = round_up(size, OOB_TAG_OFF);
1526
1527 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1528 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1529 memset(arr, 0, sizeof(arr));
1530
1531 OPTIMIZER_HIDE_VAR(ptr);
1532 OPTIMIZER_HIDE_VAR(size);
1533 KUNIT_EXPECT_KASAN_FAIL(test,
1534 kasan_int_result = memcmp(ptr, arr, size+1));
1535 kfree(ptr);
1536 }
1537
1538 static void kasan_strings(struct kunit *test)
1539 {
1540 char *ptr;
1541 size_t size = 24;
1542
1543 /*
1544 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1545 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1546 */
1547 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1548
1549 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1550 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1551
1552 kfree(ptr);
1553
1554 /*
1555 * Try to cause only 1 invalid access (less spam in dmesg).
1556 * For that we need ptr to point to a zeroed byte.
1557 * Skip the metadata that could be stored in the freed object so that ptr
1558 * will likely point to a zeroed byte.
1559 */
1560 ptr += 16;
1561 KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
1562
1563 KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
1564
1565 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
1566
1567 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
1568
1569 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
1570
1571 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
1572 }
1573
1574 static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
1575 {
1576 KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
1577 KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
1578 KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
1579 KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
1580 KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
1581 KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
1582 KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
1583 KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
1584 }
1585
1586 static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
1587 {
1588 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
1589 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
1590 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
1591 KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
1592 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
1593 KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
1594 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
1595 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
1596 if (nr < 7)
1597 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
1598 xor_unlock_is_negative_byte(1 << nr, addr));
1599 }
1600
1601 static void kasan_bitops_generic(struct kunit *test)
1602 {
1603 long *bits;
1604
1605 /* This test is specifically crafted for the generic mode. */
1606 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1607
1608 /*
1609 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
1610 * this way we do not actually corrupt other memory.
1611 */
1612 bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
1613 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1614
1615 /*
1616 * The calls below try to access a bit within the allocated memory; however,
1617 * these accesses are still out-of-bounds, since bitops are defined to
1618 * operate on the whole long the bit is in.
1619 */
1620 kasan_bitops_modify(test, BITS_PER_LONG, bits);
1621
1622 /*
1623 * The calls below try to access a bit beyond the allocated memory.
1624 */
1625 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
1626
1627 kfree(bits);
1628 }
1629
1630 static void kasan_bitops_tags(struct kunit *test)
1631 {
1632 long *bits;
1633
1634 /* This test is specifically crafted for tag-based modes. */
1635 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1636
1637 /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
1638 bits = kzalloc(48, GFP_KERNEL);
1639 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1640
1641 /* Do the accesses past the 48 allocated bytes, but within the redzone. */
1642 kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
1643 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
1644
1645 kfree(bits);
1646 }
1647
1648 static void vmalloc_helpers_tags(struct kunit *test)
1649 {
1650 void *ptr;
1651
1652 /* This test is intended for tag-based modes. */
1653 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1654
1655 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1656
1657 if (!kasan_vmalloc_enabled())
1658 kunit_skip(test, "Test requires kasan.vmalloc=on");
1659
1660 ptr = vmalloc(PAGE_SIZE);
1661 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1662
1663 /* Check that the returned pointer is tagged. */
1664 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1665 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1666
1667 /* Make sure exported vmalloc helpers handle tagged pointers. */
1668 KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
1669 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));
1670
1671 #if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
1672 {
1673 int rv;
1674
1675 /* Make sure vmalloc'ed memory permissions can be changed. */
1676 rv = set_memory_ro((unsigned long)ptr, 1);
1677 KUNIT_ASSERT_GE(test, rv, 0);
1678 rv = set_memory_rw((unsigned long)ptr, 1);
1679 KUNIT_ASSERT_GE(test, rv, 0);
1680 }
1681 #endif
1682
1683 vfree(ptr);
1684 }
1685
1686 static void vmalloc_oob(struct kunit *test)
1687 {
1688 char *v_ptr, *p_ptr;
1689 struct page *page;
1690 size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;
1691
1692 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1693
1694 if (!kasan_vmalloc_enabled())
1695 kunit_skip(test, "Test requires kasan.vmalloc=on");
1696
1697 v_ptr = vmalloc(size);
1698 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1699
1700 OPTIMIZER_HIDE_VAR(v_ptr);
1701
1702 /*
1703 * We have to be careful not to hit the guard page in vmalloc tests.
1704 * The MMU will catch that and crash us.
1705 */
1706
1707 /* Make sure in-bounds accesses are valid. */
1708 v_ptr[0] = 0;
1709 v_ptr[size - 1] = 0;
1710
1711 /*
1712 * An unaligned access past the requested vmalloc size.
1713 * Only generic KASAN can precisely detect these.
1714 */
1715 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1716 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
1717
1718 /* An aligned access into the first out-of-bounds granule. */
1719 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);
1720
1721 /* Check that in-bounds accesses to the physical page are valid. */
1722 page = vmalloc_to_page(v_ptr);
1723 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
1724 p_ptr = page_address(page);
1725 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1726 p_ptr[0] = 0;
1727
1728 vfree(v_ptr);
1729
1730 /*
1731 * We can't check for use-after-unmap bugs in this nor in the following
1732 * vmalloc tests, as the page might be fully unmapped and accessing it
1733 * will crash the kernel.
1734 */
1735 }
1736
1737 static void vmap_tags(struct kunit *test)
1738 {
1739 char *p_ptr, *v_ptr;
1740 struct page *p_page, *v_page;
1741
1742 /*
1743 * This test is specifically crafted for the software tag-based mode,
1744 * the only tag-based mode that poisons vmap mappings.
1745 */
1746 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1747
1748 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1749
1750 if (!kasan_vmalloc_enabled())
1751 kunit_skip(test, "Test requires kasan.vmalloc=on");
1752
1753 p_page = alloc_pages(GFP_KERNEL, 1);
1754 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
1755 p_ptr = page_address(p_page);
1756 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1757
1758 v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
1759 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1760
1761 /*
1762 * We can't check for out-of-bounds bugs in this nor in the following
1763 * vmalloc tests, as allocations have page granularity and accessing
1764 * the guard page will crash the kernel.
1765 */
1766
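	/* Check that the pointer returned by vmap() is tagged. */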
	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}

static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

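	/* Check that the pointer returned by vm_map_ram() is tagged. */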
	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

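	/* Check tags assigned to slab (kmalloc) allocations. */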
	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

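	/* Check tags assigned to page_alloc allocations. */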
	for (i = 0; i < 256; i++) {
		order = get_random_u32_inclusive(1, 4);
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}

	if (!kasan_vmalloc_enabled())
		return;

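	/* Check tags assigned to vmalloc allocations. */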
	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = vmalloc(size);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		vfree(ptr);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Back up the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff (the match-all kernel tag). */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		/*
		 * For Software Tag-Based KASAN, skip the majority of tag
		 * values to avoid the test printing too many reports.
		 */
		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
		    tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
			continue;

		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

/*
 * Check that a use-after-free performed from Rust via `unsafe` code is
 * detected. This is a smoke test to make sure that Rust code is being
 * sanitized properly.
 */
static void rust_uaf(struct kunit *test)
{
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_big_oob_right),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_large_uaf),
	KUNIT_CASE(kmalloc_large_invalid_free),
	KUNIT_CASE(page_alloc_oob_right),
	KUNIT_CASE(page_alloc_uaf),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_large_more_oob),
	KUNIT_CASE(krealloc_large_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kmalloc_uaf3),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(rcu_uaf),
	KUNIT_CASE(workqueue_uaf),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_rcu_uaf),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(mempool_kmalloc_oob_right),
	KUNIT_CASE(mempool_kmalloc_large_oob_right),
	KUNIT_CASE(mempool_slab_oob_right),
	KUNIT_CASE(mempool_kmalloc_uaf),
	KUNIT_CASE(mempool_kmalloc_large_uaf),
	KUNIT_CASE(mempool_slab_uaf),
	KUNIT_CASE(mempool_page_alloc_uaf),
	KUNIT_CASE(mempool_kmalloc_double_free),
	KUNIT_CASE(mempool_kmalloc_large_double_free),
	KUNIT_CASE(mempool_page_alloc_double_free),
	KUNIT_CASE(mempool_kmalloc_invalid_free),
	KUNIT_CASE(mempool_kmalloc_large_invalid_free),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kasan_atomics),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	KUNIT_CASE(rust_uaf),
	{}
};

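/*
 * The suite below is registered with KUnit: its tests run automatically at
 * boot when this file is built into the kernel via CONFIG_KASAN_KUNIT_TEST,
 * or on load when it is built as a module.
 */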
static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
	.suite_init = kasan_suite_init,
	.suite_exit = kasan_suite_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");