1f7e01ab8SAndrey Konovalov // SPDX-License-Identifier: GPL-2.0-only
2f7e01ab8SAndrey Konovalov /*
3f7e01ab8SAndrey Konovalov *
4f7e01ab8SAndrey Konovalov * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5f7e01ab8SAndrey Konovalov * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
6f7e01ab8SAndrey Konovalov */
7f7e01ab8SAndrey Konovalov
8d7196d87SAndrey Konovalov #define pr_fmt(fmt) "kasan: test: " fmt
97ce0ea19SAndrey Konovalov
107ce0ea19SAndrey Konovalov #include <kunit/test.h>
11f7e01ab8SAndrey Konovalov #include <linux/bitops.h>
12f7e01ab8SAndrey Konovalov #include <linux/delay.h>
137ce0ea19SAndrey Konovalov #include <linux/io.h>
14f7e01ab8SAndrey Konovalov #include <linux/kasan.h>
15f7e01ab8SAndrey Konovalov #include <linux/kernel.h>
160f199eb4SAndrey Konovalov #include <linux/mempool.h>
17f7e01ab8SAndrey Konovalov #include <linux/mm.h>
18f7e01ab8SAndrey Konovalov #include <linux/mman.h>
19f7e01ab8SAndrey Konovalov #include <linux/module.h>
20f7e01ab8SAndrey Konovalov #include <linux/printk.h>
21f7e01ab8SAndrey Konovalov #include <linux/random.h>
227ce0ea19SAndrey Konovalov #include <linux/set_memory.h>
23f7e01ab8SAndrey Konovalov #include <linux/slab.h>
24f7e01ab8SAndrey Konovalov #include <linux/string.h>
257ce0ea19SAndrey Konovalov #include <linux/tracepoint.h>
26f7e01ab8SAndrey Konovalov #include <linux/uaccess.h>
27f7e01ab8SAndrey Konovalov #include <linux/vmalloc.h>
287ce0ea19SAndrey Konovalov #include <trace/events/printk.h>
29f7e01ab8SAndrey Konovalov
30f7e01ab8SAndrey Konovalov #include <asm/page.h>
31f7e01ab8SAndrey Konovalov
32f7e01ab8SAndrey Konovalov #include "kasan.h"
33f7e01ab8SAndrey Konovalov
34f7e01ab8SAndrey Konovalov #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
35f7e01ab8SAndrey Konovalov
/* Saved multi-shot state; restored when the suite exits. */
static bool multishot;

/* Fields set based on lines observed in the console. */
static struct {
	bool report_found;	/* a "BUG: KASAN: " line was printed */
	bool async_fault;	/* an "Asynchronous fault: " line was printed */
} test_status;
437ce0ea19SAndrey Konovalov
/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;	/* sink for pointer-valued expressions */
int kasan_int_result;	/* sink for integer-valued expressions */
50f7e01ab8SAndrey Konovalov
517ce0ea19SAndrey Konovalov /* Probe for console output: obtains test_status lines of interest. */
probe_console(void * ignore,const char * buf,size_t len)527ce0ea19SAndrey Konovalov static void probe_console(void *ignore, const char *buf, size_t len)
537ce0ea19SAndrey Konovalov {
547ce0ea19SAndrey Konovalov if (strnstr(buf, "BUG: KASAN: ", len))
557ce0ea19SAndrey Konovalov WRITE_ONCE(test_status.report_found, true);
567ce0ea19SAndrey Konovalov else if (strnstr(buf, "Asynchronous fault: ", len))
577ce0ea19SAndrey Konovalov WRITE_ONCE(test_status.async_fault, true);
587ce0ea19SAndrey Konovalov }
59f7e01ab8SAndrey Konovalov
/*
 * Suite-wide setup, run once before any test in the suite.
 * Returns 0 on success, -1 when KASAN itself is disabled.
 */
static int kasan_suite_init(struct kunit_suite *suite)
{
	if (!kasan_enabled()) {
		pr_err("Can't run KASAN tests with KASAN disabled");
		return -1;
	}

	/* Stop failing KUnit tests on KASAN reports. */
	kasan_kunit_test_suite_start();

	/*
	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
	 * report the first detected bug and panic the kernel if panic_on_warn
	 * is enabled.
	 */
	multishot = kasan_save_enable_multi_shot();

	/* Watch console output for KASAN report lines (see probe_console()). */
	register_trace_console(probe_console, NULL);
	return 0;
}
80f7e01ab8SAndrey Konovalov
/* Suite-wide teardown: undo everything done in kasan_suite_init(). */
static void kasan_suite_exit(struct kunit_suite *suite)
{
	kasan_kunit_test_suite_end();
	kasan_restore_multi_shot(multishot);
	unregister_trace_console(probe_console, NULL);
	/* Wait for in-flight probe_console() calls before returning. */
	tracepoint_synchronize_unregister();
}
887ce0ea19SAndrey Konovalov
/*
 * Per-test teardown: fail if a KASAN report happened outside of a
 * KUNIT_EXPECT_KASAN_FAIL check (which resets report_found to false).
 */
static void kasan_test_exit(struct kunit *test)
{
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
}
93f7e01ab8SAndrey Konovalov
/**
 * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
 * KASAN report; causes a KUnit test failure otherwise.
 *
 * @test: Currently executing KUnit test.
 * @expression: Expression that must produce a KASAN report.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * For asynchronous fault modes, kasan_force_async_fault() is called after the
 * expression to flush any pending fault so that the report is observed within
 * this check rather than at some later point.
 *
 * Since the compiler doesn't see that the expression can change the test_status
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (kasan_async_fault_possible())				\
		kasan_force_async_fault();				\
	if (!READ_ONCE(test_status.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				 "\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible()) {				\
		if (READ_ONCE(test_status.report_found) &&		\
		    !READ_ONCE(test_status.async_fault))		\
			kasan_enable_hw_tags();				\
		migrate_enable();					\
	}								\
	WRITE_ONCE(test_status.report_found, false);			\
	WRITE_ONCE(test_status.async_fault, false);			\
} while (0)
141f7e01ab8SAndrey Konovalov
/* Skip the current test unless the given Kconfig option is enabled. */
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)
146f7e01ab8SAndrey Konovalov
/* Skip the current test unless the given Kconfig option is disabled. */
#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)
151f7e01ab8SAndrey Konovalov
/*
 * Skip the current test when mem*() calls (memset/memcpy/memmove) are not
 * checked by KASAN in this configuration.
 */
#define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do {		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))				\
		break;	/* No compiler instrumentation. */		\
	if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX))	\
		break;	/* Should always be instrumented! */		\
	if (IS_ENABLED(CONFIG_GENERIC_ENTRY))				\
		kunit_skip((test), "Test requires checked mem*()");	\
} while (0)
16085f195b1SMarco Elver
kmalloc_oob_right(struct kunit * test)161f7e01ab8SAndrey Konovalov static void kmalloc_oob_right(struct kunit *test)
162f7e01ab8SAndrey Konovalov {
163f7e01ab8SAndrey Konovalov char *ptr;
164f7e01ab8SAndrey Konovalov size_t size = 128 - KASAN_GRANULE_SIZE - 5;
165f7e01ab8SAndrey Konovalov
166f7e01ab8SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
167f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
168f7e01ab8SAndrey Konovalov
169f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(ptr);
170f7e01ab8SAndrey Konovalov /*
171f7e01ab8SAndrey Konovalov * An unaligned access past the requested kmalloc size.
172f7e01ab8SAndrey Konovalov * Only generic KASAN can precisely detect these.
173f7e01ab8SAndrey Konovalov */
174f7e01ab8SAndrey Konovalov if (IS_ENABLED(CONFIG_KASAN_GENERIC))
175f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
176f7e01ab8SAndrey Konovalov
177f7e01ab8SAndrey Konovalov /*
178f7e01ab8SAndrey Konovalov * An aligned access into the first out-of-bounds granule that falls
179f7e01ab8SAndrey Konovalov * within the aligned kmalloc object.
180f7e01ab8SAndrey Konovalov */
181f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');
182f7e01ab8SAndrey Konovalov
183f7e01ab8SAndrey Konovalov /* Out-of-bounds access past the aligned kmalloc object. */
184f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
185f7e01ab8SAndrey Konovalov ptr[size + KASAN_GRANULE_SIZE + 5]);
186f7e01ab8SAndrey Konovalov
187f7e01ab8SAndrey Konovalov kfree(ptr);
188f7e01ab8SAndrey Konovalov }
189f7e01ab8SAndrey Konovalov
kmalloc_oob_left(struct kunit * test)190f7e01ab8SAndrey Konovalov static void kmalloc_oob_left(struct kunit *test)
191f7e01ab8SAndrey Konovalov {
192f7e01ab8SAndrey Konovalov char *ptr;
193f7e01ab8SAndrey Konovalov size_t size = 15;
194f7e01ab8SAndrey Konovalov
195f7e01ab8SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
196f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
197f7e01ab8SAndrey Konovalov
198f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(ptr);
199f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
200f7e01ab8SAndrey Konovalov kfree(ptr);
201f7e01ab8SAndrey Konovalov }
202f7e01ab8SAndrey Konovalov
kmalloc_node_oob_right(struct kunit * test)203f7e01ab8SAndrey Konovalov static void kmalloc_node_oob_right(struct kunit *test)
204f7e01ab8SAndrey Konovalov {
205f7e01ab8SAndrey Konovalov char *ptr;
206f7e01ab8SAndrey Konovalov size_t size = 4096;
207f7e01ab8SAndrey Konovalov
208f7e01ab8SAndrey Konovalov ptr = kmalloc_node(size, GFP_KERNEL, 0);
209f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
210f7e01ab8SAndrey Konovalov
211f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(ptr);
212f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
213f7e01ab8SAndrey Konovalov kfree(ptr);
214f7e01ab8SAndrey Konovalov }
215f7e01ab8SAndrey Konovalov
216f7e01ab8SAndrey Konovalov /*
21786b15969SAndrey Konovalov * Check that KASAN detects an out-of-bounds access for a big object allocated
2183ab9304dSAndrey Konovalov * via kmalloc(). But not as big as to trigger the page_alloc fallback.
21986b15969SAndrey Konovalov */
kmalloc_big_oob_right(struct kunit * test)22086b15969SAndrey Konovalov static void kmalloc_big_oob_right(struct kunit *test)
22186b15969SAndrey Konovalov {
22286b15969SAndrey Konovalov char *ptr;
22386b15969SAndrey Konovalov size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
22486b15969SAndrey Konovalov
22586b15969SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
22686b15969SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
22786b15969SAndrey Konovalov
22886b15969SAndrey Konovalov OPTIMIZER_HIDE_VAR(ptr);
22986b15969SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
23086b15969SAndrey Konovalov kfree(ptr);
23186b15969SAndrey Konovalov }
23286b15969SAndrey Konovalov
23386b15969SAndrey Konovalov /*
2340f18ea6eSAndrey Konovalov * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
2350f18ea6eSAndrey Konovalov * that does not fit into the largest slab cache and therefore is allocated via
2363ab9304dSAndrey Konovalov * the page_alloc fallback.
237f7e01ab8SAndrey Konovalov */
2380f18ea6eSAndrey Konovalov
kmalloc_large_oob_right(struct kunit * test)2390f18ea6eSAndrey Konovalov static void kmalloc_large_oob_right(struct kunit *test)
240f7e01ab8SAndrey Konovalov {
241f7e01ab8SAndrey Konovalov char *ptr;
242f7e01ab8SAndrey Konovalov size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
243f7e01ab8SAndrey Konovalov
244f7e01ab8SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
245f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
246f7e01ab8SAndrey Konovalov
247f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(ptr);
248f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
249f7e01ab8SAndrey Konovalov
250f7e01ab8SAndrey Konovalov kfree(ptr);
251f7e01ab8SAndrey Konovalov }
252f7e01ab8SAndrey Konovalov
kmalloc_large_uaf(struct kunit * test)2530f18ea6eSAndrey Konovalov static void kmalloc_large_uaf(struct kunit *test)
254f7e01ab8SAndrey Konovalov {
255f7e01ab8SAndrey Konovalov char *ptr;
256f7e01ab8SAndrey Konovalov size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
257f7e01ab8SAndrey Konovalov
258f7e01ab8SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
259f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
260f7e01ab8SAndrey Konovalov kfree(ptr);
261f7e01ab8SAndrey Konovalov
262f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
263f7e01ab8SAndrey Konovalov }
264f7e01ab8SAndrey Konovalov
kmalloc_large_invalid_free(struct kunit * test)2650f18ea6eSAndrey Konovalov static void kmalloc_large_invalid_free(struct kunit *test)
266f7e01ab8SAndrey Konovalov {
267f7e01ab8SAndrey Konovalov char *ptr;
268f7e01ab8SAndrey Konovalov size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
269f7e01ab8SAndrey Konovalov
270f7e01ab8SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
271f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
272f7e01ab8SAndrey Konovalov
273f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
274f7e01ab8SAndrey Konovalov }
275f7e01ab8SAndrey Konovalov
page_alloc_oob_right(struct kunit * test)2760f18ea6eSAndrey Konovalov static void page_alloc_oob_right(struct kunit *test)
277f7e01ab8SAndrey Konovalov {
278f7e01ab8SAndrey Konovalov char *ptr;
279f7e01ab8SAndrey Konovalov struct page *pages;
280f7e01ab8SAndrey Konovalov size_t order = 4;
281f7e01ab8SAndrey Konovalov size_t size = (1UL << (PAGE_SHIFT + order));
282f7e01ab8SAndrey Konovalov
283f7e01ab8SAndrey Konovalov /*
284f7e01ab8SAndrey Konovalov * With generic KASAN page allocations have no redzones, thus
285f7e01ab8SAndrey Konovalov * out-of-bounds detection is not guaranteed.
286f7e01ab8SAndrey Konovalov * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
287f7e01ab8SAndrey Konovalov */
288f7e01ab8SAndrey Konovalov KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
289f7e01ab8SAndrey Konovalov
290f7e01ab8SAndrey Konovalov pages = alloc_pages(GFP_KERNEL, order);
291f7e01ab8SAndrey Konovalov ptr = page_address(pages);
292f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
293f7e01ab8SAndrey Konovalov
294f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
295f7e01ab8SAndrey Konovalov free_pages((unsigned long)ptr, order);
296f7e01ab8SAndrey Konovalov }
297f7e01ab8SAndrey Konovalov
page_alloc_uaf(struct kunit * test)2980f18ea6eSAndrey Konovalov static void page_alloc_uaf(struct kunit *test)
299f7e01ab8SAndrey Konovalov {
300f7e01ab8SAndrey Konovalov char *ptr;
301f7e01ab8SAndrey Konovalov struct page *pages;
302f7e01ab8SAndrey Konovalov size_t order = 4;
303f7e01ab8SAndrey Konovalov
304f7e01ab8SAndrey Konovalov pages = alloc_pages(GFP_KERNEL, order);
305f7e01ab8SAndrey Konovalov ptr = page_address(pages);
306f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
307f7e01ab8SAndrey Konovalov free_pages((unsigned long)ptr, order);
308f7e01ab8SAndrey Konovalov
309f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
310f7e01ab8SAndrey Konovalov }
311f7e01ab8SAndrey Konovalov
krealloc_more_oob_helper(struct kunit * test,size_t size1,size_t size2)312f7e01ab8SAndrey Konovalov static void krealloc_more_oob_helper(struct kunit *test,
313f7e01ab8SAndrey Konovalov size_t size1, size_t size2)
314f7e01ab8SAndrey Konovalov {
315f7e01ab8SAndrey Konovalov char *ptr1, *ptr2;
316f7e01ab8SAndrey Konovalov size_t middle;
317f7e01ab8SAndrey Konovalov
318f7e01ab8SAndrey Konovalov KUNIT_ASSERT_LT(test, size1, size2);
319f7e01ab8SAndrey Konovalov middle = size1 + (size2 - size1) / 2;
320f7e01ab8SAndrey Konovalov
321f7e01ab8SAndrey Konovalov ptr1 = kmalloc(size1, GFP_KERNEL);
322f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
323f7e01ab8SAndrey Konovalov
324f7e01ab8SAndrey Konovalov ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
325f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
326f7e01ab8SAndrey Konovalov
327d6e5040bSAndrey Konovalov /* Suppress -Warray-bounds warnings. */
328d6e5040bSAndrey Konovalov OPTIMIZER_HIDE_VAR(ptr2);
329d6e5040bSAndrey Konovalov
330f7e01ab8SAndrey Konovalov /* All offsets up to size2 must be accessible. */
331f7e01ab8SAndrey Konovalov ptr2[size1 - 1] = 'x';
332f7e01ab8SAndrey Konovalov ptr2[size1] = 'x';
333f7e01ab8SAndrey Konovalov ptr2[middle] = 'x';
334f7e01ab8SAndrey Konovalov ptr2[size2 - 1] = 'x';
335f7e01ab8SAndrey Konovalov
336f7e01ab8SAndrey Konovalov /* Generic mode is precise, so unaligned size2 must be inaccessible. */
337f7e01ab8SAndrey Konovalov if (IS_ENABLED(CONFIG_KASAN_GENERIC))
338f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
339f7e01ab8SAndrey Konovalov
340f7e01ab8SAndrey Konovalov /* For all modes first aligned offset after size2 must be inaccessible. */
341f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test,
342f7e01ab8SAndrey Konovalov ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
343f7e01ab8SAndrey Konovalov
344f7e01ab8SAndrey Konovalov kfree(ptr2);
345f7e01ab8SAndrey Konovalov }
346f7e01ab8SAndrey Konovalov
krealloc_less_oob_helper(struct kunit * test,size_t size1,size_t size2)347f7e01ab8SAndrey Konovalov static void krealloc_less_oob_helper(struct kunit *test,
348f7e01ab8SAndrey Konovalov size_t size1, size_t size2)
349f7e01ab8SAndrey Konovalov {
350f7e01ab8SAndrey Konovalov char *ptr1, *ptr2;
351f7e01ab8SAndrey Konovalov size_t middle;
352f7e01ab8SAndrey Konovalov
353f7e01ab8SAndrey Konovalov KUNIT_ASSERT_LT(test, size2, size1);
354f7e01ab8SAndrey Konovalov middle = size2 + (size1 - size2) / 2;
355f7e01ab8SAndrey Konovalov
356f7e01ab8SAndrey Konovalov ptr1 = kmalloc(size1, GFP_KERNEL);
357f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
358f7e01ab8SAndrey Konovalov
359f7e01ab8SAndrey Konovalov ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
360f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
361f7e01ab8SAndrey Konovalov
362d6e5040bSAndrey Konovalov /* Suppress -Warray-bounds warnings. */
363d6e5040bSAndrey Konovalov OPTIMIZER_HIDE_VAR(ptr2);
364d6e5040bSAndrey Konovalov
365f7e01ab8SAndrey Konovalov /* Must be accessible for all modes. */
366f7e01ab8SAndrey Konovalov ptr2[size2 - 1] = 'x';
367f7e01ab8SAndrey Konovalov
368f7e01ab8SAndrey Konovalov /* Generic mode is precise, so unaligned size2 must be inaccessible. */
369f7e01ab8SAndrey Konovalov if (IS_ENABLED(CONFIG_KASAN_GENERIC))
370f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
371f7e01ab8SAndrey Konovalov
372f7e01ab8SAndrey Konovalov /* For all modes first aligned offset after size2 must be inaccessible. */
373f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test,
374f7e01ab8SAndrey Konovalov ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
375f7e01ab8SAndrey Konovalov
376f7e01ab8SAndrey Konovalov /*
377f7e01ab8SAndrey Konovalov * For all modes all size2, middle, and size1 should land in separate
378f7e01ab8SAndrey Konovalov * granules and thus the latter two offsets should be inaccessible.
379f7e01ab8SAndrey Konovalov */
380f7e01ab8SAndrey Konovalov KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
381f7e01ab8SAndrey Konovalov round_down(middle, KASAN_GRANULE_SIZE));
382f7e01ab8SAndrey Konovalov KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
383f7e01ab8SAndrey Konovalov round_down(size1, KASAN_GRANULE_SIZE));
384f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
385f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
386f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');
387f7e01ab8SAndrey Konovalov
388f7e01ab8SAndrey Konovalov kfree(ptr2);
389f7e01ab8SAndrey Konovalov }
390f7e01ab8SAndrey Konovalov
/* Grow within the slab range: 201 -> 235 bytes. */
static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}
395f7e01ab8SAndrey Konovalov
/* Shrink within the slab range: 235 -> 201 bytes. */
static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}
400f7e01ab8SAndrey Konovalov
/* Grow past KMALLOC_MAX_CACHE_SIZE, i.e. within the page_alloc fallback. */
static void krealloc_large_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}
406f7e01ab8SAndrey Konovalov
/* Shrink past KMALLOC_MAX_CACHE_SIZE, i.e. within the page_alloc fallback. */
static void krealloc_large_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}
412f7e01ab8SAndrey Konovalov
413f7e01ab8SAndrey Konovalov /*
414f7e01ab8SAndrey Konovalov * Check that krealloc() detects a use-after-free, returns NULL,
415f7e01ab8SAndrey Konovalov * and doesn't unpoison the freed object.
416f7e01ab8SAndrey Konovalov */
krealloc_uaf(struct kunit * test)417f7e01ab8SAndrey Konovalov static void krealloc_uaf(struct kunit *test)
418f7e01ab8SAndrey Konovalov {
419f7e01ab8SAndrey Konovalov char *ptr1, *ptr2;
420f7e01ab8SAndrey Konovalov int size1 = 201;
421f7e01ab8SAndrey Konovalov int size2 = 235;
422f7e01ab8SAndrey Konovalov
423f7e01ab8SAndrey Konovalov ptr1 = kmalloc(size1, GFP_KERNEL);
424f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
425f7e01ab8SAndrey Konovalov kfree(ptr1);
426f7e01ab8SAndrey Konovalov
427f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
428f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NULL(test, ptr2);
429f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
430f7e01ab8SAndrey Konovalov }
431f7e01ab8SAndrey Konovalov
kmalloc_oob_16(struct kunit * test)432f7e01ab8SAndrey Konovalov static void kmalloc_oob_16(struct kunit *test)
433f7e01ab8SAndrey Konovalov {
434f7e01ab8SAndrey Konovalov struct {
435f7e01ab8SAndrey Konovalov u64 words[2];
436f7e01ab8SAndrey Konovalov } *ptr1, *ptr2;
437f7e01ab8SAndrey Konovalov
43885f195b1SMarco Elver KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
43985f195b1SMarco Elver
440f7e01ab8SAndrey Konovalov /* This test is specifically crafted for the generic mode. */
441f7e01ab8SAndrey Konovalov KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
442f7e01ab8SAndrey Konovalov
443e10aea10SArnd Bergmann /* RELOC_HIDE to prevent gcc from warning about short alloc */
444e10aea10SArnd Bergmann ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
445f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
446f7e01ab8SAndrey Konovalov
447f7e01ab8SAndrey Konovalov ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
448f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
449f7e01ab8SAndrey Konovalov
450f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(ptr1);
451f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(ptr2);
452f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
453f7e01ab8SAndrey Konovalov kfree(ptr1);
454f7e01ab8SAndrey Konovalov kfree(ptr2);
455f7e01ab8SAndrey Konovalov }
456f7e01ab8SAndrey Konovalov
kmalloc_uaf_16(struct kunit * test)457f7e01ab8SAndrey Konovalov static void kmalloc_uaf_16(struct kunit *test)
458f7e01ab8SAndrey Konovalov {
459f7e01ab8SAndrey Konovalov struct {
460f7e01ab8SAndrey Konovalov u64 words[2];
461f7e01ab8SAndrey Konovalov } *ptr1, *ptr2;
462f7e01ab8SAndrey Konovalov
46385f195b1SMarco Elver KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
46485f195b1SMarco Elver
465f7e01ab8SAndrey Konovalov ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
466f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
467f7e01ab8SAndrey Konovalov
468f7e01ab8SAndrey Konovalov ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
469f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
470f7e01ab8SAndrey Konovalov kfree(ptr2);
471f7e01ab8SAndrey Konovalov
472f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
473f7e01ab8SAndrey Konovalov kfree(ptr1);
474f7e01ab8SAndrey Konovalov }
475f7e01ab8SAndrey Konovalov
476f7e01ab8SAndrey Konovalov /*
477f7e01ab8SAndrey Konovalov * Note: in the memset tests below, the written range touches both valid and
478f7e01ab8SAndrey Konovalov * invalid memory. This makes sure that the instrumentation does not only check
479f7e01ab8SAndrey Konovalov * the starting address but the whole range.
480f7e01ab8SAndrey Konovalov */
481f7e01ab8SAndrey Konovalov
kmalloc_oob_memset_2(struct kunit * test)482f7e01ab8SAndrey Konovalov static void kmalloc_oob_memset_2(struct kunit *test)
483f7e01ab8SAndrey Konovalov {
484f7e01ab8SAndrey Konovalov char *ptr;
485f7e01ab8SAndrey Konovalov size_t size = 128 - KASAN_GRANULE_SIZE;
486b2325bf8SNico Pache size_t memset_size = 2;
487f7e01ab8SAndrey Konovalov
48885f195b1SMarco Elver KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
48985f195b1SMarco Elver
490f7e01ab8SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
491f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
492f7e01ab8SAndrey Konovalov
493b2325bf8SNico Pache OPTIMIZER_HIDE_VAR(ptr);
494f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(size);
495b2325bf8SNico Pache OPTIMIZER_HIDE_VAR(memset_size);
496b2325bf8SNico Pache KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
497f7e01ab8SAndrey Konovalov kfree(ptr);
498f7e01ab8SAndrey Konovalov }
499f7e01ab8SAndrey Konovalov
kmalloc_oob_memset_4(struct kunit * test)500f7e01ab8SAndrey Konovalov static void kmalloc_oob_memset_4(struct kunit *test)
501f7e01ab8SAndrey Konovalov {
502f7e01ab8SAndrey Konovalov char *ptr;
503f7e01ab8SAndrey Konovalov size_t size = 128 - KASAN_GRANULE_SIZE;
504b2325bf8SNico Pache size_t memset_size = 4;
505f7e01ab8SAndrey Konovalov
50685f195b1SMarco Elver KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
50785f195b1SMarco Elver
508f7e01ab8SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
509f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
510f7e01ab8SAndrey Konovalov
511b2325bf8SNico Pache OPTIMIZER_HIDE_VAR(ptr);
512f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(size);
513b2325bf8SNico Pache OPTIMIZER_HIDE_VAR(memset_size);
514b2325bf8SNico Pache KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
515f7e01ab8SAndrey Konovalov kfree(ptr);
516f7e01ab8SAndrey Konovalov }
517f7e01ab8SAndrey Konovalov
kmalloc_oob_memset_8(struct kunit * test)518f7e01ab8SAndrey Konovalov static void kmalloc_oob_memset_8(struct kunit *test)
519f7e01ab8SAndrey Konovalov {
520f7e01ab8SAndrey Konovalov char *ptr;
521f7e01ab8SAndrey Konovalov size_t size = 128 - KASAN_GRANULE_SIZE;
522b2325bf8SNico Pache size_t memset_size = 8;
523f7e01ab8SAndrey Konovalov
52485f195b1SMarco Elver KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
52585f195b1SMarco Elver
526f7e01ab8SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
527f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
528f7e01ab8SAndrey Konovalov
529b2325bf8SNico Pache OPTIMIZER_HIDE_VAR(ptr);
530f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(size);
531b2325bf8SNico Pache OPTIMIZER_HIDE_VAR(memset_size);
532b2325bf8SNico Pache KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
533f7e01ab8SAndrey Konovalov kfree(ptr);
534f7e01ab8SAndrey Konovalov }
535f7e01ab8SAndrey Konovalov
kmalloc_oob_memset_16(struct kunit * test)536f7e01ab8SAndrey Konovalov static void kmalloc_oob_memset_16(struct kunit *test)
537f7e01ab8SAndrey Konovalov {
538f7e01ab8SAndrey Konovalov char *ptr;
539f7e01ab8SAndrey Konovalov size_t size = 128 - KASAN_GRANULE_SIZE;
540b2325bf8SNico Pache size_t memset_size = 16;
541f7e01ab8SAndrey Konovalov
54285f195b1SMarco Elver KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
54385f195b1SMarco Elver
544f7e01ab8SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
545f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
546f7e01ab8SAndrey Konovalov
547b2325bf8SNico Pache OPTIMIZER_HIDE_VAR(ptr);
548f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(size);
549b2325bf8SNico Pache OPTIMIZER_HIDE_VAR(memset_size);
550b2325bf8SNico Pache KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
551f7e01ab8SAndrey Konovalov kfree(ptr);
552f7e01ab8SAndrey Konovalov }
553f7e01ab8SAndrey Konovalov
kmalloc_oob_in_memset(struct kunit * test)554f7e01ab8SAndrey Konovalov static void kmalloc_oob_in_memset(struct kunit *test)
555f7e01ab8SAndrey Konovalov {
556f7e01ab8SAndrey Konovalov char *ptr;
557f7e01ab8SAndrey Konovalov size_t size = 128 - KASAN_GRANULE_SIZE;
558f7e01ab8SAndrey Konovalov
55985f195b1SMarco Elver KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
56085f195b1SMarco Elver
561f7e01ab8SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
562f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
563f7e01ab8SAndrey Konovalov
564f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(ptr);
565f7e01ab8SAndrey Konovalov OPTIMIZER_HIDE_VAR(size);
566f7e01ab8SAndrey Konovalov KUNIT_EXPECT_KASAN_FAIL(test,
567f7e01ab8SAndrey Konovalov memset(ptr, 0, size + KASAN_GRANULE_SIZE));
568f7e01ab8SAndrey Konovalov kfree(ptr);
569f7e01ab8SAndrey Konovalov }
570f7e01ab8SAndrey Konovalov
/*
 * Check that KASAN detects a memmove() whose size came from a negative
 * signed value: -2 converted to size_t becomes a huge length, so the copy
 * immediately runs out of bounds.
 */
static void kmalloc_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	/* (size_t)-2: models a negative length wrapped to an enormous one. */
	size_t invalid_size = -2;

	/* Only meaningful when memmove() itself is KASAN-checked. */
	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	/* Hide the values so the compiler cannot elide the bad memmove. */
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}
596f7e01ab8SAndrey Konovalov
/*
 * Check that KASAN detects a memmove() whose source region, starting at
 * offset 4 with length equal to the full object size, extends 4 bytes past
 * the end of the allocation.
 */
static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = size;

	/* Only meaningful when memmove() itself is KASAN-checked. */
	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	/* Hide the values so the compiler cannot elide the bad memmove. */
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}
615f7e01ab8SAndrey Konovalov
/* Check that KASAN detects a plain load from a freed kmalloc object. */
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	/* volatile keeps the dead load from being optimized away. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}
627f7e01ab8SAndrey Konovalov
/* Check that KASAN detects a memset() into an already-freed object. */
static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	/* Only meaningful when memset() itself is KASAN-checked. */
	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	/* The quarantine keeps the slot unreused, so this writes freed memory. */
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}
647f7e01ab8SAndrey Konovalov
/*
 * Check that KASAN reports a use-after-free on the first pointer even after
 * a second allocation of the same size has been made (which, for tag-based
 * modes, may land in the same slot with a different tag).
 */
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	/* Access through the stale pointer must still be reported. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	/* ptr1 was freed, so the fresh allocation must not alias it (as seen by KASAN). */
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}
677f7e01ab8SAndrey Konovalov
678f7e01ab8SAndrey Konovalov /*
679f7e01ab8SAndrey Konovalov * Check that KASAN detects use-after-free when another object was allocated in
680f7e01ab8SAndrey Konovalov * the same slot. Relevant for the tag-based modes, which do not use quarantine.
681f7e01ab8SAndrey Konovalov */
static void kmalloc_uaf3(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 100;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	/* Allocate and free again so the slot has been reused and refreed. */
	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	/* The access via the first (stale) pointer must still be reported. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}
700f7e01ab8SAndrey Konovalov
/*
 * Exercise plain, atomic_t, and atomic_long_t accessors on an out-of-bounds
 * address (@unsafe) and expect every single one to trigger a KASAN report.
 * @safe points to a valid allocation and is only used as the legitimate
 * second operand of the two-address try_cmpxchg() calls, so each report is
 * attributable to the unsafe side alone.
 */
static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
{
	int *i_unsafe = unsafe;

	/* Plain and ordered scalar accesses. */
	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));

	/* atomic_t API. */
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
	/* Both operand positions of try_cmpxchg are checked. */
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));

	/* atomic_long_t API (wider accesses through the same addresses). */
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
}
7584e76c8ccSPaul Heidekrüger
/*
 * Check that KASAN detects out-of-bounds atomic accesses: point the "unsafe"
 * operand into a redzone and run the full atomic API against it.
 */
static void kasan_atomics(struct kunit *test)
{
	void *a1, *a2;

	/*
	 * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
	 * that the following 16 bytes will make up the redzone.
	 */
	a1 = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
	/* Sized for the widest type the helper accesses through "safe". */
	a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);

	/* Use atomics to access the redzone. */
	kasan_atomics_helper(test, a1 + 48, a2);

	kfree(a1);
	kfree(a2);
}
7784e76c8ccSPaul Heidekrüger
/* Check that KASAN detects a double free done via kfree_sensitive(). */
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	/* Second free of the same pointer must be reported. */
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}
79086b15969SAndrey Konovalov
79186b15969SAndrey Konovalov /* Check that ksize() does NOT unpoison whole object. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	/* Request a size that leaves slack before the next kmalloc bucket. */
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	size_t real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* ksize() reports the full bucket size, larger than what was asked for. */
	real_size = ksize(ptr);
	KUNIT_EXPECT_GT(test, real_size, size);

	OPTIMIZER_HIDE_VAR(ptr);

	/* These accesses shouldn't trigger a KASAN report. */
	ptr[0] = 'x';
	ptr[size - 1] = 'x';

	/* These must trigger a KASAN report. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

	kfree(ptr);
}
81886b15969SAndrey Konovalov
81986b15969SAndrey Konovalov /*
82086b15969SAndrey Konovalov * Check that a use-after-free is detected by ksize() and via normal accesses
82186b15969SAndrey Konovalov * after it.
82286b15969SAndrey Konovalov */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	/* Keep the compiler from proving ptr is dead after the kfree. */
	OPTIMIZER_HIDE_VAR(ptr);
	/* ksize() itself touches the freed object and must be reported. */
	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}
83786b15969SAndrey Konovalov
83886b15969SAndrey Konovalov /*
83986b15969SAndrey Konovalov * The two tests below check that Generic KASAN prints auxiliary stack traces
84086b15969SAndrey Konovalov * for RCU callbacks and workqueues. The reports need to be inspected manually.
84186b15969SAndrey Konovalov *
84286b15969SAndrey Konovalov * These tests are still enabled for other KASAN modes to make sure that all
84386b15969SAndrey Konovalov * modes report bad accesses in tested scenarios.
84486b15969SAndrey Konovalov */
84586b15969SAndrey Konovalov
/* Payload plus rcu_head for the RCU use-after-free test below. */
static struct kasan_rcu_info {
	int i;
	struct rcu_head rcu;
} *global_rcu_ptr;	/* set by rcu_uaf(), consumed by rcu_uaf_reclaim() */
85086b15969SAndrey Konovalov
/*
 * RCU callback that frees the object and then deliberately reads it,
 * producing the use-after-free the test expects.
 */
static void rcu_uaf_reclaim(struct rcu_head *rp)
{
	struct kasan_rcu_info *fp =
		container_of(rp, struct kasan_rcu_info, rcu);

	kfree(fp);
	/* Intentional bad access; volatile keeps the load from being elided. */
	((volatile struct kasan_rcu_info *)fp)->i;
}
85986b15969SAndrey Konovalov
/*
 * Check that a use-after-free inside an RCU callback is detected; for
 * Generic KASAN the report's auxiliary stack traces need manual inspection.
 */
static void rcu_uaf(struct kunit *test)
{
	struct kasan_rcu_info *ptr;

	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	global_rcu_ptr = rcu_dereference_protected(
				(struct kasan_rcu_info __rcu *)ptr, NULL);

	/* rcu_barrier() waits for the callback, so the bad access happens here. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
		rcu_barrier());
}
87486b15969SAndrey Konovalov
/* Work handler that frees its own work item, setting up the UAF below. */
static void workqueue_uaf_work(struct work_struct *work)
{
	kfree(work);
}
87986b15969SAndrey Konovalov
/*
 * Check that accessing a work item freed by its own handler is detected;
 * for Generic KASAN the auxiliary stack traces need manual inspection.
 */
static void workqueue_uaf(struct kunit *test)
{
	struct workqueue_struct *workqueue;
	struct work_struct *work;

	workqueue = create_workqueue("kasan_workqueue_test");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);

	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);

	INIT_WORK(work, workqueue_uaf_work);
	queue_work(workqueue, work);
	/* destroy_workqueue() flushes, so the handler has freed "work" by now. */
	destroy_workqueue(workqueue);

	KUNIT_EXPECT_KASAN_FAIL(test,
		((volatile struct work_struct *)work)->data);
}
89886b15969SAndrey Konovalov
/*
 * Check that kfree() works (without a false-positive report) on a pointer
 * reconstructed through the struct page of the allocation.
 */
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Round-trip the pointer via page + offset, then free the result. */
	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}
913f7e01ab8SAndrey Konovalov
/*
 * Check that kfree() works (without a false-positive report) on a pointer
 * round-tripped through its physical address.
 */
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* virt -> phys -> virt should yield a pointer kfree() accepts. */
	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}
926f7e01ab8SAndrey Konovalov
/* Check that KASAN detects an out-of-bounds read on a kmem_cache object. */
static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* OOB_TAG_OFF moves the access past the granule for tag-based modes. */
	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}
948f7e01ab8SAndrey Konovalov
/* Check that KASAN detects a double kmem_cache_free() of the same object. */
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	/* Second free of the same object must be reported. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}
96986b15969SAndrey Konovalov
/*
 * Check that KASAN detects freeing a pointer that does not point to the
 * start of its object (here: object address + 1).
 */
static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	/* SLAB_TYPESAFE_BY_RCU matches the cache flavor this bug class hits. */
	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}
99886b15969SAndrey Konovalov
empty_cache_ctor(void * object)99986b15969SAndrey Konovalov static void empty_cache_ctor(void *object) { }
100086b15969SAndrey Konovalov
/* Check that KASAN detects destroying a kmem_cache twice. */
static void kmem_cache_double_destroy(struct kunit *test)
{
	struct kmem_cache *cache;

	/* Provide a constructor to prevent cache merging. */
	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	kmem_cache_destroy(cache);
	/* Second destroy of the same cache must be reported. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}
101186b15969SAndrey Konovalov
/*
 * Exercise a SLAB_ACCOUNT cache with repeated alloc/free cycles; no KASAN
 * report is expected — this checks that accounting paths stay clean.
 */
static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}
1038f7e01ab8SAndrey Konovalov
/*
 * Exercise the bulk alloc/free API and write to the first and last byte of
 * each object; no KASAN report is expected for these in-bounds accesses.
 */
static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Touch both object boundaries to verify bulk objects are unpoisoned. */
	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}
1063f7e01ab8SAndrey Konovalov
/*
 * Set up a kmalloc-backed mempool for the mempool tests and return one
 * preallocated element the caller must eventually mempool_free().
 */
static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
{
	int pool_size = 4;
	int ret;
	void *elem;

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_kmalloc_pool(pool, pool_size, size);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * Allocate one element to prevent mempool from freeing elements to the
	 * underlying allocator and instead make it add them to the element
	 * list when the tests trigger double-free and invalid-free bugs.
	 * This allows testing KASAN annotations in add_element().
	 */
	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	return elem;
}
10850f199eb4SAndrey Konovalov
/*
 * Set up a slab-backed mempool for the mempool tests and return the backing
 * cache the caller must destroy after mempool_exit().
 */
static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
{
	struct kmem_cache *cache;
	int pool_size = 4;
	int ret;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_slab_pool(pool, pool_size, cache);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * Do not allocate one preallocated element, as we skip the double-free
	 * and invalid-free tests for slab mempool for simplicity.
	 */

	return cache;
}
11060f199eb4SAndrey Konovalov
/*
 * Set up a page-backed mempool of the given order and return one
 * preallocated element the caller must eventually mempool_free().
 */
static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
{
	int pool_size = 4;
	int ret;
	void *elem;

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_page_pool(pool, pool_size, order);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Keep one element out of the pool, as in mempool_prepare_kmalloc(). */
	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	return elem;
}
11220f199eb4SAndrey Konovalov
/*
 * Allocate an element from the given mempool and read one byte past its end,
 * expecting a KASAN report. Generic KASAN can detect the access right at
 * ptr[size]; tag-based modes only past the granule-rounded boundary.
 */
static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	/* Keep the compiler from reasoning about the bad access. */
	OPTIMIZER_HIDE_VAR(elem);

	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test,
			((volatile char *)&elem[size])[0]);
	else
		KUNIT_EXPECT_KASAN_FAIL(test,
			((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);

	mempool_free(elem, pool);
}
11410f199eb4SAndrey Konovalov
mempool_kmalloc_oob_right(struct kunit * test)11420f199eb4SAndrey Konovalov static void mempool_kmalloc_oob_right(struct kunit *test)
11430f199eb4SAndrey Konovalov {
11440f199eb4SAndrey Konovalov mempool_t pool;
11450f199eb4SAndrey Konovalov size_t size = 128 - KASAN_GRANULE_SIZE - 5;
11460f199eb4SAndrey Konovalov void *extra_elem;
11470f199eb4SAndrey Konovalov
11480f199eb4SAndrey Konovalov extra_elem = mempool_prepare_kmalloc(test, &pool, size);
11490f199eb4SAndrey Konovalov
11500f199eb4SAndrey Konovalov mempool_oob_right_helper(test, &pool, size);
11510f199eb4SAndrey Konovalov
11520f199eb4SAndrey Konovalov mempool_free(extra_elem, &pool);
11530f199eb4SAndrey Konovalov mempool_exit(&pool);
11540f199eb4SAndrey Konovalov }
11550f199eb4SAndrey Konovalov
mempool_kmalloc_large_oob_right(struct kunit * test)11560f199eb4SAndrey Konovalov static void mempool_kmalloc_large_oob_right(struct kunit *test)
11570f199eb4SAndrey Konovalov {
11580f199eb4SAndrey Konovalov mempool_t pool;
11590f199eb4SAndrey Konovalov size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
11600f199eb4SAndrey Konovalov void *extra_elem;
11610f199eb4SAndrey Konovalov
11620f199eb4SAndrey Konovalov extra_elem = mempool_prepare_kmalloc(test, &pool, size);
11630f199eb4SAndrey Konovalov
11640f199eb4SAndrey Konovalov mempool_oob_right_helper(test, &pool, size);
11650f199eb4SAndrey Konovalov
11660f199eb4SAndrey Konovalov mempool_free(extra_elem, &pool);
11670f199eb4SAndrey Konovalov mempool_exit(&pool);
11680f199eb4SAndrey Konovalov }
11690f199eb4SAndrey Konovalov
mempool_slab_oob_right(struct kunit * test)11700f199eb4SAndrey Konovalov static void mempool_slab_oob_right(struct kunit *test)
11710f199eb4SAndrey Konovalov {
11720f199eb4SAndrey Konovalov mempool_t pool;
11730f199eb4SAndrey Konovalov size_t size = 123;
11740f199eb4SAndrey Konovalov struct kmem_cache *cache;
11750f199eb4SAndrey Konovalov
11760f199eb4SAndrey Konovalov cache = mempool_prepare_slab(test, &pool, size);
11770f199eb4SAndrey Konovalov
11780f199eb4SAndrey Konovalov mempool_oob_right_helper(test, &pool, size);
11790f199eb4SAndrey Konovalov
11800f199eb4SAndrey Konovalov mempool_exit(&pool);
11810f199eb4SAndrey Konovalov kmem_cache_destroy(cache);
11820f199eb4SAndrey Konovalov }
11830f199eb4SAndrey Konovalov
11840f199eb4SAndrey Konovalov /*
11850f199eb4SAndrey Konovalov * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
11860f199eb4SAndrey Konovalov * allocations have no redzones, and thus the out-of-bounds detection is not
11870f199eb4SAndrey Konovalov * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
11880f199eb4SAndrey Konovalov * the tag-based KASAN modes, the neighboring allocation might have the same
11890f199eb4SAndrey Konovalov * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
11900f199eb4SAndrey Konovalov */
11910f199eb4SAndrey Konovalov
/*
 * Take an element from @pool, free it back, and then read from it, expecting
 * a KASAN use-after-free report. When @page is true, the pool is a page pool
 * and the element is really a struct page pointer, so the read goes through
 * the page's mapped virtual address instead.
 */
static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
{
	char *elem, *ptr;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	mempool_free(elem, pool);

	ptr = page ? page_address((struct page *)elem) : elem;
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
12040f199eb4SAndrey Konovalov
mempool_kmalloc_uaf(struct kunit * test)12050f199eb4SAndrey Konovalov static void mempool_kmalloc_uaf(struct kunit *test)
12060f199eb4SAndrey Konovalov {
12070f199eb4SAndrey Konovalov mempool_t pool;
12080f199eb4SAndrey Konovalov size_t size = 128;
12090f199eb4SAndrey Konovalov void *extra_elem;
12100f199eb4SAndrey Konovalov
12110f199eb4SAndrey Konovalov extra_elem = mempool_prepare_kmalloc(test, &pool, size);
12120f199eb4SAndrey Konovalov
12130f199eb4SAndrey Konovalov mempool_uaf_helper(test, &pool, false);
12140f199eb4SAndrey Konovalov
12150f199eb4SAndrey Konovalov mempool_free(extra_elem, &pool);
12160f199eb4SAndrey Konovalov mempool_exit(&pool);
12170f199eb4SAndrey Konovalov }
12180f199eb4SAndrey Konovalov
mempool_kmalloc_large_uaf(struct kunit * test)12190f199eb4SAndrey Konovalov static void mempool_kmalloc_large_uaf(struct kunit *test)
12200f199eb4SAndrey Konovalov {
12210f199eb4SAndrey Konovalov mempool_t pool;
12220f199eb4SAndrey Konovalov size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
12230f199eb4SAndrey Konovalov void *extra_elem;
12240f199eb4SAndrey Konovalov
12250f199eb4SAndrey Konovalov extra_elem = mempool_prepare_kmalloc(test, &pool, size);
12260f199eb4SAndrey Konovalov
12270f199eb4SAndrey Konovalov mempool_uaf_helper(test, &pool, false);
12280f199eb4SAndrey Konovalov
12290f199eb4SAndrey Konovalov mempool_free(extra_elem, &pool);
12300f199eb4SAndrey Konovalov mempool_exit(&pool);
12310f199eb4SAndrey Konovalov }
12320f199eb4SAndrey Konovalov
mempool_slab_uaf(struct kunit * test)12330f199eb4SAndrey Konovalov static void mempool_slab_uaf(struct kunit *test)
12340f199eb4SAndrey Konovalov {
12350f199eb4SAndrey Konovalov mempool_t pool;
12360f199eb4SAndrey Konovalov size_t size = 123;
12370f199eb4SAndrey Konovalov struct kmem_cache *cache;
12380f199eb4SAndrey Konovalov
12390f199eb4SAndrey Konovalov cache = mempool_prepare_slab(test, &pool, size);
12400f199eb4SAndrey Konovalov
12410f199eb4SAndrey Konovalov mempool_uaf_helper(test, &pool, false);
12420f199eb4SAndrey Konovalov
12430f199eb4SAndrey Konovalov mempool_exit(&pool);
12440f199eb4SAndrey Konovalov kmem_cache_destroy(cache);
12450f199eb4SAndrey Konovalov }
12460f199eb4SAndrey Konovalov
mempool_page_alloc_uaf(struct kunit * test)12470f199eb4SAndrey Konovalov static void mempool_page_alloc_uaf(struct kunit *test)
12480f199eb4SAndrey Konovalov {
12490f199eb4SAndrey Konovalov mempool_t pool;
12500f199eb4SAndrey Konovalov int order = 2;
12510f199eb4SAndrey Konovalov void *extra_elem;
12520f199eb4SAndrey Konovalov
12530f199eb4SAndrey Konovalov extra_elem = mempool_prepare_page(test, &pool, order);
12540f199eb4SAndrey Konovalov
12550f199eb4SAndrey Konovalov mempool_uaf_helper(test, &pool, true);
12560f199eb4SAndrey Konovalov
12570f199eb4SAndrey Konovalov mempool_free(extra_elem, &pool);
12580f199eb4SAndrey Konovalov mempool_exit(&pool);
12590f199eb4SAndrey Konovalov }
12600f199eb4SAndrey Konovalov
/*
 * Take an element from @pool, free it back, and then free it a second time,
 * expecting KASAN to report the second free as a double-free.
 */
static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	mempool_free(elem, pool);

	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
}
12720f199eb4SAndrey Konovalov
mempool_kmalloc_double_free(struct kunit * test)12730f199eb4SAndrey Konovalov static void mempool_kmalloc_double_free(struct kunit *test)
12740f199eb4SAndrey Konovalov {
12750f199eb4SAndrey Konovalov mempool_t pool;
12760f199eb4SAndrey Konovalov size_t size = 128;
12770f199eb4SAndrey Konovalov char *extra_elem;
12780f199eb4SAndrey Konovalov
12790f199eb4SAndrey Konovalov extra_elem = mempool_prepare_kmalloc(test, &pool, size);
12800f199eb4SAndrey Konovalov
12810f199eb4SAndrey Konovalov mempool_double_free_helper(test, &pool);
12820f199eb4SAndrey Konovalov
12830f199eb4SAndrey Konovalov mempool_free(extra_elem, &pool);
12840f199eb4SAndrey Konovalov mempool_exit(&pool);
12850f199eb4SAndrey Konovalov }
12860f199eb4SAndrey Konovalov
mempool_kmalloc_large_double_free(struct kunit * test)12870f199eb4SAndrey Konovalov static void mempool_kmalloc_large_double_free(struct kunit *test)
12880f199eb4SAndrey Konovalov {
12890f199eb4SAndrey Konovalov mempool_t pool;
12900f199eb4SAndrey Konovalov size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
12910f199eb4SAndrey Konovalov char *extra_elem;
12920f199eb4SAndrey Konovalov
12930f199eb4SAndrey Konovalov extra_elem = mempool_prepare_kmalloc(test, &pool, size);
12940f199eb4SAndrey Konovalov
12950f199eb4SAndrey Konovalov mempool_double_free_helper(test, &pool);
12960f199eb4SAndrey Konovalov
12970f199eb4SAndrey Konovalov mempool_free(extra_elem, &pool);
12980f199eb4SAndrey Konovalov mempool_exit(&pool);
12990f199eb4SAndrey Konovalov }
13000f199eb4SAndrey Konovalov
mempool_page_alloc_double_free(struct kunit * test)13010f199eb4SAndrey Konovalov static void mempool_page_alloc_double_free(struct kunit *test)
13020f199eb4SAndrey Konovalov {
13030f199eb4SAndrey Konovalov mempool_t pool;
13040f199eb4SAndrey Konovalov int order = 2;
13050f199eb4SAndrey Konovalov char *extra_elem;
13060f199eb4SAndrey Konovalov
13070f199eb4SAndrey Konovalov extra_elem = mempool_prepare_page(test, &pool, order);
13080f199eb4SAndrey Konovalov
13090f199eb4SAndrey Konovalov mempool_double_free_helper(test, &pool);
13100f199eb4SAndrey Konovalov
13110f199eb4SAndrey Konovalov mempool_free(extra_elem, &pool);
13120f199eb4SAndrey Konovalov mempool_exit(&pool);
13130f199eb4SAndrey Konovalov }
13140f199eb4SAndrey Konovalov
/*
 * Take an element from @pool and free a pointer into its interior (elem + 1),
 * expecting KASAN to report an invalid free. The element itself is then freed
 * properly so the pool is left balanced.
 */
static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));

	mempool_free(elem, pool);
}
13260f199eb4SAndrey Konovalov
mempool_kmalloc_invalid_free(struct kunit * test)13270f199eb4SAndrey Konovalov static void mempool_kmalloc_invalid_free(struct kunit *test)
13280f199eb4SAndrey Konovalov {
13290f199eb4SAndrey Konovalov mempool_t pool;
13300f199eb4SAndrey Konovalov size_t size = 128;
13310f199eb4SAndrey Konovalov char *extra_elem;
13320f199eb4SAndrey Konovalov
13330f199eb4SAndrey Konovalov extra_elem = mempool_prepare_kmalloc(test, &pool, size);
13340f199eb4SAndrey Konovalov
13350f199eb4SAndrey Konovalov mempool_kmalloc_invalid_free_helper(test, &pool);
13360f199eb4SAndrey Konovalov
13370f199eb4SAndrey Konovalov mempool_free(extra_elem, &pool);
13380f199eb4SAndrey Konovalov mempool_exit(&pool);
13390f199eb4SAndrey Konovalov }
13400f199eb4SAndrey Konovalov
mempool_kmalloc_large_invalid_free(struct kunit * test)13410f199eb4SAndrey Konovalov static void mempool_kmalloc_large_invalid_free(struct kunit *test)
13420f199eb4SAndrey Konovalov {
13430f199eb4SAndrey Konovalov mempool_t pool;
13440f199eb4SAndrey Konovalov size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
13450f199eb4SAndrey Konovalov char *extra_elem;
13460f199eb4SAndrey Konovalov
13470f199eb4SAndrey Konovalov extra_elem = mempool_prepare_kmalloc(test, &pool, size);
13480f199eb4SAndrey Konovalov
13490f199eb4SAndrey Konovalov mempool_kmalloc_invalid_free_helper(test, &pool);
13500f199eb4SAndrey Konovalov
13510f199eb4SAndrey Konovalov mempool_free(extra_elem, &pool);
13520f199eb4SAndrey Konovalov mempool_exit(&pool);
13530f199eb4SAndrey Konovalov }
13540f199eb4SAndrey Konovalov
13550f199eb4SAndrey Konovalov /*
13560f199eb4SAndrey Konovalov * Skip the invalid-free test for page mempool. The invalid-free detection only
13570f199eb4SAndrey Konovalov * works for compound pages and mempool preallocates all page elements without
13580f199eb4SAndrey Konovalov * the __GFP_COMP flag.
13590f199eb4SAndrey Konovalov */
13600f199eb4SAndrey Konovalov
/* Global buffer deliberately accessed out-of-bounds by the global OOB tests. */
static char global_array[10];
1362f7e01ab8SAndrey Konovalov
/* Check that a read past the end of a global array is detected. */
static void kasan_global_oob_right(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
1385f7e01ab8SAndrey Konovalov
/* Check that a read before the start of a global array is detected. */
static void kasan_global_oob_left(struct kunit *test)
{
	/* Volatile pointer hides the array bounds; see kasan_global_oob_right. */
	char *volatile array = global_array;
	char *p = array - 3;

	/*
	 * GCC is known to fail this test, skip it.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
1399f7e01ab8SAndrey Konovalov
/*
 * Check that a read past the end of a stack array is detected. OOB_TAG_OFF
 * shifts the access past the KASAN granule for the tag-based modes (it is 0
 * for Generic KASAN).
 */
static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
1411f7e01ab8SAndrey Konovalov
/*
 * Check that a read before the start of an alloca'ed (variable-length) array
 * is detected. The volatile length forces a dynamic stack allocation.
 */
static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
1426f7e01ab8SAndrey Konovalov
/*
 * Check that a read past the end of an alloca'ed (variable-length) array is
 * detected. The volatile length forces a dynamic stack allocation.
 */
static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
1441f7e01ab8SAndrey Konovalov
/*
 * Check that KASAN detects the out-of-bounds read performed by memchr() when
 * the scan length (size + 1) extends one byte past a kmalloc'ed buffer.
 */
static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	/*
	 * For the tag-based modes (OOB_TAG_OFF != 0), round the size up to the
	 * granule so that the byte at offset 'size' is genuinely out of bounds.
	 */
	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Keep the compiler from optimizing based on the values of ptr/size. */
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}
1466f7e01ab8SAndrey Konovalov
/*
 * Check that KASAN detects the out-of-bounds read performed by memcmp() when
 * the compare length (size + 1) extends one byte past a kmalloc'ed buffer.
 * The second operand, arr (9 ints = 36 bytes), stays in bounds for size + 1
 * in both configurations, so only the first operand faults.
 */
static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	/* See comment in kasan_memchr about the granule rounding. */
	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	/* Keep the compiler from optimizing based on the values of ptr/size. */
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size+1));
	kfree(ptr);
}
1492f7e01ab8SAndrey Konovalov
/*
 * Check that KASAN reports use-after-free accesses performed by the various
 * str*() helpers on a freed kmalloc allocation.
 */
static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Deliberately freed: all accesses below are use-after-free. */
	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
1528f7e01ab8SAndrey Konovalov
/*
 * Check that KASAN reports the out-of-bounds access performed by each of the
 * atomic and non-atomic bit-modifying operations on bit @nr of @addr. The
 * caller chooses @nr/@addr so that the touched long lies outside the
 * allocation.
 */
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}
1540f7e01ab8SAndrey Konovalov
/*
 * Check that KASAN reports the out-of-bounds access performed by each of the
 * test-and-modify and test bit operations on bit @nr of @addr.
 */
static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
	/* The mask (1 << nr) must fit within a single byte, hence nr < 7. */
	if (nr < 7)
		KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				xor_unlock_is_negative_byte(1 << nr, addr));
}
1555f7e01ab8SAndrey Konovalov
/*
 * Check that Generic KASAN catches out-of-bounds accesses performed by the
 * bitops helpers, both within the kzalloc rounding slack and fully past the
 * allocation.
 */
static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}
1584f7e01ab8SAndrey Konovalov
/*
 * Check that the tag-based KASAN modes catch out-of-bounds accesses performed
 * by the bitops helpers when the accesses land in the slab redzone.
 */
static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}
1602f7e01ab8SAndrey Konovalov
/*
 * Check that pointers returned by vmalloc() carry a proper non-default KASAN
 * tag in the tag-based modes, and that the exported vmalloc helpers and
 * set_memory_*() accept such tagged pointers.
 */
static void vmalloc_helpers_tags(struct kunit *test)
{
	void *ptr;

	/* This test is intended for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	ptr = vmalloc(PAGE_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Check that the returned pointer is tagged. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure exported vmalloc helpers handle tagged pointers. */
	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

	/* set_memory_*() helpers are not exported to modules. */
#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
	{
		int rv;

		/* Make sure vmalloc'ed memory permissions can be changed. */
		rv = set_memory_ro((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
		rv = set_memory_rw((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
	}
#endif

	vfree(ptr);
}
1640f7e01ab8SAndrey Konovalov
/*
 * Check that KASAN detects out-of-bounds accesses past the requested
 * vmalloc() size, while in-bounds accesses to both the virtual mapping and
 * the underlying physical page remain valid.
 */
static void vmalloc_oob(struct kunit *test)
{
	char *v_ptr, *p_ptr;
	struct page *page;
	/* Keep size + the OOB offsets below within half a page. */
	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	v_ptr = vmalloc(size);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	OPTIMIZER_HIDE_VAR(v_ptr);

	/*
	 * We have to be careful not to hit the guard page in vmalloc tests.
	 * The MMU will catch that and crash us.
	 */

	/* Make sure in-bounds accesses are valid. */
	v_ptr[0] = 0;
	v_ptr[size - 1] = 0;

	/*
	 * An unaligned access past the requested vmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

	/* An aligned access into the first out-of-bounds granule. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

	/* Check that in-bounds accesses to the physical page are valid. */
	page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
	p_ptr[0] = 0;

	vfree(v_ptr);

	/*
	 * We can't check for use-after-unmap bugs in this nor in the following
	 * vmalloc tests, as the page might be fully unmapped and accessing it
	 * will crash the kernel.
	 */
}
1691f7e01ab8SAndrey Konovalov
/*
 * Check that vmap() mappings get a proper non-match-all KASAN pointer tag
 * under the software tag-based mode, that in-bounds accesses through both
 * the physical and the virtual pointer work, and that vmalloc_to_page()
 * recovers the original page from the tagged virtual pointer.
 */
static void vmap_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *p_page, *v_page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vmap mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/* CONFIG_KASAN_VMALLOC can still be disabled at boot time. */
	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	p_page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
	p_ptr = page_address(p_page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/*
	 * We can't check for out-of-bounds bugs in this nor in the following
	 * vmalloc tests, as allocations have page granularity and accessing
	 * the guard page will crash the kernel.
	 */

	/* The assigned tag must be in [KASAN_TAG_MIN, KASAN_TAG_KERNEL). */
	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}
1737f7e01ab8SAndrey Konovalov
/*
 * Check that vm_map_ram() mappings get a proper non-match-all KASAN pointer
 * tag under the software tag-based mode and that in-bounds accesses through
 * both the physical and the virtual pointer work.
 */
static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	/* -1 lets vm_map_ram() pick the NUMA node. */
	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/* The assigned tag must be in [KASAN_TAG_MIN, KASAN_TAG_KERNEL). */
	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}
1767f7e01ab8SAndrey Konovalov
vmalloc_percpu(struct kunit * test)1768f7e01ab8SAndrey Konovalov static void vmalloc_percpu(struct kunit *test)
1769f7e01ab8SAndrey Konovalov {
1770f7e01ab8SAndrey Konovalov char __percpu *ptr;
1771f7e01ab8SAndrey Konovalov int cpu;
1772f7e01ab8SAndrey Konovalov
1773f7e01ab8SAndrey Konovalov /*
1774f7e01ab8SAndrey Konovalov * This test is specifically crafted for the software tag-based mode,
1775f7e01ab8SAndrey Konovalov * the only tag-based mode that poisons percpu mappings.
1776f7e01ab8SAndrey Konovalov */
1777f7e01ab8SAndrey Konovalov KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1778f7e01ab8SAndrey Konovalov
1779f7e01ab8SAndrey Konovalov ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
1780f7e01ab8SAndrey Konovalov
1781f7e01ab8SAndrey Konovalov for_each_possible_cpu(cpu) {
1782f7e01ab8SAndrey Konovalov char *c_ptr = per_cpu_ptr(ptr, cpu);
1783f7e01ab8SAndrey Konovalov
1784f7e01ab8SAndrey Konovalov KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
1785f7e01ab8SAndrey Konovalov KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);
1786f7e01ab8SAndrey Konovalov
1787f7e01ab8SAndrey Konovalov /* Make sure that in-bounds accesses don't crash the kernel. */
1788f7e01ab8SAndrey Konovalov *c_ptr = 0;
1789f7e01ab8SAndrey Konovalov }
1790f7e01ab8SAndrey Konovalov
1791f7e01ab8SAndrey Konovalov free_percpu(ptr);
1792f7e01ab8SAndrey Konovalov }
1793f7e01ab8SAndrey Konovalov
1794f7e01ab8SAndrey Konovalov /*
1795f7e01ab8SAndrey Konovalov * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
1796f7e01ab8SAndrey Konovalov * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
1797f7e01ab8SAndrey Konovalov * modes.
1798f7e01ab8SAndrey Konovalov */
match_all_not_assigned(struct kunit * test)1799f7e01ab8SAndrey Konovalov static void match_all_not_assigned(struct kunit *test)
1800f7e01ab8SAndrey Konovalov {
1801f7e01ab8SAndrey Konovalov char *ptr;
1802f7e01ab8SAndrey Konovalov struct page *pages;
1803f7e01ab8SAndrey Konovalov int i, size, order;
1804f7e01ab8SAndrey Konovalov
1805f7e01ab8SAndrey Konovalov KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1806f7e01ab8SAndrey Konovalov
1807f7e01ab8SAndrey Konovalov for (i = 0; i < 256; i++) {
1808e8a533cbSJason A. Donenfeld size = get_random_u32_inclusive(1, 1024);
1809f7e01ab8SAndrey Konovalov ptr = kmalloc(size, GFP_KERNEL);
1810f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1811f7e01ab8SAndrey Konovalov KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1812f7e01ab8SAndrey Konovalov KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1813f7e01ab8SAndrey Konovalov kfree(ptr);
1814f7e01ab8SAndrey Konovalov }
1815f7e01ab8SAndrey Konovalov
1816f7e01ab8SAndrey Konovalov for (i = 0; i < 256; i++) {
1817e8a533cbSJason A. Donenfeld order = get_random_u32_inclusive(1, 4);
1818f7e01ab8SAndrey Konovalov pages = alloc_pages(GFP_KERNEL, order);
1819f7e01ab8SAndrey Konovalov ptr = page_address(pages);
1820f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1821f7e01ab8SAndrey Konovalov KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1822f7e01ab8SAndrey Konovalov KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1823f7e01ab8SAndrey Konovalov free_pages((unsigned long)ptr, order);
1824f7e01ab8SAndrey Konovalov }
1825f7e01ab8SAndrey Konovalov
182614c99b99SAndrey Konovalov if (!kasan_vmalloc_enabled())
1827f7e01ab8SAndrey Konovalov return;
1828f7e01ab8SAndrey Konovalov
1829f7e01ab8SAndrey Konovalov for (i = 0; i < 256; i++) {
1830e8a533cbSJason A. Donenfeld size = get_random_u32_inclusive(1, 1024);
1831f7e01ab8SAndrey Konovalov ptr = vmalloc(size);
1832f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1833f7e01ab8SAndrey Konovalov KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1834f7e01ab8SAndrey Konovalov KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1835f7e01ab8SAndrey Konovalov vfree(ptr);
1836f7e01ab8SAndrey Konovalov }
1837f7e01ab8SAndrey Konovalov }
1838f7e01ab8SAndrey Konovalov
1839f7e01ab8SAndrey Konovalov /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
match_all_ptr_tag(struct kunit * test)1840f7e01ab8SAndrey Konovalov static void match_all_ptr_tag(struct kunit *test)
1841f7e01ab8SAndrey Konovalov {
1842f7e01ab8SAndrey Konovalov char *ptr;
1843f7e01ab8SAndrey Konovalov u8 tag;
1844f7e01ab8SAndrey Konovalov
1845f7e01ab8SAndrey Konovalov KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1846f7e01ab8SAndrey Konovalov
1847f7e01ab8SAndrey Konovalov ptr = kmalloc(128, GFP_KERNEL);
1848f7e01ab8SAndrey Konovalov KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1849f7e01ab8SAndrey Konovalov
1850f7e01ab8SAndrey Konovalov /* Backup the assigned tag. */
1851f7e01ab8SAndrey Konovalov tag = get_tag(ptr);
1852f7e01ab8SAndrey Konovalov KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);
1853f7e01ab8SAndrey Konovalov
1854f7e01ab8SAndrey Konovalov /* Reset the tag to 0xff.*/
1855f7e01ab8SAndrey Konovalov ptr = set_tag(ptr, KASAN_TAG_KERNEL);
1856f7e01ab8SAndrey Konovalov
1857f7e01ab8SAndrey Konovalov /* This access shouldn't trigger a KASAN report. */
1858f7e01ab8SAndrey Konovalov *ptr = 0;
1859f7e01ab8SAndrey Konovalov
1860f7e01ab8SAndrey Konovalov /* Recover the pointer tag and free. */
1861f7e01ab8SAndrey Konovalov ptr = set_tag(ptr, tag);
1862f7e01ab8SAndrey Konovalov kfree(ptr);
1863f7e01ab8SAndrey Konovalov }
1864f7e01ab8SAndrey Konovalov
1865f7e01ab8SAndrey Konovalov /* Check that there are no match-all memory tags for tag-based modes. */
/*
 * Check that there are no match-all memory tags for tag-based modes: poisoning
 * memory with any tag that differs from the pointer tag must produce a KASAN
 * report on access.
 */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		/*
		 * For Software Tag-Based KASAN, skip the majority of tag
		 * values to avoid the test printing too many reports.
		 */
		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
				tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
			continue;

		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/*
	 * Recover the memory tag (repoison the granule with the pointer's own
	 * tag so kfree() sees matching tags) and free.
	 */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}
1901f7e01ab8SAndrey Konovalov
/*
 * The full list of KASAN KUnit test cases. Tests that only apply to a
 * particular KASAN mode or config skip themselves internally via the
 * KASAN_TEST_NEEDS_CONFIG_ON/OFF checks and kunit_skip() calls.
 */
static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_big_oob_right),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_large_uaf),
	KUNIT_CASE(kmalloc_large_invalid_free),
	KUNIT_CASE(page_alloc_oob_right),
	KUNIT_CASE(page_alloc_uaf),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_large_more_oob),
	KUNIT_CASE(krealloc_large_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kmalloc_uaf3),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(rcu_uaf),
	KUNIT_CASE(workqueue_uaf),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(mempool_kmalloc_oob_right),
	KUNIT_CASE(mempool_kmalloc_large_oob_right),
	KUNIT_CASE(mempool_slab_oob_right),
	KUNIT_CASE(mempool_kmalloc_uaf),
	KUNIT_CASE(mempool_kmalloc_large_uaf),
	KUNIT_CASE(mempool_slab_uaf),
	KUNIT_CASE(mempool_page_alloc_uaf),
	KUNIT_CASE(mempool_kmalloc_double_free),
	KUNIT_CASE(mempool_kmalloc_large_double_free),
	KUNIT_CASE(mempool_page_alloc_double_free),
	KUNIT_CASE(mempool_kmalloc_invalid_free),
	KUNIT_CASE(mempool_kmalloc_large_invalid_free),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kasan_atomics),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(vmalloc_percpu),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}	/* Sentinel terminating the case list. */
};
1976f7e01ab8SAndrey Konovalov
/*
 * KASAN KUnit test suite definition. kasan_test_exit runs after each test
 * case; kasan_suite_init/kasan_suite_exit run once around the whole suite.
 */
static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
	.suite_init = kasan_suite_init,
	.suite_exit = kasan_suite_exit,
};
1984f7e01ab8SAndrey Konovalov
/* Register the suite with the KUnit framework. */
kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");
1988