1db6fe4d6SKees Cook // SPDX-License-Identifier: GPL-2.0
2db6fe4d6SKees Cook #include <kunit/test.h>
3db6fe4d6SKees Cook #include <kunit/test-bug.h>
4db6fe4d6SKees Cook #include <linux/mm.h>
5db6fe4d6SKees Cook #include <linux/slab.h>
6db6fe4d6SKees Cook #include <linux/module.h>
7db6fe4d6SKees Cook #include <linux/kernel.h>
8db6fe4d6SKees Cook #include <linux/rcupdate.h>
905b00ffdSLinus Torvalds #include <linux/delay.h>
10db6fe4d6SKees Cook #include "../mm/slab.h"
11db6fe4d6SKees Cook
/* KUnit resource handle used to publish &slab_errors to the SLUB core. */
static struct kunit_resource resource;
/* Error counter; bumped by SLUB validation while a test case runs. */
static int slab_errors;
15db6fe4d6SKees Cook /*
16db6fe4d6SKees Cook * Wrapper function for kmem_cache_create(), which reduces 2 parameters:
17db6fe4d6SKees Cook * 'align' and 'ctor', and sets SLAB_SKIP_KFENCE flag to avoid getting an
18db6fe4d6SKees Cook * object from kfence pool, where the operation could be caught by both
19db6fe4d6SKees Cook * our test and kfence sanity check.
20db6fe4d6SKees Cook */
test_kmem_cache_create(const char * name,unsigned int size,slab_flags_t flags)21db6fe4d6SKees Cook static struct kmem_cache *test_kmem_cache_create(const char *name,
22db6fe4d6SKees Cook unsigned int size, slab_flags_t flags)
23db6fe4d6SKees Cook {
24db6fe4d6SKees Cook struct kmem_cache *s = kmem_cache_create(name, size, 0,
25db6fe4d6SKees Cook (flags | SLAB_NO_USER_FLAGS), NULL);
26db6fe4d6SKees Cook s->flags |= SLAB_SKIP_KFENCE;
27db6fe4d6SKees Cook return s;
28db6fe4d6SKees Cook }
29db6fe4d6SKees Cook
test_clobber_zone(struct kunit * test)30db6fe4d6SKees Cook static void test_clobber_zone(struct kunit *test)
31db6fe4d6SKees Cook {
32db6fe4d6SKees Cook struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
33db6fe4d6SKees Cook SLAB_RED_ZONE);
34db6fe4d6SKees Cook u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
35db6fe4d6SKees Cook
36db6fe4d6SKees Cook kasan_disable_current();
37db6fe4d6SKees Cook p[64] = 0x12;
38db6fe4d6SKees Cook
39db6fe4d6SKees Cook validate_slab_cache(s);
40db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, 2, slab_errors);
41db6fe4d6SKees Cook
42db6fe4d6SKees Cook kasan_enable_current();
43db6fe4d6SKees Cook kmem_cache_free(s, p);
44db6fe4d6SKees Cook kmem_cache_destroy(s);
45db6fe4d6SKees Cook }
46db6fe4d6SKees Cook
47db6fe4d6SKees Cook #ifndef CONFIG_KASAN
test_next_pointer(struct kunit * test)48db6fe4d6SKees Cook static void test_next_pointer(struct kunit *test)
49db6fe4d6SKees Cook {
50db6fe4d6SKees Cook struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
51db6fe4d6SKees Cook 64, SLAB_POISON);
52db6fe4d6SKees Cook u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
53db6fe4d6SKees Cook unsigned long tmp;
54db6fe4d6SKees Cook unsigned long *ptr_addr;
55db6fe4d6SKees Cook
56db6fe4d6SKees Cook kmem_cache_free(s, p);
57db6fe4d6SKees Cook
58db6fe4d6SKees Cook ptr_addr = (unsigned long *)(p + s->offset);
59db6fe4d6SKees Cook tmp = *ptr_addr;
60db6fe4d6SKees Cook p[s->offset] = ~p[s->offset];
61db6fe4d6SKees Cook
62db6fe4d6SKees Cook /*
63db6fe4d6SKees Cook * Expecting three errors.
64db6fe4d6SKees Cook * One for the corrupted freechain and the other one for the wrong
65db6fe4d6SKees Cook * count of objects in use. The third error is fixing broken cache.
66db6fe4d6SKees Cook */
67db6fe4d6SKees Cook validate_slab_cache(s);
68db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, 3, slab_errors);
69db6fe4d6SKees Cook
70db6fe4d6SKees Cook /*
71db6fe4d6SKees Cook * Try to repair corrupted freepointer.
72db6fe4d6SKees Cook * Still expecting two errors. The first for the wrong count
73db6fe4d6SKees Cook * of objects in use.
74db6fe4d6SKees Cook * The second error is for fixing broken cache.
75db6fe4d6SKees Cook */
76db6fe4d6SKees Cook *ptr_addr = tmp;
77db6fe4d6SKees Cook slab_errors = 0;
78db6fe4d6SKees Cook
79db6fe4d6SKees Cook validate_slab_cache(s);
80db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, 2, slab_errors);
81db6fe4d6SKees Cook
82db6fe4d6SKees Cook /*
83db6fe4d6SKees Cook * Previous validation repaired the count of objects in use.
84db6fe4d6SKees Cook * Now expecting no error.
85db6fe4d6SKees Cook */
86db6fe4d6SKees Cook slab_errors = 0;
87db6fe4d6SKees Cook validate_slab_cache(s);
88db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, 0, slab_errors);
89db6fe4d6SKees Cook
90db6fe4d6SKees Cook kmem_cache_destroy(s);
91db6fe4d6SKees Cook }
92db6fe4d6SKees Cook
test_first_word(struct kunit * test)93db6fe4d6SKees Cook static void test_first_word(struct kunit *test)
94db6fe4d6SKees Cook {
95db6fe4d6SKees Cook struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
96db6fe4d6SKees Cook 64, SLAB_POISON);
97db6fe4d6SKees Cook u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
98db6fe4d6SKees Cook
99db6fe4d6SKees Cook kmem_cache_free(s, p);
100db6fe4d6SKees Cook *p = 0x78;
101db6fe4d6SKees Cook
102db6fe4d6SKees Cook validate_slab_cache(s);
103db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, 2, slab_errors);
104db6fe4d6SKees Cook
105db6fe4d6SKees Cook kmem_cache_destroy(s);
106db6fe4d6SKees Cook }
107db6fe4d6SKees Cook
test_clobber_50th_byte(struct kunit * test)108db6fe4d6SKees Cook static void test_clobber_50th_byte(struct kunit *test)
109db6fe4d6SKees Cook {
110db6fe4d6SKees Cook struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
111db6fe4d6SKees Cook 64, SLAB_POISON);
112db6fe4d6SKees Cook u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
113db6fe4d6SKees Cook
114db6fe4d6SKees Cook kmem_cache_free(s, p);
115db6fe4d6SKees Cook p[50] = 0x9a;
116db6fe4d6SKees Cook
117db6fe4d6SKees Cook validate_slab_cache(s);
118db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, 2, slab_errors);
119db6fe4d6SKees Cook
120db6fe4d6SKees Cook kmem_cache_destroy(s);
121db6fe4d6SKees Cook }
122db6fe4d6SKees Cook #endif
123db6fe4d6SKees Cook
test_clobber_redzone_free(struct kunit * test)124db6fe4d6SKees Cook static void test_clobber_redzone_free(struct kunit *test)
125db6fe4d6SKees Cook {
126db6fe4d6SKees Cook struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
127db6fe4d6SKees Cook SLAB_RED_ZONE);
128db6fe4d6SKees Cook u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
129db6fe4d6SKees Cook
130db6fe4d6SKees Cook kasan_disable_current();
131db6fe4d6SKees Cook kmem_cache_free(s, p);
132db6fe4d6SKees Cook p[64] = 0xab;
133db6fe4d6SKees Cook
134db6fe4d6SKees Cook validate_slab_cache(s);
135db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, 2, slab_errors);
136db6fe4d6SKees Cook
137db6fe4d6SKees Cook kasan_enable_current();
138db6fe4d6SKees Cook kmem_cache_destroy(s);
139db6fe4d6SKees Cook }
140db6fe4d6SKees Cook
test_kmalloc_redzone_access(struct kunit * test)141db6fe4d6SKees Cook static void test_kmalloc_redzone_access(struct kunit *test)
142db6fe4d6SKees Cook {
143db6fe4d6SKees Cook struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
144db6fe4d6SKees Cook SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
145db6fe4d6SKees Cook u8 *p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 18));
146db6fe4d6SKees Cook
147db6fe4d6SKees Cook kasan_disable_current();
148db6fe4d6SKees Cook
149db6fe4d6SKees Cook /* Suppress the -Warray-bounds warning */
150db6fe4d6SKees Cook OPTIMIZER_HIDE_VAR(p);
151db6fe4d6SKees Cook p[18] = 0xab;
152db6fe4d6SKees Cook p[19] = 0xab;
153db6fe4d6SKees Cook
154db6fe4d6SKees Cook validate_slab_cache(s);
155db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, 2, slab_errors);
156db6fe4d6SKees Cook
157db6fe4d6SKees Cook kasan_enable_current();
158db6fe4d6SKees Cook kmem_cache_free(s, p);
159db6fe4d6SKees Cook kmem_cache_destroy(s);
160db6fe4d6SKees Cook }
161db6fe4d6SKees Cook
/* Minimal object carrying the rcu_head that kfree_rcu() requires. */
struct test_kfree_rcu_struct {
	struct rcu_head rcu;
};
165db6fe4d6SKees Cook
test_kfree_rcu(struct kunit * test)166db6fe4d6SKees Cook static void test_kfree_rcu(struct kunit *test)
167db6fe4d6SKees Cook {
168db6fe4d6SKees Cook struct kmem_cache *s;
169db6fe4d6SKees Cook struct test_kfree_rcu_struct *p;
170db6fe4d6SKees Cook
171db6fe4d6SKees Cook if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
172db6fe4d6SKees Cook kunit_skip(test, "can't do kfree_rcu() when test is built-in");
173db6fe4d6SKees Cook
174db6fe4d6SKees Cook s = test_kmem_cache_create("TestSlub_kfree_rcu",
175db6fe4d6SKees Cook sizeof(struct test_kfree_rcu_struct),
176db6fe4d6SKees Cook SLAB_NO_MERGE);
177db6fe4d6SKees Cook p = kmem_cache_alloc(s, GFP_KERNEL);
178db6fe4d6SKees Cook
179db6fe4d6SKees Cook kfree_rcu(p, rcu);
180db6fe4d6SKees Cook kmem_cache_destroy(s);
181db6fe4d6SKees Cook
182db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, 0, slab_errors);
183db6fe4d6SKees Cook }
184db6fe4d6SKees Cook
/* Pairs a work item with the cache it should destroy from workqueue context. */
struct cache_destroy_work {
	struct work_struct work;
	struct kmem_cache *s;
};
18905b00ffdSLinus Torvalds
cache_destroy_workfn(struct work_struct * w)19005b00ffdSLinus Torvalds static void cache_destroy_workfn(struct work_struct *w)
19105b00ffdSLinus Torvalds {
19205b00ffdSLinus Torvalds struct cache_destroy_work *cdw;
19305b00ffdSLinus Torvalds
19405b00ffdSLinus Torvalds cdw = container_of(w, struct cache_destroy_work, work);
19505b00ffdSLinus Torvalds kmem_cache_destroy(cdw->s);
19605b00ffdSLinus Torvalds }
19705b00ffdSLinus Torvalds
/* Number of create/destroy iterations used to stress the destroy path. */
#define KMEM_CACHE_DESTROY_NR 10
19905b00ffdSLinus Torvalds
test_kfree_rcu_wq_destroy(struct kunit * test)20005b00ffdSLinus Torvalds static void test_kfree_rcu_wq_destroy(struct kunit *test)
20105b00ffdSLinus Torvalds {
20205b00ffdSLinus Torvalds struct test_kfree_rcu_struct *p;
20305b00ffdSLinus Torvalds struct cache_destroy_work cdw;
20405b00ffdSLinus Torvalds struct workqueue_struct *wq;
20505b00ffdSLinus Torvalds struct kmem_cache *s;
20605b00ffdSLinus Torvalds unsigned int delay;
20705b00ffdSLinus Torvalds int i;
20805b00ffdSLinus Torvalds
20905b00ffdSLinus Torvalds if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
21005b00ffdSLinus Torvalds kunit_skip(test, "can't do kfree_rcu() when test is built-in");
21105b00ffdSLinus Torvalds
21205b00ffdSLinus Torvalds INIT_WORK_ONSTACK(&cdw.work, cache_destroy_workfn);
21305b00ffdSLinus Torvalds wq = alloc_workqueue("test_kfree_rcu_destroy_wq",
21405b00ffdSLinus Torvalds WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
21505b00ffdSLinus Torvalds
21605b00ffdSLinus Torvalds if (!wq)
21705b00ffdSLinus Torvalds kunit_skip(test, "failed to alloc wq");
21805b00ffdSLinus Torvalds
21905b00ffdSLinus Torvalds for (i = 0; i < KMEM_CACHE_DESTROY_NR; i++) {
22005b00ffdSLinus Torvalds s = test_kmem_cache_create("TestSlub_kfree_rcu_wq_destroy",
22105b00ffdSLinus Torvalds sizeof(struct test_kfree_rcu_struct),
22205b00ffdSLinus Torvalds SLAB_NO_MERGE);
22305b00ffdSLinus Torvalds
22405b00ffdSLinus Torvalds if (!s)
22505b00ffdSLinus Torvalds kunit_skip(test, "failed to create cache");
22605b00ffdSLinus Torvalds
22705b00ffdSLinus Torvalds delay = get_random_u8();
22805b00ffdSLinus Torvalds p = kmem_cache_alloc(s, GFP_KERNEL);
22905b00ffdSLinus Torvalds kfree_rcu(p, rcu);
23005b00ffdSLinus Torvalds
23105b00ffdSLinus Torvalds cdw.s = s;
23205b00ffdSLinus Torvalds
23305b00ffdSLinus Torvalds msleep(delay);
23405b00ffdSLinus Torvalds queue_work(wq, &cdw.work);
23505b00ffdSLinus Torvalds flush_work(&cdw.work);
23605b00ffdSLinus Torvalds }
23705b00ffdSLinus Torvalds
23805b00ffdSLinus Torvalds destroy_workqueue(wq);
23905b00ffdSLinus Torvalds KUNIT_EXPECT_EQ(test, 0, slab_errors);
24005b00ffdSLinus Torvalds }
24105b00ffdSLinus Torvalds
test_leak_destroy(struct kunit * test)242db6fe4d6SKees Cook static void test_leak_destroy(struct kunit *test)
243db6fe4d6SKees Cook {
244db6fe4d6SKees Cook struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
245db6fe4d6SKees Cook 64, SLAB_NO_MERGE);
246db6fe4d6SKees Cook kmem_cache_alloc(s, GFP_KERNEL);
247db6fe4d6SKees Cook
248db6fe4d6SKees Cook kmem_cache_destroy(s);
249db6fe4d6SKees Cook
250db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, 2, slab_errors);
251db6fe4d6SKees Cook }
252db6fe4d6SKees Cook
test_krealloc_redzone_zeroing(struct kunit * test)253db6fe4d6SKees Cook static void test_krealloc_redzone_zeroing(struct kunit *test)
254db6fe4d6SKees Cook {
255db6fe4d6SKees Cook u8 *p;
256db6fe4d6SKees Cook int i;
257db6fe4d6SKees Cook struct kmem_cache *s = test_kmem_cache_create("TestSlub_krealloc", 64,
258db6fe4d6SKees Cook SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
259db6fe4d6SKees Cook
260db6fe4d6SKees Cook p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 48));
261db6fe4d6SKees Cook memset(p, 0xff, 48);
262db6fe4d6SKees Cook
263db6fe4d6SKees Cook kasan_disable_current();
264db6fe4d6SKees Cook OPTIMIZER_HIDE_VAR(p);
265db6fe4d6SKees Cook
266db6fe4d6SKees Cook /* Test shrink */
267db6fe4d6SKees Cook p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
268db6fe4d6SKees Cook for (i = 40; i < 64; i++)
269db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
270db6fe4d6SKees Cook
271db6fe4d6SKees Cook /* Test grow within the same 64B kmalloc object */
272db6fe4d6SKees Cook p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
273db6fe4d6SKees Cook for (i = 40; i < 56; i++)
274db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, p[i], 0);
275db6fe4d6SKees Cook for (i = 56; i < 64; i++)
276db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
277db6fe4d6SKees Cook
278db6fe4d6SKees Cook validate_slab_cache(s);
279db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, 0, slab_errors);
280db6fe4d6SKees Cook
281db6fe4d6SKees Cook memset(p, 0xff, 56);
282db6fe4d6SKees Cook /* Test grow with allocating a bigger 128B object */
283db6fe4d6SKees Cook p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
284db6fe4d6SKees Cook for (i = 0; i < 56; i++)
285db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, p[i], 0xff);
286db6fe4d6SKees Cook for (i = 56; i < 112; i++)
287db6fe4d6SKees Cook KUNIT_EXPECT_EQ(test, p[i], 0);
288db6fe4d6SKees Cook
289db6fe4d6SKees Cook kfree(p);
290db6fe4d6SKees Cook kasan_enable_current();
291db6fe4d6SKees Cook kmem_cache_destroy(s);
292db6fe4d6SKees Cook }
293db6fe4d6SKees Cook
test_init(struct kunit * test)294db6fe4d6SKees Cook static int test_init(struct kunit *test)
295db6fe4d6SKees Cook {
296db6fe4d6SKees Cook slab_errors = 0;
297db6fe4d6SKees Cook
298db6fe4d6SKees Cook kunit_add_named_resource(test, NULL, NULL, &resource,
299db6fe4d6SKees Cook "slab_errors", &slab_errors);
300db6fe4d6SKees Cook return 0;
301db6fe4d6SKees Cook }
302db6fe4d6SKees Cook
/* Case table; the poison-based cases conflict with KASAN instrumentation. */
static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_clobber_zone),

#ifndef CONFIG_KASAN
	KUNIT_CASE(test_next_pointer),
	KUNIT_CASE(test_first_word),
	KUNIT_CASE(test_clobber_50th_byte),
#endif

	KUNIT_CASE(test_clobber_redzone_free),
	KUNIT_CASE(test_kmalloc_redzone_access),
	KUNIT_CASE(test_kfree_rcu),
	KUNIT_CASE(test_kfree_rcu_wq_destroy),
	KUNIT_CASE(test_leak_destroy),
	KUNIT_CASE(test_krealloc_redzone_zeroing),
	{}
};
320db6fe4d6SKees Cook
/* Suite definition: test_init runs before each case to reset slab_errors. */
static struct kunit_suite test_suite = {
	.name = "slub_test",
	.init = test_init,
	.test_cases = test_cases,
};
kunit_test_suite(test_suite);
327db6fe4d6SKees Cook
/* Module metadata (required for modpost when built as a module). */
MODULE_DESCRIPTION("Kunit tests for slub allocator");
MODULE_LICENSE("GPL");
330