// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include "../mm/slab.h"

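/*
 * 'slab_errors' is wrapped in a named KUnit resource: the SLUB validation
 * code looks the resource up by name and increments the counter for each
 * inconsistency it finds, and the test cases assert on the final count.
 */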
static struct kunit_resource resource;
static int slab_errors;

/*
 * Wrapper around kmem_cache_create() that drops the 'align' and 'ctor'
 * parameters and sets the SLAB_SKIP_KFENCE flag, so that no object comes
 * from the kfence pool, where a corruption could be caught by both our
 * test and the kfence sanity checks.
 */
static struct kmem_cache *test_kmem_cache_create(const char *name,
				unsigned int size, slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_create(name, size, 0,
					(flags | SLAB_NO_USER_FLAGS), NULL);
	s->flags |= SLAB_SKIP_KFENCE;
	return s;
}

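/*
 * Clobber one byte of the right redzone of a live object. Validation
 * should report two errors: one for the redzone overwrite and one for
 * restoring it.
 */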
static void test_clobber_zone(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
				SLAB_RED_ZONE);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	p[64] = 0x12;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_free(s, p);
	kmem_cache_destroy(s);
}

#ifndef CONFIG_KASAN
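/*
 * The following cases corrupt objects that have already been freed; they
 * are compiled out when KASAN is enabled, as KASAN's own handling of
 * freed objects would get in the way.
 */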
static void test_next_pointer(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
				64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
	unsigned long tmp;
	unsigned long *ptr_addr;

	kmem_cache_free(s, p);

	ptr_addr = (unsigned long *)(p + s->offset);
	tmp = *ptr_addr;
	p[s->offset] = ~p[s->offset];

	/*
	 * Expecting three errors: one for the corrupted freechain, one for
	 * the wrong count of objects in use, and a third for fixing the
	 * broken cache.
	 */
	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 3, slab_errors);

	/*
	 * Try to repair the corrupted freepointer. Still expecting two
	 * errors: the first for the wrong count of objects in use, the
	 * second for fixing the broken cache.
	 */
	*ptr_addr = tmp;
	slab_errors = 0;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	/*
	 * The previous validation repaired the count of objects in use.
	 * Now expecting no errors.
	 */
	slab_errors = 0;
	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 0, slab_errors);

	kmem_cache_destroy(s);
}

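/*
 * Overwrite the first word of a freed, poisoned object. Expect two
 * errors: the poison-overwrite report and its fix-up.
 */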
static void test_first_word(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
				64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_free(s, p);
	*p = 0x78;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kmem_cache_destroy(s);
}

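/*
 * Same as above, but clobber a byte in the middle of the freed object to
 * check that poisoning covers the whole payload.
 */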
static void test_clobber_50th_byte(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
				64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_free(s, p);
	p[50] = 0x9a;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kmem_cache_destroy(s);
}
#endif

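/*
 * Clobber the right redzone of an object after it has been freed;
 * expect the overwrite report plus the fix-up, i.e. two errors.
 */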
static void test_clobber_redzone_free(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
				SLAB_RED_ZONE);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	kmem_cache_free(s, p);
	p[64] = 0xab;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_destroy(s);
}

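/*
 * With SLAB_KMALLOC and redzoning, the slack between the requested size
 * (18 bytes) and the object size of the cache (32 bytes) is redzoned as
 * well; writing into that slack must be flagged by validation.
 */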
static void test_kmalloc_redzone_access(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
	u8 *p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 18));

	kasan_disable_current();

	/* Suppress the -Warray-bounds warning */
	OPTIMIZER_HIDE_VAR(p);
	p[18] = 0xab;
	p[19] = 0xab;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_free(s, p);
	kmem_cache_destroy(s);
}

struct test_kfree_rcu_struct {
	struct rcu_head rcu;
};

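/*
 * Free an object through kfree_rcu() and destroy the cache right away;
 * kmem_cache_destroy() must flush the pending RCU-delayed free first, so
 * validation should observe no errors. Skipped when the suite is built
 * in, where kfree_rcu() cannot be exercised this way.
 */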
static void test_kfree_rcu(struct kunit *test)
{
	struct kmem_cache *s;
	struct test_kfree_rcu_struct *p;

	if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
		kunit_skip(test, "can't do kfree_rcu() when test is built-in");

	s = test_kmem_cache_create("TestSlub_kfree_rcu",
				   sizeof(struct test_kfree_rcu_struct),
				   SLAB_NO_MERGE);
	p = kmem_cache_alloc(s, GFP_KERNEL);

	kfree_rcu(p, rcu);
	kmem_cache_destroy(s);

	KUNIT_EXPECT_EQ(test, 0, slab_errors);
}

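/*
 * Destroy a cache that still holds a live allocation; the destroy path
 * must detect and report the leaked object, which the test counts as
 * two slab errors.
 */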
static void test_leak_destroy(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
				64, SLAB_NO_MERGE);
	kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_destroy(s);

	KUNIT_EXPECT_EQ(test, 2, slab_errors);
}

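/*
 * Check that krealloc() keeps the unused tail of a kmalloc object
 * redzoned when shrinking, and that __GFP_ZERO zeroes newly exposed
 * bytes when growing, both within the original object and across a
 * move to a bigger one.
 */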
static void test_krealloc_redzone_zeroing(struct kunit *test)
{
	u8 *p;
	int i;
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_krealloc", 64,
				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);

	p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 48));
	memset(p, 0xff, 48);

	kasan_disable_current();
	OPTIMIZER_HIDE_VAR(p);

	/* Test shrink */
	p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
	for (i = 40; i < 64; i++)
		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);

	/* Test grow within the same 64B kmalloc object */
	p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
	for (i = 40; i < 56; i++)
		KUNIT_EXPECT_EQ(test, p[i], 0);
	for (i = 56; i < 64; i++)
		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 0, slab_errors);

	memset(p, 0xff, 56);
	/* Test grow with allocating a bigger 128B object */
	p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
	for (i = 0; i < 56; i++)
		KUNIT_EXPECT_EQ(test, p[i], 0xff);
	for (i = 56; i < 112; i++)
		KUNIT_EXPECT_EQ(test, p[i], 0);

	kfree(p);
	kasan_enable_current();
	kmem_cache_destroy(s);
}

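/*
 * Runs before each test case: reset the error counter and register it
 * as the named resource the SLUB validator increments, so deliberate
 * corruptions are counted here instead of being treated as real bugs.
 */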
static int test_init(struct kunit *test)
{
	slab_errors = 0;

	kunit_add_named_resource(test, NULL, NULL, &resource,
					"slab_errors", &slab_errors);
	return 0;
}

static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_clobber_zone),

#ifndef CONFIG_KASAN
	KUNIT_CASE(test_next_pointer),
	KUNIT_CASE(test_first_word),
	KUNIT_CASE(test_clobber_50th_byte),
#endif

	KUNIT_CASE(test_clobber_redzone_free),
	KUNIT_CASE(test_kmalloc_redzone_access),
	KUNIT_CASE(test_kfree_rcu),
	KUNIT_CASE(test_leak_destroy),
	KUNIT_CASE(test_krealloc_redzone_zeroing),
	{}
};

static struct kunit_suite test_suite = {
	.name = "slub_test",
	.init = test_init,
	.test_cases = test_cases,
};
kunit_test_suite(test_suite);

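MODULE_DESCRIPTION("Kunit tests for slub allocator");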
MODULE_LICENSE("GPL");