// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/perf_event.h>
#include "../mm/slab.h"

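/*
 * slab_errors is shared with the SLUB validation code: test_init() below
 * registers it as a named KUnit resource ("slab_errors"), which mm/slub.c
 * looks up to bump the counter for every inconsistency it reports or
 * fixes while validating a cache.
 */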
static struct kunit_resource resource;
static int slab_errors;

/*
 * Wrapper around kmem_cache_create() that drops the 'align' and 'ctor'
 * parameters and sets the SLAB_SKIP_KFENCE flag, so that no object is
 * handed out from the KFENCE pool, where the corrupting accesses these
 * tests perform could be caught by both the test and the KFENCE sanity
 * checks.
 */
static struct kmem_cache *test_kmem_cache_create(const char *name,
				unsigned int size, slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_create(name, size, 0,
					(flags | SLAB_NO_USER_FLAGS), NULL);
	s->flags |= SLAB_SKIP_KFENCE;
	return s;
}

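/*
 * Write one byte past the end of an allocated 64-byte object, into its
 * redzone, and check that cache validation catches it. Two errors are
 * expected: judging by the accounting described in test_next_pointer()
 * below, one for the redzone overwrite and one for fixing the cache.
 */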
static void test_clobber_zone(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
							SLAB_RED_ZONE);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	p[64] = 0x12;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_free(s, p);
	kmem_cache_destroy(s);
}

#ifndef CONFIG_KASAN
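/*
 * The three tests below write to an object after it has been freed and
 * do not disable KASAN around the access, so with CONFIG_KASAN enabled
 * the write would be reported as a use-after-free before SLUB poisoning
 * could be checked; hence they are compiled out under KASAN.
 */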
static void test_next_pointer(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
							64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
	unsigned long tmp;
	unsigned long *ptr_addr;

	kmem_cache_free(s, p);

	ptr_addr = (unsigned long *)(p + s->offset);
	tmp = *ptr_addr;
	p[s->offset] = ~p[s->offset];

	/*
	 * Expecting three errors: one for the corrupted freechain, one for
	 * the wrong count of objects in use, and one for fixing the broken
	 * cache.
	 */
	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 3, slab_errors);

	/*
	 * Restore the corrupted freepointer. Still expecting two errors:
	 * the first for the wrong count of objects in use, the second for
	 * fixing the broken cache.
	 */
	*ptr_addr = tmp;
	slab_errors = 0;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	/*
	 * The previous validation repaired the count of objects in use,
	 * so no errors are expected now.
	 */
	slab_errors = 0;
	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 0, slab_errors);

	kmem_cache_destroy(s);
}

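/* Corrupt the first word of a freed object in a poisoned cache. */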
static void test_first_word(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
							64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_free(s, p);
	*p = 0x78;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kmem_cache_destroy(s);
}

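/* Corrupt a single byte in the middle of a freed, poisoned object. */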
static void test_clobber_50th_byte(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
							64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_free(s, p);
	p[50] = 0x9a;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kmem_cache_destroy(s);
}
#endif

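/*
 * Like test_clobber_zone(), but the redzone byte is overwritten after
 * the object has been freed, so validation checks a free object's
 * redzone rather than an allocated one's.
 */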
static void test_clobber_redzone_free(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
							SLAB_RED_ZONE);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	kmem_cache_free(s, p);
	p[64] = 0xab;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_destroy(s);
}

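/*
 * kmalloc() allocations that do not fill the whole slab object get the
 * unused tail redzoned too. Allocate 18 bytes from a 32-byte cache:
 *
 *	| bytes 0..17 in use | bytes 18..31: kmalloc redzone |
 *
 * and touch the first two redzone bytes; validation should flag the
 * corruption (two slab errors, as in the other redzone tests).
 */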
static void test_kmalloc_redzone_access(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
	u8 *p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 18));

	kasan_disable_current();

	/* Suppress the -Warray-bounds warning */
	OPTIMIZER_HIDE_VAR(p);
	p[18] = 0xab;
	p[19] = 0xab;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_free(s, p);
	kmem_cache_destroy(s);
}

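/*
 * Check that kmem_cache_destroy() copes with an object whose
 * kfree_rcu() grace period has not elapsed yet: the destroy path must
 * wait for pending RCU frees rather than report a leaked object.
 */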
struct test_kfree_rcu_struct {
	struct rcu_head rcu;
};

static void test_kfree_rcu(struct kunit *test)
{
	struct kmem_cache *s;
	struct test_kfree_rcu_struct *p;

	if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
		kunit_skip(test, "can't do kfree_rcu() when test is built-in");

	s = test_kmem_cache_create("TestSlub_kfree_rcu",
				   sizeof(struct test_kfree_rcu_struct),
				   SLAB_NO_MERGE);
	p = kmem_cache_alloc(s, GFP_KERNEL);

	kfree_rcu(p, rcu);
	kmem_cache_destroy(s);

	KUNIT_EXPECT_EQ(test, 0, slab_errors);
}

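/*
 * Helpers for test_kfree_rcu_wq_destroy(): a work item that destroys
 * its cache from workqueue context.
 */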
struct cache_destroy_work {
	struct work_struct work;
	struct kmem_cache *s;
};

static void cache_destroy_workfn(struct work_struct *w)
{
	struct cache_destroy_work *cdw;

	cdw = container_of(w, struct cache_destroy_work, work);
	kmem_cache_destroy(cdw->s);
}

#define KMEM_CACHE_DESTROY_NR 10

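/*
 * Repeatedly race a pending kfree_rcu() against kmem_cache_destroy()
 * running off a WQ_MEM_RECLAIM workqueue. The random msleep() varies
 * the timing between the grace period and the queued destroy work on
 * each iteration, to widen the window for ordering bugs in the destroy
 * path.
 */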
static void test_kfree_rcu_wq_destroy(struct kunit *test)
{
	struct test_kfree_rcu_struct *p;
	struct cache_destroy_work cdw;
	struct workqueue_struct *wq;
	struct kmem_cache *s;
	unsigned int delay;
	int i;

	if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
		kunit_skip(test, "can't do kfree_rcu() when test is built-in");

	INIT_WORK_ONSTACK(&cdw.work, cache_destroy_workfn);
	wq = alloc_workqueue("test_kfree_rcu_destroy_wq",
			WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 0);

	if (!wq)
		kunit_skip(test, "failed to alloc wq");

	for (i = 0; i < KMEM_CACHE_DESTROY_NR; i++) {
		s = test_kmem_cache_create("TestSlub_kfree_rcu_wq_destroy",
				sizeof(struct test_kfree_rcu_struct),
				SLAB_NO_MERGE);

		if (!s)
			kunit_skip(test, "failed to create cache");

		delay = get_random_u8();
		p = kmem_cache_alloc(s, GFP_KERNEL);
		kfree_rcu(p, rcu);

		cdw.s = s;

		msleep(delay);
		queue_work(wq, &cdw.work);
		flush_work(&cdw.work);
	}

	destroy_workqueue(wq);
	KUNIT_EXPECT_EQ(test, 0, slab_errors);
}

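/*
 * Allocate an object and never free it, then destroy the cache: the
 * destroy path should detect and report the leaked object (two slab
 * errors expected).
 */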
static void test_leak_destroy(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
							64, SLAB_NO_MERGE);
	kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_destroy(s);

	KUNIT_EXPECT_EQ(test, 2, slab_errors);
}

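/*
 * Verify that krealloc() keeps redzoning and __GFP_ZERO zeroing
 * consistent: shrinking must re-redzone the abandoned tail, growing
 * within the same 64-byte object must zero the newly exposed bytes and
 * keep the rest redzoned, and growing into a bigger 128-byte object
 * must preserve old contents while zeroing the added space.
 */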
static void test_krealloc_redzone_zeroing(struct kunit *test)
{
	u8 *p;
	int i;
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_krealloc", 64,
				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);

	p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 48));
	memset(p, 0xff, 48);

	kasan_disable_current();
	OPTIMIZER_HIDE_VAR(p);

	/* Test shrink */
	p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
	for (i = 40; i < 64; i++)
		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);

	/* Test grow within the same 64B kmalloc object */
	p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
	for (i = 40; i < 56; i++)
		KUNIT_EXPECT_EQ(test, p[i], 0);
	for (i = 56; i < 64; i++)
		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 0, slab_errors);

	memset(p, 0xff, 56);
	/* Test grow with allocating a bigger 128B object */
	p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
	for (i = 0; i < 56; i++)
		KUNIT_EXPECT_EQ(test, p[i], 0xff);
	for (i = 56; i < 112; i++)
		KUNIT_EXPECT_EQ(test, p[i], 0);

	kfree(p);
	kasan_enable_current();
	kmem_cache_destroy(s);
}

#ifdef CONFIG_PERF_EVENTS
#define NR_ITERATIONS 1000
#define NR_OBJECTS 1000
static void *objects[NR_OBJECTS];

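/*
 * State shared with the perf overflow handler that exercises
 * kmalloc_nolock()/kfree_nolock() from a context interrupting the slab
 * allocator at arbitrary points.
 */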
struct test_nolock_context {
	struct kunit *test;
	int callback_count;
	int alloc_ok;
	int alloc_fail;
	struct perf_event *event;
};

static struct perf_event_attr hw_attr = {
	.type = PERF_TYPE_HARDWARE,
	.config = PERF_COUNT_HW_CPU_CYCLES,
	.size = sizeof(struct perf_event_attr),
	.pinned = 1,
	.disabled = 1,
	.freq = 1,
	.sample_freq = 100000,
};

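/*
 * Runs from perf event overflow (IRQ/NMI-like) context, where
 * kmalloc_nolock() is allowed to fail rather than block, so failures
 * are only counted, not treated as errors. kfree_nolock() tolerates a
 * NULL pointer, like kfree().
 */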
static void overflow_handler_test_kmalloc_kfree_nolock(struct perf_event *event,
						       struct perf_sample_data *data,
						       struct pt_regs *regs)
{
	void *objp;
	gfp_t gfp;
	struct test_nolock_context *ctx = event->overflow_handler_context;

	/* __GFP_ACCOUNT to test kmalloc_nolock() in alloc_slab_obj_exts() */
	gfp = (ctx->callback_count % 2) ? 0 : __GFP_ACCOUNT;
	objp = kmalloc_nolock(64, gfp, NUMA_NO_NODE);

	if (objp)
		ctx->alloc_ok++;
	else
		ctx->alloc_fail++;

	kfree_nolock(objp);
	ctx->callback_count++;
}

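/*
 * Drive the overflow handler above at a high sample frequency while
 * allocating and freeing NR_OBJECTS objects per iteration, alternating
 * GFP_KERNEL and GFP_KERNEL_ACCOUNT so that both plain and
 * memcg-accounted slab paths get interrupted by nolock allocations.
 */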
static void test_kmalloc_kfree_nolock(struct kunit *test)
{
	int i, j;
	struct test_nolock_context ctx = { .test = test };
	struct perf_event *event;
	bool alloc_fail = false;

	event = perf_event_create_kernel_counter(&hw_attr, -1, current,
						 overflow_handler_test_kmalloc_kfree_nolock,
						 &ctx);
	if (IS_ERR(event))
		kunit_skip(test, "Failed to create perf event");
	ctx.event = event;
	perf_event_enable(ctx.event);
	for (i = 0; i < NR_ITERATIONS; i++) {
		for (j = 0; j < NR_OBJECTS; j++) {
			gfp_t gfp = (i % 2) ? GFP_KERNEL : GFP_KERNEL_ACCOUNT;

			objects[j] = kmalloc(64, gfp);
			if (!objects[j]) {
				j--;
				while (j >= 0)
					kfree(objects[j--]);
				alloc_fail = true;
				goto cleanup;
			}
		}
		for (j = 0; j < NR_OBJECTS; j++)
			kfree(objects[j]);
	}

cleanup:
	perf_event_disable(ctx.event);
	perf_event_release_kernel(ctx.event);

	kunit_info(test, "callback_count: %d, alloc_ok: %d, alloc_fail: %d\n",
		   ctx.callback_count, ctx.alloc_ok, ctx.alloc_fail);

	if (alloc_fail)
		kunit_skip(test, "Allocation failed");
	KUNIT_EXPECT_EQ(test, 0, slab_errors);
}
#endif

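/*
 * Runs before each test case: reset the error counter and register it
 * under the name "slab_errors" so the SLUB validation code can find it
 * through the current test's resources.
 */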
static int test_init(struct kunit *test)
{
	slab_errors = 0;

	kunit_add_named_resource(test, NULL, NULL, &resource,
					"slab_errors", &slab_errors);
	return 0;
}

static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_clobber_zone),

#ifndef CONFIG_KASAN
	KUNIT_CASE(test_next_pointer),
	KUNIT_CASE(test_first_word),
	KUNIT_CASE(test_clobber_50th_byte),
#endif

	KUNIT_CASE(test_clobber_redzone_free),
	KUNIT_CASE(test_kmalloc_redzone_access),
	KUNIT_CASE(test_kfree_rcu),
	KUNIT_CASE(test_kfree_rcu_wq_destroy),
	KUNIT_CASE(test_leak_destroy),
	KUNIT_CASE(test_krealloc_redzone_zeroing),
#ifdef CONFIG_PERF_EVENTS
	KUNIT_CASE_SLOW(test_kmalloc_kfree_nolock),
#endif
	{}
};

static struct kunit_suite test_suite = {
	.name = "slub_test",
	.init = test_init,
	.test_cases = test_cases,
};
kunit_test_suite(test_suite);

MODULE_DESCRIPTION("KUnit tests for the SLUB allocator");
MODULE_LICENSE("GPL");