xref: /linux/mm/slab_common.c (revision f49040c7aaa5532a1f94355ef5073c49e6b32349)
// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>
#include <trace/events/rcu.h>

#include "../kernel/rcu/rcu.h"
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_NO_MERGE)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM

static bool kmem_cache_is_duplicate_name(const char *name)
{
	struct kmem_cache *s;

	list_for_each_entry(s, &slab_caches, list) {
		if (!strcmp(s->name, name))
			return true;
	}

	return false;
}

static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	/* Duplicate names will confuse slabtop, et al */
	WARN(kmem_cache_is_duplicate_name(name),
			"kmem_cache of name '%s' already exists\n", name);

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user-specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	align = max(align, arch_slab_minalign());

	return ALIGN(align, sizeof(void *));
}
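
/*
 * Illustrative worked example of the logic above, not part of the original
 * source; it assumes a 64-byte cache line and sizeof(void *) == 8. For a
 * 24-byte object created with SLAB_HWCACHE_ALIGN and align == 0: ralign
 * starts at 64 and halves while 24 <= ralign / 2, ending at 32, so the
 * object is padded to a 32-byte boundary instead of wasting most of a
 * cache line on a small object.
 */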

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

#ifdef CONFIG_HARDENED_USERCOPY
	if (s->usersize)
		return 1;
#endif

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	flags = kmem_cache_flags(flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		return s;
	}
	return NULL;
}

static struct kmem_cache *create_cache(const char *name,
				       unsigned int object_size,
				       struct kmem_cache_args *args,
				       slab_flags_t flags)
{
	struct kmem_cache *s;
	int err;

	/* If a custom freelist pointer is requested make sure it's sane. */
	err = -EINVAL;
	if (args->use_freeptr_offset &&
	    (args->freeptr_offset >= object_size ||
	     !(flags & SLAB_TYPESAFE_BY_RCU) ||
	     !IS_ALIGNED(args->freeptr_offset, __alignof__(freeptr_t))))
		goto out;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;
	err = do_kmem_cache_create(s, name, object_size, args, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
out:
	return ERR_PTR(err);
}

/**
 * __kmem_cache_create_args - Create a kmem cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @object_size: The size of objects to be created in this cache.
 * @args: Additional arguments for the cache creation (see
 *        &struct kmem_cache_args).
 * @flags: See the descriptions of individual flags. The common ones are listed
 *         in the description below.
 *
 * Not to be called directly, use the kmem_cache_create() wrapper with the same
 * parameters.
 *
 * Commonly used @flags:
 *
 * &SLAB_ACCOUNT - Account allocations to memcg.
 *
 * &SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
 *
 * &SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
 *
 * &SLAB_TYPESAFE_BY_RCU - Slab page (not individual objects) freeing delayed
 * by a grace period - see the full description before using.
 *
 * Context: Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *__kmem_cache_create_args(const char *name,
					    unsigned int object_size,
					    struct kmem_cache_args *args,
					    slab_flags_t flags)
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slab_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 * It's also possible that this is the first cache created with
	 * SLAB_STORE_USER and we should init stack_depot for it.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
	if (flags & SLAB_STORE_USER)
		stack_depot_init();
#else
	flags &= ~SLAB_DEBUG_FLAGS;
#endif

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, object_size);
	if (err)
		goto out_unlock;

	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/* Fail closed on bad usersize or useroffset values. */
	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
	    WARN_ON(!args->usersize && args->useroffset) ||
	    WARN_ON(object_size < args->usersize ||
		    object_size - args->usersize < args->useroffset))
		args->usersize = args->useroffset = 0;

	if (!args->usersize)
		s = __kmem_cache_alias(name, object_size, args->align, flags,
				       args->ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	args->align = calculate_alignment(flags, args->align, object_size);
	s = create_cache(cache_name, object_size, args, flags);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
				__func__, name, err);
		else {
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(__kmem_cache_create_args);
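
/*
 * Illustrative sketch, not part of the original source: how a module would
 * typically create and use such a cache through the kmem_cache_create()
 * wrapper. "struct foo", foo_cachep and the cache name are hypothetical.
 * Guarded with #if 0 so the listing still compiles.
 */
#if 0
struct foo {
	int a;
	char buf[32];
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
				       NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

static void foo_example(void)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (!f)
		return;
	/* ... use f ... */
	kmem_cache_free(foo_cachep, f);
}
#endif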

static struct kmem_cache *kmem_buckets_cache __ro_after_init;

/**
 * kmem_buckets_create - Create a set of caches that handle dynamic sized
 *			 allocations via kmem_buckets_alloc()
 * @name: A prefix string which is used in /proc/slabinfo to identify this
 *	  cache. The individual caches will have their sizes as the suffix.
 * @flags: SLAB flags (see kmem_cache_create() for details).
 * @useroffset: Starting offset within an allocation that may be copied
 *		to/from userspace.
 * @usersize: How many bytes, starting at @useroffset, may be copied
 *		to/from userspace.
 * @ctor: A constructor for the objects, run when new allocations are made.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure. When
 * CONFIG_SLAB_BUCKETS is not enabled, ZERO_SIZE_PTR is returned, and
 * subsequent calls to kmem_buckets_alloc() will fall back to kmalloc().
 * (i.e. callers only need to check for NULL on failure.)
 */
kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset,
				  unsigned int usersize,
				  void (*ctor)(void *))
{
	unsigned long mask = 0;
	unsigned int idx;
	kmem_buckets *b;

	BUILD_BUG_ON(ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]) > BITS_PER_LONG);

	/*
	 * When the separate buckets API is not built in, just return
	 * a non-NULL value for the kmem_buckets pointer, which will be
	 * unused when performing allocations.
	 */
	if (!IS_ENABLED(CONFIG_SLAB_BUCKETS))
		return ZERO_SIZE_PTR;

	if (WARN_ON(!kmem_buckets_cache))
		return NULL;

	b = kmem_cache_alloc(kmem_buckets_cache, GFP_KERNEL|__GFP_ZERO);
	if (WARN_ON(!b))
		return NULL;

	flags |= SLAB_NO_MERGE;

	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
		char *short_size, *cache_name;
		unsigned int cache_useroffset, cache_usersize;
		unsigned int size, aligned_idx;

		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
			continue;

		size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;
		if (!size)
			continue;

		short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
		if (WARN_ON(!short_size))
			goto fail;

		if (useroffset >= size) {
			cache_useroffset = 0;
			cache_usersize = 0;
		} else {
			cache_useroffset = useroffset;
			cache_usersize = min(size - cache_useroffset, usersize);
		}

		aligned_idx = __kmalloc_index(size, false);
		if (!(*b)[aligned_idx]) {
			cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
			if (WARN_ON(!cache_name))
				goto fail;
			(*b)[aligned_idx] = kmem_cache_create_usercopy(cache_name, size,
					0, flags, cache_useroffset,
					cache_usersize, ctor);
			kfree(cache_name);
			if (WARN_ON(!(*b)[aligned_idx]))
				goto fail;
			set_bit(aligned_idx, &mask);
		}
		if (idx != aligned_idx)
			(*b)[idx] = (*b)[aligned_idx];
	}

	return b;

fail:
	for_each_set_bit(idx, &mask, ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]))
		kmem_cache_destroy((*b)[idx]);
	kmem_cache_free(kmem_buckets_cache, b);

	return NULL;
}
EXPORT_SYMBOL(kmem_buckets_create);
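
/*
 * Illustrative sketch, not part of the original source: a subsystem that
 * kmalloc()s attacker-controlled sizes can isolate those allocations in a
 * dedicated set of buckets. The names below are hypothetical; usersize of
 * UINT_MAX simply makes the whole object usercopy-eligible. Guarded with
 * #if 0 so the listing still compiles.
 */
#if 0
static kmem_buckets *foo_msg_buckets __ro_after_init;

static int __init foo_buckets_init(void)
{
	foo_msg_buckets = kmem_buckets_create("foo_msg", 0, 0, UINT_MAX, NULL);
	if (!foo_msg_buckets)
		return -ENOMEM;
	return 0;
}

static void *foo_msg_alloc(size_t len)
{
	/* Falls back to plain kmalloc() when CONFIG_SLAB_BUCKETS=n. */
	return kmem_buckets_alloc(foo_msg_buckets, len, GFP_KERNEL);
}
#endif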

/*
 * For a given kmem_cache, kmem_cache_destroy() should only be called
 * once or there will be a use-after-free problem. The actual deletion
 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
 * protection. So they are now done without holding those locks.
 */
static void kmem_cache_release(struct kmem_cache *s)
{
	kfence_shutdown_cache(s);
	if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
		sysfs_slab_release(s);
	else
		slab_kmem_cache_release(s);
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	/* in-flight kfree_rcu()'s may include objects from our cache */
	kvfree_rcu_barrier();

	if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
	    (s->flags & SLAB_TYPESAFE_BY_RCU)) {
		/*
		 * Under CONFIG_SLUB_RCU_DEBUG, when objects in a
		 * SLAB_TYPESAFE_BY_RCU slab are freed, SLUB will internally
		 * defer their freeing with call_rcu().
		 * Wait for such call_rcu() invocations here before actually
		 * destroying the cache.
		 *
		 * It doesn't matter that we haven't looked at the slab refcount
		 * yet - slabs with SLAB_TYPESAFE_BY_RCU can't be merged, so
		 * the refcount should be 1 here.
		 */
		rcu_barrier();
	}

	cpus_read_lock();
	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount) {
		mutex_unlock(&slab_mutex);
		cpus_read_unlock();
		return;
	}

	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	err = __kmem_cache_shutdown(s);
	if (!slab_in_kunit_test())
		WARN(err, "%s %s: Slab cache still has objects when called from %pS",
		     __func__, s->name, (void *)_RET_IP_);

	list_del(&s->list);

	mutex_unlock(&slab_mutex);
	cpus_read_unlock();

	if (slab_state >= FULL)
		sysfs_slab_unlink(s);
	debugfs_slab_release(s);

	if (err)
		return;

	if (s->flags & SLAB_TYPESAFE_BY_RCU)
		rcu_barrier();

	kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
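
/*
 * Illustrative teardown sketch, not part of the original source, pairing
 * with the hypothetical foo_cachep creation example above. All objects
 * must have been freed first, otherwise the WARN in kmem_cache_destroy()
 * fires. Guarded with #if 0.
 */
#if 0
static void __exit foo_cache_exit(void)
{
	kmem_cache_destroy(foo_cachep);	/* NULL-safe; call exactly once */
	foo_cachep = NULL;
}
#endif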

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	kasan_cache_shrink(cachep);

	return __kmem_cache_shrink(cachep);
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifdef CONFIG_PRINTK
static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	if (__kfence_obj_info(kpp, object, slab))
		return;
	__kmem_obj_info(kpp, object, slab);
}

/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation and last free path of that object.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct slab *slab;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	slab = virt_to_slab(object);
	if (!slab)
		return false;

	kmem_obj_info(&kp, object, slab);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (is_kfence_address(object))
		pr_cont(" (kfence)");
	if (kp.kp_objp)
		pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);
	if (kp.kp_objp) {
		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
		pr_cont(" pointer offset %lu", ptroffset);
	}
	if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
		pr_cont(" size %u", kp.kp_slab_cache->object_size);
	if (kp.kp_ret)
		pr_cont(" allocated at %pS\n", kp.kp_ret);
	else
		pr_cont("\n");
	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_stack[i]);
	}

	if (kp.kp_free_stack[0])
		pr_cont(" Free path:\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
		if (!kp.kp_free_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_free_stack[i]);
	}

	return true;
}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
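
/*
 * Illustrative debugging sketch, not part of the original source. Since
 * kmem_dump_obj() continues the current line with pr_cont(), the caller
 * prints the preamble first; foo_report_object() is hypothetical.
 * Guarded with #if 0.
 */
#if 0
static void foo_report_object(void *ptr)
{
	pr_info("foo: suspicious pointer %px:", ptr);
	if (!kmem_dump_obj(ptr))
		pr_cont(" not a slab object\n");
}
#endif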
#endif

/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;
	struct kmem_cache_args kmem_args = {};

	/*
	 * kmalloc caches guarantee alignment of at least the largest
	 * power-of-two divisor of the size. For power-of-two sizes,
	 * it is the size itself.
	 */
	if (flags & SLAB_KMALLOC)
		align = max(align, 1U << (ffs(size) - 1));
	kmem_args.align = calculate_alignment(flags, align, size);

#ifdef CONFIG_HARDENED_USERCOPY
	kmem_args.useroffset = useroffset;
	kmem_args.usersize = usersize;
#endif

	err = do_kmem_cache_create(s, name, size, &kmem_args, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

static struct kmem_cache *__init create_kmalloc_cache(const char *name,
						      unsigned int size,
						      slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES] __ro_after_init =
{ /* initialization for https://llvm.org/pr42570 */ };
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
unsigned long random_kmalloc_seed __ro_after_init;
EXPORT_SYMBOL(random_kmalloc_seed);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
u8 kmalloc_size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};
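
/*
 * Illustrative worked example of the table above, not part of the original
 * source, assuming size_index_elem() maps a byte count to (bytes - 1) / 8.
 * A 24-byte request looks up element (24 - 1) / 8 = 2, which holds 5, i.e.
 * the kmalloc-32 cache; a 70-byte request maps to (70 - 1) / 8 = 8, which
 * holds 1, the 96-byte cache. Requests above 192 bytes skip the table and
 * derive the index from fls() instead.
 */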

size_t kmalloc_size_roundup(size_t size)
{
	if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
		/*
		 * The flags don't matter since size_index is common to all.
		 * Neither does the caller for just getting ->object_size.
		 */
		return kmalloc_slab(size, NULL, GFP_KERNEL, 0)->object_size;
	}

	/* Above the smaller buckets, size is a multiple of page size. */
	if (size && size <= KMALLOC_MAX_SIZE)
		return PAGE_SIZE << get_order(size);

	/*
	 * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
	 * and very large size - kmalloc() may fail.
	 */
	return size;
}
EXPORT_SYMBOL(kmalloc_size_roundup);
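
/*
 * Illustrative sketch, not part of the original source: a caller that
 * wants to use the whole bucket rather than only the originally requested
 * length; the names are hypothetical. Guarded with #if 0.
 */
#if 0
static void *foo_alloc_full_bucket(size_t want, size_t *got)
{
	size_t full = kmalloc_size_roundup(want);	/* e.g. 1000 -> 1024 */
	void *buf = kmalloc(full, GFP_KERNEL);

	if (buf)
		*got = full;	/* safe to use all of it, fortify-wise */
	return buf;
}
#endif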

#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#ifndef CONFIG_SLUB_TINY
#define KMALLOC_RCL_NAME(sz)	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
#else
#define KMALLOC_RCL_NAME(sz)
#endif

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define __KMALLOC_RANDOM_CONCAT(a, b) a ## b
#define KMALLOC_RANDOM_NAME(N, sz) __KMALLOC_RANDOM_CONCAT(KMA_RAND_, N)(sz)
#define KMA_RAND_1(sz)                  .name[KMALLOC_RANDOM_START +  1] = "kmalloc-rnd-01-" #sz,
#define KMA_RAND_2(sz)  KMA_RAND_1(sz)  .name[KMALLOC_RANDOM_START +  2] = "kmalloc-rnd-02-" #sz,
#define KMA_RAND_3(sz)  KMA_RAND_2(sz)  .name[KMALLOC_RANDOM_START +  3] = "kmalloc-rnd-03-" #sz,
#define KMA_RAND_4(sz)  KMA_RAND_3(sz)  .name[KMALLOC_RANDOM_START +  4] = "kmalloc-rnd-04-" #sz,
#define KMA_RAND_5(sz)  KMA_RAND_4(sz)  .name[KMALLOC_RANDOM_START +  5] = "kmalloc-rnd-05-" #sz,
#define KMA_RAND_6(sz)  KMA_RAND_5(sz)  .name[KMALLOC_RANDOM_START +  6] = "kmalloc-rnd-06-" #sz,
#define KMA_RAND_7(sz)  KMA_RAND_6(sz)  .name[KMALLOC_RANDOM_START +  7] = "kmalloc-rnd-07-" #sz,
#define KMA_RAND_8(sz)  KMA_RAND_7(sz)  .name[KMALLOC_RANDOM_START +  8] = "kmalloc-rnd-08-" #sz,
#define KMA_RAND_9(sz)  KMA_RAND_8(sz)  .name[KMALLOC_RANDOM_START +  9] = "kmalloc-rnd-09-" #sz,
#define KMA_RAND_10(sz) KMA_RAND_9(sz)  .name[KMALLOC_RANDOM_START + 10] = "kmalloc-rnd-10-" #sz,
#define KMA_RAND_11(sz) KMA_RAND_10(sz) .name[KMALLOC_RANDOM_START + 11] = "kmalloc-rnd-11-" #sz,
#define KMA_RAND_12(sz) KMA_RAND_11(sz) .name[KMALLOC_RANDOM_START + 12] = "kmalloc-rnd-12-" #sz,
#define KMA_RAND_13(sz) KMA_RAND_12(sz) .name[KMALLOC_RANDOM_START + 13] = "kmalloc-rnd-13-" #sz,
#define KMA_RAND_14(sz) KMA_RAND_13(sz) .name[KMALLOC_RANDOM_START + 14] = "kmalloc-rnd-14-" #sz,
#define KMA_RAND_15(sz) KMA_RAND_14(sz) .name[KMALLOC_RANDOM_START + 15] = "kmalloc-rnd-15-" #sz,
#else // CONFIG_RANDOM_KMALLOC_CACHES
#define KMALLOC_RANDOM_NAME(N, sz)
#endif

#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	KMALLOC_RCL_NAME(__short_size)				\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size)	\
	.size = __size,						\
}

/*
 * kmalloc_info[] is to make slab_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M)
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN.
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		!is_power_of_2(KMALLOC_MIN_SIZE));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(kmalloc_size_index))
			break;
		kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 8;
	}
}

static unsigned int __kmalloc_minalign(void)
{
	unsigned int minalign = dma_get_cache_alignment();

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
	    is_swiotlb_allocated())
		minalign = ARCH_KMALLOC_MINALIGN;

	return max(minalign, arch_slab_minalign());
}

static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
{
	slab_flags_t flags = 0;
	unsigned int minalign = __kmalloc_minalign();
	unsigned int aligned_size = kmalloc_info[idx].size;
	int aligned_idx = idx;

	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_CGROUP)) {
		if (mem_cgroup_kmem_disabled()) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
		flags |= SLAB_CACHE_DMA;
	}

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END)
		flags |= SLAB_NO_MERGE;
#endif

	/*
	 * If CONFIG_MEMCG is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_NORMAL))
		flags |= SLAB_NO_MERGE;

	if (minalign > ARCH_KMALLOC_MINALIGN) {
		aligned_size = ALIGN(aligned_size, minalign);
		aligned_idx = __kmalloc_index(aligned_size, false);
	}

	if (!kmalloc_caches[type][aligned_idx])
		kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
					kmalloc_info[aligned_idx].name[type],
					aligned_size, flags);
	if (idx != aligned_idx)
		kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(void)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * Including KMALLOC_CGROUP if CONFIG_MEMCG is defined
	 */
	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
		/* Caches that are NOT of the two-to-the-power-of size. */
		if (KMALLOC_MIN_SIZE <= 32)
			new_kmalloc_cache(1, type);
		if (KMALLOC_MIN_SIZE <= 64)
			new_kmalloc_cache(2, type);

		/* Caches that are of the two-to-the-power-of size. */
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
			new_kmalloc_cache(i, type);
	}
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	random_kmalloc_seed = get_random_u64();
#endif

	/* Kmalloc array is now usable */
	slab_state = UP;

	if (IS_ENABLED(CONFIG_SLAB_BUCKETS))
		kmem_buckets_cache = kmem_cache_create("kmalloc_buckets",
						       sizeof(kmem_buckets),
						       0, SLAB_NO_MERGE, NULL);
}

/**
 * __ksize -- Report full size of underlying allocation
 * @object: pointer to the object
 *
 * This should only be used internally to query the true size of allocations.
 * It is not meant to be a way to discover the usable size of an allocation
 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
 * and/or FORTIFY_SOURCE.
 *
 * Return: size of the actual memory used by @object in bytes
 */
size_t __ksize(const void *object)
{
	struct folio *folio;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	folio = virt_to_folio(object);

	if (unlikely(!folio_test_slab(folio))) {
		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
			return 0;
		if (WARN_ON(object != folio_address(folio)))
			return 0;
		return folio_size(folio);
	}

#ifdef CONFIG_SLUB_DEBUG
	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
#endif

	return slab_ksize(folio_slab(folio)->slab_cache);
}

gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = get_random_u32_below(i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	freelist_randomize(cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#ifdef CONFIG_SLUB_DEBUG
#define SLABINFO_RIGHTS (0400)

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Acquiring slab_mutex here is risky since we don't want to
	 * sleep in the OOM path. But without holding the mutex, the
	 * list traversal may race with cache destruction and crash.
	 * Use mutex_trylock() to protect the traversal, and dump
	 * nothing if the mutex cannot be acquired.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name                      Used          Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLUB_DEBUG */

/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks) {
		kasan_unpoison_range(mem, ks);
		memzero_explicit(mem, ks);
	}
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);
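
/*
 * Illustrative sketch, not part of the original source: typical use of
 * kfree_sensitive() for key material. foo_program_key() and the other
 * names are hypothetical. Guarded with #if 0.
 */
#if 0
static int foo_handle_key(const u8 *key_in, size_t len)
{
	u8 *key = kmemdup(key_in, len, GFP_KERNEL);
	int err;

	if (!key)
		return -ENOMEM;
	err = foo_program_key(key, len);
	/* Zeroes the whole bucket (>= len bytes), then frees it. */
	kfree_sensitive(key);
	return err;
}
#endif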

size_t ksize(const void *objp)
{
	/*
	 * We need to first check that the pointer to the object is valid.
	 * The KASAN report printed from ksize() is more useful than when
	 * it's printed later when the behaviour could be undefined due to
	 * a potential use-after-free or double-free.
	 *
	 * We use kasan_check_byte(), which is supported for the hardware
	 * tag-based KASAN mode, unlike kasan_check_read/write().
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	return kfence_ksize(objp) ?: __ksize(objp);
}
EXPORT_SYMBOL(ksize);

#ifdef CONFIG_BPF_SYSCALL
#include <linux/btf.h>

__bpf_kfunc_start_defs();

__bpf_kfunc struct kmem_cache *bpf_get_kmem_cache(u64 addr)
{
	struct slab *slab;

	if (!virt_addr_valid((void *)(long)addr))
		return NULL;

	slab = virt_to_slab((void *)(long)addr);
	return slab ? slab->slab_cache : NULL;
}

__bpf_kfunc_end_defs();
#endif /* CONFIG_BPF_SYSCALL */

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

#ifndef CONFIG_KVFREE_RCU_BATCHED

void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	if (head) {
		kasan_record_aux_stack(ptr);
		call_rcu(head, kvfree_rcu_cb);
		return;
	}

	// kvfree_rcu(one_arg) call.
	might_sleep();
	synchronize_rcu();
	kvfree(ptr);
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
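
/*
 * Illustrative sketch, not part of the original source: the
 * double-argument kvfree_rcu() API that ends up in kvfree_call_rcu().
 * "struct foo" is hypothetical. Guarded with #if 0.
 */
#if 0
struct foo {
	int a;
	struct rcu_head rcu;
};

static void foo_release(struct foo *f)
{
	/* Frees f after a grace period, via the embedded rcu_head. */
	kvfree_rcu(f, rcu);
}
#endif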

void __init kvfree_rcu_init(void)
{
}

#else /* CONFIG_KVFREE_RCU_BATCHED */

/*
 * This rcu parameter is runtime-read-only. It reflects
 * a minimum allowed number of objects which can be cached
 * per-CPU. Object size is equal to one page. This value
 * can be changed at boot time.
 */
static int rcu_min_cached_objs = 5;
module_param(rcu_min_cached_objs, int, 0444);

// A page shrinker can ask for pages to be freed to make them
// available for other parts of the system. This usually happens
// under low memory conditions, and in that case we should also
// defer page-cache filling for a short time period.
//
// The default value is 5 seconds, which is long enough to reduce
// interference with the shrinker while it asks other systems to
// drain their caches.
static int rcu_delay_page_cache_fill_msec = 5000;
module_param(rcu_delay_page_cache_fill_msec, int, 0444);

static struct workqueue_struct *rcu_reclaim_wq;

/* Maximum number of jiffies to wait before draining a batch. */
#define KFREE_DRAIN_JIFFIES (5 * HZ)
#define KFREE_N_BATCHES 2
#define FREE_N_CHANNELS 2

/**
 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
 * @list: List node. All blocks are linked between each other
 * @gp_snap: Snapshot of RCU state for objects placed to this bulk
 * @nr_records: Number of active pointers in the array
 * @records: Array of the kvfree_rcu() pointers
 */
struct kvfree_rcu_bulk_data {
	struct list_head list;
	struct rcu_gp_oldstate gp_snap;
	unsigned long nr_records;
	void *records[] __counted_by(nr_records);
};

/*
 * This macro defines how many entries the "records" array
 * will contain. It is based on the fact that the size of
 * kvfree_rcu_bulk_data structure becomes exactly one page.
 */
#define KVFREE_BULK_MAX_ENTR \
	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
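
/*
 * Illustrative arithmetic, not part of the original source: on a typical
 * 64-bit build with 4 KiB pages, the header above is 40 bytes (16 for the
 * list_head, 16 for the rcu_gp_oldstate snapshot, 8 for nr_records), so
 * KVFREE_BULK_MAX_ENTR = (4096 - 40) / 8 = 507 pointers per block. The
 * exact figure is arch- and config-dependent.
 */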
1351 
1352 /**
1353  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
1354  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
1355  * @head_free: List of kfree_rcu() objects waiting for a grace period
1356  * @head_free_gp_snap: Grace-period snapshot to check for attempted premature frees.
1357  * @bulk_head_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
1358  * @krcp: Pointer to @kfree_rcu_cpu structure
1359  */
1360 
1361 struct kfree_rcu_cpu_work {
1362 	struct rcu_work rcu_work;
1363 	struct rcu_head *head_free;
1364 	struct rcu_gp_oldstate head_free_gp_snap;
1365 	struct list_head bulk_head_free[FREE_N_CHANNELS];
1366 	struct kfree_rcu_cpu *krcp;
1367 };
1368 
1369 /**
1370  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
1371  * @head: List of kfree_rcu() objects not yet waiting for a grace period
1372  * @head_gp_snap: Snapshot of RCU state for objects placed to "@head"
1373  * @bulk_head: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
1374  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
1375  * @lock: Synchronize access to this structure
1376  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
1377  * @initialized: The @rcu_work fields have been initialized
1378  * @head_count: Number of objects in rcu_head singular list
1379  * @bulk_count: Number of objects in bulk-list
1380  * @bkvcache:
1381  *	A simple cache list that contains objects for reuse purpose.
1382  *	In order to save some per-cpu space the list is singular.
1383  *	Even though it is lockless an access has to be protected by the
1384  *	per-cpu lock.
1385  * @page_cache_work: A work to refill the cache when it is empty
1386  * @backoff_page_cache_fill: Delay cache refills
1387  * @work_in_progress: Indicates that page_cache_work is running
1388  * @hrtimer: A hrtimer for scheduling a page_cache_work
1389  * @nr_bkv_objs: number of allocated objects at @bkvcache.
1390  *
1391  * This is a per-CPU structure.  The reason that it is not included in
1392  * the rcu_data structure is to permit this code to be extracted from
1393  * the RCU files.  Such extraction could allow further optimization of
1394  * the interactions with the slab allocators.
1395  */
1396 struct kfree_rcu_cpu {
1397 	// Objects queued on a linked list
1398 	// through their rcu_head structures.
1399 	struct rcu_head *head;
1400 	unsigned long head_gp_snap;
1401 	atomic_t head_count;
1402 
1403 	// Objects queued on a bulk-list.
1404 	struct list_head bulk_head[FREE_N_CHANNELS];
1405 	atomic_t bulk_count[FREE_N_CHANNELS];
1406 
1407 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
1408 	raw_spinlock_t lock;
1409 	struct delayed_work monitor_work;
1410 	bool initialized;
1411 
1412 	struct delayed_work page_cache_work;
1413 	atomic_t backoff_page_cache_fill;
1414 	atomic_t work_in_progress;
1415 	struct hrtimer hrtimer;
1416 
1417 	struct llist_head bkvcache;
1418 	int nr_bkv_objs;
1419 };
1420 
1421 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
1422 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
1423 };
1424 
1425 static __always_inline void
debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data * bhead)1426 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
1427 {
1428 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
1429 	int i;
1430 
1431 	for (i = 0; i < bhead->nr_records; i++)
1432 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
1433 #endif
1434 }
1435 
1436 static inline struct kfree_rcu_cpu *
krc_this_cpu_lock(unsigned long * flags)1437 krc_this_cpu_lock(unsigned long *flags)
1438 {
1439 	struct kfree_rcu_cpu *krcp;
1440 
1441 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
1442 	krcp = this_cpu_ptr(&krc);
1443 	raw_spin_lock(&krcp->lock);
1444 
1445 	return krcp;
1446 }
1447 
1448 static inline void
krc_this_cpu_unlock(struct kfree_rcu_cpu * krcp,unsigned long flags)1449 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
1450 {
1451 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
1452 }
1453 
1454 static inline struct kvfree_rcu_bulk_data *
get_cached_bnode(struct kfree_rcu_cpu * krcp)1455 get_cached_bnode(struct kfree_rcu_cpu *krcp)
1456 {
1457 	if (!krcp->nr_bkv_objs)
1458 		return NULL;
1459 
1460 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
1461 	return (struct kvfree_rcu_bulk_data *)
1462 		llist_del_first(&krcp->bkvcache);
1463 }
1464 
1465 static inline bool
put_cached_bnode(struct kfree_rcu_cpu * krcp,struct kvfree_rcu_bulk_data * bnode)1466 put_cached_bnode(struct kfree_rcu_cpu *krcp,
1467 	struct kvfree_rcu_bulk_data *bnode)
1468 {
1469 	// Check the limit.
1470 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
1471 		return false;
1472 
1473 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
1474 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
1475 	return true;
1476 }
1477 
1478 static int
drain_page_cache(struct kfree_rcu_cpu * krcp)1479 drain_page_cache(struct kfree_rcu_cpu *krcp)
1480 {
1481 	unsigned long flags;
1482 	struct llist_node *page_list, *pos, *n;
1483 	int freed = 0;
1484 
1485 	if (!rcu_min_cached_objs)
1486 		return 0;
1487 
1488 	raw_spin_lock_irqsave(&krcp->lock, flags);
1489 	page_list = llist_del_all(&krcp->bkvcache);
1490 	WRITE_ONCE(krcp->nr_bkv_objs, 0);
1491 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
1492 
1493 	llist_for_each_safe(pos, n, page_list) {
1494 		free_page((unsigned long)pos);
1495 		freed++;
1496 	}
1497 
1498 	return freed;
1499 }
1500 
1501 static void
kvfree_rcu_bulk(struct kfree_rcu_cpu * krcp,struct kvfree_rcu_bulk_data * bnode,int idx)1502 kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
1503 	struct kvfree_rcu_bulk_data *bnode, int idx)
1504 {
1505 	unsigned long flags;
1506 	int i;
1507 
1508 	if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
1509 		debug_rcu_bhead_unqueue(bnode);
1510 		rcu_lock_acquire(&rcu_callback_map);
1511 		if (idx == 0) { // kmalloc() / kfree().
1512 			trace_rcu_invoke_kfree_bulk_callback(
1513 				"slab", bnode->nr_records,
1514 				bnode->records);
1515 
1516 			kfree_bulk(bnode->nr_records, bnode->records);
1517 		} else { // vmalloc() / vfree().
1518 			for (i = 0; i < bnode->nr_records; i++) {
1519 				trace_rcu_invoke_kvfree_callback(
1520 					"slab", bnode->records[i], 0);
1521 
1522 				vfree(bnode->records[i]);
1523 			}
1524 		}
1525 		rcu_lock_release(&rcu_callback_map);
1526 	}
1527 
1528 	raw_spin_lock_irqsave(&krcp->lock, flags);
1529 	if (put_cached_bnode(krcp, bnode))
1530 		bnode = NULL;
1531 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
1532 
1533 	if (bnode)
1534 		free_page((unsigned long) bnode);
1535 
1536 	cond_resched_tasks_rcu_qs();
1537 }
1538 
1539 static void
kvfree_rcu_list(struct rcu_head * head)1540 kvfree_rcu_list(struct rcu_head *head)
1541 {
1542 	struct rcu_head *next;
1543 
1544 	for (; head; head = next) {
1545 		void *ptr = (void *) head->func;
1546 		unsigned long offset = (void *) head - ptr;
1547 
1548 		next = head->next;
1549 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
1550 		rcu_lock_acquire(&rcu_callback_map);
1551 		trace_rcu_invoke_kvfree_callback("slab", head, offset);
1552 
1553 		kvfree(ptr);
1554 
1555 		rcu_lock_release(&rcu_callback_map);
1556 		cond_resched_tasks_rcu_qs();
1557 	}
1558 }
1559 
1560 /*
1561  * This function is invoked in workqueue context after a grace period.
1562  * It frees all the objects queued on ->bulk_head_free or ->head_free.
1563  */
kfree_rcu_work(struct work_struct * work)1564 static void kfree_rcu_work(struct work_struct *work)
1565 {
1566 	unsigned long flags;
1567 	struct kvfree_rcu_bulk_data *bnode, *n;
1568 	struct list_head bulk_head[FREE_N_CHANNELS];
1569 	struct rcu_head *head;
1570 	struct kfree_rcu_cpu *krcp;
1571 	struct kfree_rcu_cpu_work *krwp;
1572 	struct rcu_gp_oldstate head_gp_snap;
1573 	int i;
1574 
1575 	krwp = container_of(to_rcu_work(work),
1576 		struct kfree_rcu_cpu_work, rcu_work);
1577 	krcp = krwp->krcp;
1578 
1579 	raw_spin_lock_irqsave(&krcp->lock, flags);
1580 	// Channels 1 and 2.
1581 	for (i = 0; i < FREE_N_CHANNELS; i++)
1582 		list_replace_init(&krwp->bulk_head_free[i], &bulk_head[i]);
1583 
1584 	// Channel 3.
1585 	head = krwp->head_free;
1586 	krwp->head_free = NULL;
1587 	head_gp_snap = krwp->head_free_gp_snap;
1588 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
1589 
1590 	// Handle the first two channels.
1591 	for (i = 0; i < FREE_N_CHANNELS; i++) {
1592 		// Start from the tail page, so a GP is likely passed for it.
1593 		list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
1594 			kvfree_rcu_bulk(krcp, bnode, i);
1595 	}
1596 
1597 	/*
1598 	 * This is used when the "bulk" path can not be used for the
1599 	 * double-argument of kvfree_rcu().  This happens when the
1600 	 * page-cache is empty, which means that objects are instead
1601 	 * queued on a linked list through their rcu_head structures.
1602 	 * This list is named "Channel 3".
1603 	 */
1604 	if (head && !WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&head_gp_snap)))
1605 		kvfree_rcu_list(head);
1606 }
1607 
1608 static bool
need_offload_krc(struct kfree_rcu_cpu * krcp)1609 need_offload_krc(struct kfree_rcu_cpu *krcp)
1610 {
1611 	int i;
1612 
1613 	for (i = 0; i < FREE_N_CHANNELS; i++)
1614 		if (!list_empty(&krcp->bulk_head[i]))
1615 			return true;
1616 
1617 	return !!READ_ONCE(krcp->head);
1618 }
1619 
1620 static bool
need_wait_for_krwp_work(struct kfree_rcu_cpu_work * krwp)1621 need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
1622 {
1623 	int i;
1624 
1625 	for (i = 0; i < FREE_N_CHANNELS; i++)
1626 		if (!list_empty(&krwp->bulk_head_free[i]))
1627 			return true;
1628 
1629 	return !!krwp->head_free;
1630 }
1631 
krc_count(struct kfree_rcu_cpu * krcp)1632 static int krc_count(struct kfree_rcu_cpu *krcp)
1633 {
1634 	int sum = atomic_read(&krcp->head_count);
1635 	int i;
1636 
1637 	for (i = 0; i < FREE_N_CHANNELS; i++)
1638 		sum += atomic_read(&krcp->bulk_count[i]);
1639 
1640 	return sum;
1641 }
1642 
1643 static void
__schedule_delayed_monitor_work(struct kfree_rcu_cpu * krcp)1644 __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
1645 {
1646 	long delay, delay_left;
1647 
1648 	delay = krc_count(krcp) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES;
1649 	if (delayed_work_pending(&krcp->monitor_work)) {
1650 		delay_left = krcp->monitor_work.timer.expires - jiffies;
1651 		if (delay < delay_left)
1652 			mod_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay);
1653 		return;
1654 	}
1655 	queue_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay);
1656 }
1657 
1658 static void
schedule_delayed_monitor_work(struct kfree_rcu_cpu * krcp)1659 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
1660 {
1661 	unsigned long flags;
1662 
1663 	raw_spin_lock_irqsave(&krcp->lock, flags);
1664 	__schedule_delayed_monitor_work(krcp);
1665 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
1666 }
1667 
1668 static void
kvfree_rcu_drain_ready(struct kfree_rcu_cpu * krcp)1669 kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
1670 {
1671 	struct list_head bulk_ready[FREE_N_CHANNELS];
1672 	struct kvfree_rcu_bulk_data *bnode, *n;
1673 	struct rcu_head *head_ready = NULL;
1674 	unsigned long flags;
1675 	int i;
1676 
1677 	raw_spin_lock_irqsave(&krcp->lock, flags);
1678 	for (i = 0; i < FREE_N_CHANNELS; i++) {
1679 		INIT_LIST_HEAD(&bulk_ready[i]);
1680 
1681 		list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
1682 			if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))
1683 				break;
1684 
1685 			atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
1686 			list_move(&bnode->list, &bulk_ready[i]);
1687 		}
1688 	}
1689 
1690 	if (krcp->head && poll_state_synchronize_rcu(krcp->head_gp_snap)) {
1691 		head_ready = krcp->head;
1692 		atomic_set(&krcp->head_count, 0);
1693 		WRITE_ONCE(krcp->head, NULL);
1694 	}
1695 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
1696 
1697 	for (i = 0; i < FREE_N_CHANNELS; i++) {
1698 		list_for_each_entry_safe(bnode, n, &bulk_ready[i], list)
1699 			kvfree_rcu_bulk(krcp, bnode, i);
1700 	}
1701 
1702 	if (head_ready)
1703 		kvfree_rcu_list(head_ready);
1704 }
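
/*
 * kvfree_rcu_drain_ready() is built on the polled grace-period API.
 * An illustrative sketch of that pattern (not code from this file):
 *
 *	struct rcu_gp_oldstate snap;
 *
 *	get_state_synchronize_rcu_full(&snap);	// snapshot the GP state
 *	...
 *	if (poll_state_synchronize_rcu_full(&snap))
 *		;	// a full grace period has elapsed since the snapshot
 *
 * Objects whose snapshot has not yet expired are simply left queued
 * for a later pass.
 */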
1705 
1706 /*
1707  * Return: %true if a work is queued, %false otherwise.
1708  */
1709 static bool
1710 kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
1711 {
1712 	unsigned long flags;
1713 	bool queued = false;
1714 	int i, j;
1715 
1716 	raw_spin_lock_irqsave(&krcp->lock, flags);
1717 
1718 	// Attempt to start a new batch.
1719 	for (i = 0; i < KFREE_N_BATCHES; i++) {
1720 		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
1721 
1722 		// Try to detach bulk_head or head and attach it, but only
1723 		// when all channels are free. A busy channel means that this
1724 		// krwp still has ongoing RCU work handling a previous batch.
1725 		if (need_wait_for_krwp_work(krwp))
1726 			continue;
1727 
1728 		// kvfree_rcu_drain_ready() might have drained this krcp; if so, give up.
1729 		if (need_offload_krc(krcp)) {
1730 			// Channel 1 corresponds to the SLAB-pointer bulk path.
1731 			// Channel 2 corresponds to vmalloc-pointer bulk path.
1732 			for (j = 0; j < FREE_N_CHANNELS; j++) {
1733 				if (list_empty(&krwp->bulk_head_free[j])) {
1734 					atomic_set(&krcp->bulk_count[j], 0);
1735 					list_replace_init(&krcp->bulk_head[j],
1736 						&krwp->bulk_head_free[j]);
1737 				}
1738 			}
1739 
1740 			// Channel 3 corresponds to both SLAB and vmalloc
1741 			// objects queued on the linked list.
1742 			if (!krwp->head_free) {
1743 				krwp->head_free = krcp->head;
1744 				get_state_synchronize_rcu_full(&krwp->head_free_gp_snap);
1745 				atomic_set(&krcp->head_count, 0);
1746 				WRITE_ONCE(krcp->head, NULL);
1747 			}
1748 
1749 			// One work item serves one batch, and one batch
1750 			// covers all three "free channels". Break out of
1751 			// the loop since this CPU is done, so queuing an
1752 			// RCU work here _always_ succeeds.
1753 			queued = queue_rcu_work(rcu_reclaim_wq, &krwp->rcu_work);
1754 			WARN_ON_ONCE(!queued);
1755 			break;
1756 		}
1757 	}
1758 
1759 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
1760 	return queued;
1761 }
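
/*
 * One batch (one krwp) thus carries up to three detached channels at
 * once, and there are KFREE_N_BATCHES slots per CPU, so a new batch
 * can usually be started while a previous one still waits for its
 * grace period.
 */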
1762 
1763 /*
1764  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
1765  */
1766 static void kfree_rcu_monitor(struct work_struct *work)
1767 {
1768 	struct kfree_rcu_cpu *krcp = container_of(work,
1769 		struct kfree_rcu_cpu, monitor_work.work);
1770 
1771 	// Drain objects that are ready to be reclaimed.
1772 	kvfree_rcu_drain_ready(krcp);
1773 
1774 	// Queue a batch for the remaining objects.
1775 	kvfree_rcu_queue_batch(krcp);
1776 
1777 	// If there is nothing to detach, it means that our job is
1778 	// successfully done here. If at least one of the channels
1779 	// is still busy, we should rearm the work to repeat the
1780 	// attempt, because the previous batches are still in
1781 	// progress.
1782 	if (need_offload_krc(krcp))
1783 		schedule_delayed_monitor_work(krcp);
1784 }
1785 
1786 static void fill_page_cache_func(struct work_struct *work)
1787 {
1788 	struct kvfree_rcu_bulk_data *bnode;
1789 	struct kfree_rcu_cpu *krcp =
1790 		container_of(work, struct kfree_rcu_cpu,
1791 			page_cache_work.work);
1792 	unsigned long flags;
1793 	int nr_pages;
1794 	bool pushed;
1795 	int i;
1796 
1797 	nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
1798 		1 : rcu_min_cached_objs;
1799 
1800 	for (i = READ_ONCE(krcp->nr_bkv_objs); i < nr_pages; i++) {
1801 		bnode = (struct kvfree_rcu_bulk_data *)
1802 			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1803 
1804 		if (!bnode)
1805 			break;
1806 
1807 		raw_spin_lock_irqsave(&krcp->lock, flags);
1808 		pushed = put_cached_bnode(krcp, bnode);
1809 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
1810 
1811 		if (!pushed) {
1812 			free_page((unsigned long) bnode);
1813 			break;
1814 		}
1815 	}
1816 
1817 	atomic_set(&krcp->work_in_progress, 0);
1818 	atomic_set(&krcp->backoff_page_cache_fill, 0);
1819 }
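
/*
 * Note that when the shrinker has set ->backoff_page_cache_fill (see
 * kfree_rcu_shrink_count() below), the refill above is limited to a
 * single page; otherwise it fills the cache up to rcu_min_cached_objs
 * pages.
 */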
1820 
1821 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
1822 // state specified by flags.  If can_alloc is true, the caller must
1823 // be schedulable and not be holding any locks or mutexes that might be
1824 // acquired by the memory allocator or anything that it might invoke.
1825 // Returns true if ptr was successfully recorded, else the caller must
1826 // use a fallback.
1827 static inline bool
1828 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
1829 	unsigned long *flags, void *ptr, bool can_alloc)
1830 {
1831 	struct kvfree_rcu_bulk_data *bnode;
1832 	int idx;
1833 
1834 	*krcp = krc_this_cpu_lock(flags);
1835 	if (unlikely(!(*krcp)->initialized))
1836 		return false;
1837 
1838 	idx = !!is_vmalloc_addr(ptr);
1839 	bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx],
1840 		struct kvfree_rcu_bulk_data, list);
1841 
1842 	/* Check if a new block is required. */
1843 	if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) {
1844 		bnode = get_cached_bnode(*krcp);
1845 		if (!bnode && can_alloc) {
1846 			krc_this_cpu_unlock(*krcp, *flags);
1847 
1848 			// __GFP_NORETRY - allows a light-weight direct
1849 			// reclaim, which is acceptable because it minimizes
1850 			// how often this fallback path is hit. It also
1851 			// forbids invoking the OOM killer, which is
1852 			// beneficial since we are about to release memory.
1853 			//
1854 			// __GFP_NOMEMALLOC - prevents consuming all of the
1855 			// memory reserves. Note that we have a fallback path.
1856 			//
1857 			// __GFP_NOWARN - an allocation is expected to fail
1858 			// under low-memory or high memory-pressure scenarios.
1859 			bnode = (struct kvfree_rcu_bulk_data *)
1860 				__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1861 			raw_spin_lock_irqsave(&(*krcp)->lock, *flags);
1862 		}
1863 
1864 		if (!bnode)
1865 			return false;
1866 
1867 		// Initialize the new block and attach it.
1868 		bnode->nr_records = 0;
1869 		list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
1870 	}
1871 
1872 	// Finally insert and update the GP for this page.
1873 	bnode->nr_records++;
1874 	bnode->records[bnode->nr_records - 1] = ptr;
1875 	get_state_synchronize_rcu_full(&bnode->gp_snap);
1876 	atomic_inc(&(*krcp)->bulk_count[idx]);
1877 
1878 	return true;
1879 }
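
/*
 * A bulk block handled above occupies one page: the
 * kvfree_rcu_bulk_data header (list linkage, GP snapshot, nr_records)
 * followed by an array of up to KVFREE_BULK_MAX_ENTR object pointers.
 * This layout is inferred from the structure's use here, not restated
 * from its definition.
 */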
1880 
1881 static enum hrtimer_restart
1882 schedule_page_work_fn(struct hrtimer *t)
1883 {
1884 	struct kfree_rcu_cpu *krcp =
1885 		container_of(t, struct kfree_rcu_cpu, hrtimer);
1886 
1887 	queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
1888 	return HRTIMER_NORESTART;
1889 }
1890 
1891 static void
1892 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
1893 {
1894 	// If cache disabled, bail out.
1895 	if (!rcu_min_cached_objs)
1896 		return;
1897 
1898 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
1899 			!atomic_xchg(&krcp->work_in_progress, 1)) {
1900 		if (atomic_read(&krcp->backoff_page_cache_fill)) {
1901 			queue_delayed_work(rcu_reclaim_wq,
1902 				&krcp->page_cache_work,
1903 					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
1904 		} else {
1905 			hrtimer_setup(&krcp->hrtimer, schedule_page_work_fn, CLOCK_MONOTONIC,
1906 				      HRTIMER_MODE_REL);
1907 			hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
1908 		}
1909 	}
1910 }
1911 
1912 void __init kfree_rcu_scheduler_running(void)
1913 {
1914 	int cpu;
1915 
1916 	for_each_possible_cpu(cpu) {
1917 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
1918 
1919 		if (need_offload_krc(krcp))
1920 			schedule_delayed_monitor_work(krcp);
1921 	}
1922 }
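
/*
 * Objects queued before the RCU scheduler is up do not get a monitor
 * work scheduled (see the rcu_scheduler_active check in
 * kvfree_call_rcu() below); this __init hook arms the monitor for any
 * such early-boot objects.
 */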
1923 
1924 /*
1925  * Queue a request for lazy invocation of the appropriate free routine
1926  * after a grace period.  Please note that three paths are maintained,
1927  * two for the common case using arrays of pointers and a third one that
1928  * is used only when the main paths cannot be used, for example, due to
1929  * memory pressure.
1930  *
1931  * Each kvfree_call_rcu() request is added to a batch. The batch is drained
1932  * every KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch are
1933  * freed in workqueue context. Batching requests together reduces the number
1934  * of grace periods needed during heavy kfree_rcu()/kvfree_rcu() load.
1935  */
1936 void kvfree_call_rcu(struct rcu_head *head, void *ptr)
1937 {
1938 	unsigned long flags;
1939 	struct kfree_rcu_cpu *krcp;
1940 	bool success;
1941 
1942 	/*
1943 	 * Note that the head-less variant has a limitation: it may
1944 	 * fall back to synchronize_rcu(), which is why there is a
1945 	 * clear rule for such objects: they may be used only from a
1946 	 * context where might_sleep() is allowed. Everywhere else,
1947 	 * please embed an rcu_head in your data.
1948 	 */
1949 	if (!head)
1950 		might_sleep();
1951 
1952 	// Queue the object but don't yet schedule the batch.
1953 	if (debug_rcu_head_queue(ptr)) {
1954 		// Probable double kfree_rcu(), just leak.
1955 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
1956 			  __func__, head);
1957 
1958 		// Treat it as a success and leave.
1959 		return;
1960 	}
1961 
1962 	kasan_record_aux_stack(ptr);
1963 	success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
1964 	if (!success) {
1965 		run_page_cache_worker(krcp);
1966 
1967 		if (head == NULL)
1968 			// Inline if kvfree_rcu(one_arg) call.
1969 			goto unlock_return;
1970 
1971 		head->func = ptr;
1972 		head->next = krcp->head;
1973 		WRITE_ONCE(krcp->head, head);
1974 		atomic_inc(&krcp->head_count);
1975 
1976 		// Take a snapshot for this krcp.
1977 		krcp->head_gp_snap = get_state_synchronize_rcu();
1978 		success = true;
1979 	}
1980 
1981 	/*
1982 	 * The kvfree_rcu() caller considers the pointer freed at this point
1983 	 * and likely removes any references to it. Since the actual slab
1984 	 * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
1985 	 * this object (no scanning or false positives reporting).
1986 	 */
1987 	kmemleak_ignore(ptr);
1988 
1989 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
1990 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
1991 		__schedule_delayed_monitor_work(krcp);
1992 
1993 unlock_return:
1994 	krc_this_cpu_unlock(krcp, flags);
1995 
1996 	/*
1997 	 * Inline kvfree() after synchronize_rcu(). We can do this
1998 	 * only from a might_sleep() context, so that the current
1999 	 * CPU can pass through a quiescent state.
2000 	 */
2001 	if (!success) {
2002 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
2003 		synchronize_rcu();
2004 		kvfree(ptr);
2005 	}
2006 }
2007 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
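
/*
 * An illustrative usage sketch, not code from this file. Callers
 * normally reach kvfree_call_rcu() through the kfree_rcu() and
 * kvfree_rcu() macros; "struct foo" and release_foo() below are
 * made-up names:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;	// embedded head, double-arg form
 *	};
 *
 *	static void release_foo(struct foo *p)
 *	{
 *		kfree_rcu(p, rcu);	// never sleeps
 *	}
 *
 * The head-less single-argument form (kvfree_rcu_mightsleep()) may
 * only be used where sleeping is allowed, since it can fall back to
 * synchronize_rcu() followed by kvfree(), as implemented above.
 */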
2008 
2009 /**
2010  * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
2011  *
2012  * Note that the single-argument variant of kvfree_rcu() has a slow path
2013  * that triggers synchronize_rcu() followed by freeing the pointer, all
2014  * before the function returns. Therefore, for any single-argument call
2015  * that will result in a kfree() to a cache that is to be destroyed
2016  * during module exit, it is the developer's responsibility to ensure that
2017  * all such calls have returned before the call to kmem_cache_destroy().
2018  */
2019 void kvfree_rcu_barrier(void)
2020 {
2021 	struct kfree_rcu_cpu_work *krwp;
2022 	struct kfree_rcu_cpu *krcp;
2023 	bool queued;
2024 	int i, cpu;
2025 
2026 	/*
2027 	 * First we detach objects and queue them over an RCU batch for
2028 	 * all CPUs. Then the queued works are flushed for each CPU.
2029 	 *
2030 	 * Note that if there are outstanding batches for a particular
2031 	 * CPU, those have to finish first before a new one is queued.
2032 	 */
2033 	for_each_possible_cpu(cpu) {
2034 		krcp = per_cpu_ptr(&krc, cpu);
2035 
2036 		/*
2037 		 * Check if this CPU has any objects which have been queued for a
2038 		 * new GP completion. If not (nothing to detach), we are done
2039 		 * with it. If any batch is pending/running for this "krcp", the
2040 		 * per-cpu flush_rcu_work() below waits for its completion (last step).
2041 		 */
2042 		if (!need_offload_krc(krcp))
2043 			continue;
2044 
2045 		while (1) {
2046 			/*
2047 			 * If we are not able to queue a new RCU work, it means:
2048 			 * - batches for this CPU are still in flight, which must
2049 			 *   be flushed first, and then we repeat; or
2050 			 * - there are no objects to detach, due to concurrency.
2051 			 */
2052 			queued = kvfree_rcu_queue_batch(krcp);
2053 
2054 			/*
2055 			 * Bail out if there is no need to offload this "krcp"
2056 			 * anymore. As noted earlier, it can run concurrently.
2057 			 */
2058 			if (queued || !need_offload_krc(krcp))
2059 				break;
2060 
2061 			/* There are ongoing batches. */
2062 			for (i = 0; i < KFREE_N_BATCHES; i++) {
2063 				krwp = &(krcp->krw_arr[i]);
2064 				flush_rcu_work(&krwp->rcu_work);
2065 			}
2066 		}
2067 	}
2068 
2069 	/*
2070 	 * Now we guarantee that all objects are flushed.
2071 	 */
2072 	for_each_possible_cpu(cpu) {
2073 		krcp = per_cpu_ptr(&krc, cpu);
2074 
2075 		/*
2076 		 * A monitor work can drain ready-to-reclaim objects directly.
2077 		 * Wait for its completion if it is running or pending.
2078 		 */
2079 		cancel_delayed_work_sync(&krcp->monitor_work);
2080 
2081 		for (i = 0; i < KFREE_N_BATCHES; i++) {
2082 			krwp = &(krcp->krw_arr[i]);
2083 			flush_rcu_work(&krwp->rcu_work);
2084 		}
2085 	}
2086 }
2087 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
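
/*
 * An illustrative sketch of the module-exit ordering described above;
 * example_exit() and example_cache are made-up names:
 *
 *	static void __exit example_exit(void)
 *	{
 *		// Flush in-flight kvfree_rcu() requests targeting the
 *		// cache before tearing the cache down.
 *		kvfree_rcu_barrier();
 *		kmem_cache_destroy(example_cache);
 *	}
 */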
2088 
2089 static unsigned long
2090 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
2091 {
2092 	int cpu;
2093 	unsigned long count = 0;
2094 
2095 	/* Snapshot count of all CPUs */
2096 	for_each_possible_cpu(cpu) {
2097 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
2098 
2099 		count += krc_count(krcp);
2100 		count += READ_ONCE(krcp->nr_bkv_objs);
2101 		atomic_set(&krcp->backoff_page_cache_fill, 1);
2102 	}
2103 
2104 	return count == 0 ? SHRINK_EMPTY : count;
2105 }
2106 
2107 static unsigned long
2108 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2109 {
2110 	int cpu, freed = 0;
2111 
2112 	for_each_possible_cpu(cpu) {
2113 		int count;
2114 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
2115 
2116 		count = krc_count(krcp);
2117 		count += drain_page_cache(krcp);
2118 		kfree_rcu_monitor(&krcp->monitor_work.work);
2119 
2120 		sc->nr_to_scan -= count;
2121 		freed += count;
2122 
2123 		if (sc->nr_to_scan <= 0)
2124 			break;
2125 	}
2126 
2127 	return freed == 0 ? SHRINK_STOP : freed;
2128 }
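
/*
 * The scan path above reclaims by forcing an immediate monitor pass:
 * drain_page_cache() releases the per-CPU page cache, and calling
 * kfree_rcu_monitor() directly drains whatever is already past its
 * grace period and queues the rest, instead of waiting for the
 * delayed work to fire.
 */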
2129 
2130 void __init kvfree_rcu_init(void)
2131 {
2132 	int cpu;
2133 	int i, j;
2134 	struct shrinker *kfree_rcu_shrinker;
2135 
2136 	rcu_reclaim_wq = alloc_workqueue("kvfree_rcu_reclaim",
2137 			WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2138 	WARN_ON(!rcu_reclaim_wq);
2139 
2140 	/* Clamp it to the [0:100] seconds interval. */
2141 	if (rcu_delay_page_cache_fill_msec < 0 ||
2142 		rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
2143 
2144 		rcu_delay_page_cache_fill_msec =
2145 			clamp(rcu_delay_page_cache_fill_msec, 0,
2146 				(int) (100 * MSEC_PER_SEC));
2147 
2148 		pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
2149 			rcu_delay_page_cache_fill_msec);
2150 	}
2151 
2152 	for_each_possible_cpu(cpu) {
2153 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
2154 
2155 		for (i = 0; i < KFREE_N_BATCHES; i++) {
2156 			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
2157 			krcp->krw_arr[i].krcp = krcp;
2158 
2159 			for (j = 0; j < FREE_N_CHANNELS; j++)
2160 				INIT_LIST_HEAD(&krcp->krw_arr[i].bulk_head_free[j]);
2161 		}
2162 
2163 		for (i = 0; i < FREE_N_CHANNELS; i++)
2164 			INIT_LIST_HEAD(&krcp->bulk_head[i]);
2165 
2166 		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
2167 		INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
2168 		krcp->initialized = true;
2169 	}
2170 
2171 	kfree_rcu_shrinker = shrinker_alloc(0, "slab-kvfree-rcu");
2172 	if (!kfree_rcu_shrinker) {
2173 		pr_err("Failed to allocate kfree_rcu() shrinker!\n");
2174 		return;
2175 	}
2176 
2177 	kfree_rcu_shrinker->count_objects = kfree_rcu_shrink_count;
2178 	kfree_rcu_shrinker->scan_objects = kfree_rcu_shrink_scan;
2179 
2180 	shrinker_register(kfree_rcu_shrinker);
2181 }
2182 
2183 #endif /* CONFIG_KVFREE_RCU_BATCHED */
2184 
2185