/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
#include <linux/hash.h>

enum _slab_flag_bits {
	_SLAB_CONSISTENCY_CHECKS,
	_SLAB_RED_ZONE,
	_SLAB_POISON,
	_SLAB_KMALLOC,
	_SLAB_HWCACHE_ALIGN,
	_SLAB_CACHE_DMA,
	_SLAB_CACHE_DMA32,
	_SLAB_STORE_USER,
	_SLAB_PANIC,
	_SLAB_TYPESAFE_BY_RCU,
	_SLAB_TRACE,
#ifdef CONFIG_DEBUG_OBJECTS
	_SLAB_DEBUG_OBJECTS,
#endif
	_SLAB_NOLEAKTRACE,
	_SLAB_NO_MERGE,
#ifdef CONFIG_FAILSLAB
	_SLAB_FAILSLAB,
#endif
#ifdef CONFIG_MEMCG
	_SLAB_ACCOUNT,
#endif
#ifdef CONFIG_KASAN_GENERIC
	_SLAB_KASAN,
#endif
	_SLAB_NO_USER_FLAGS,
#ifdef CONFIG_KFENCE
	_SLAB_SKIP_KFENCE,
#endif
#ifndef CONFIG_SLUB_TINY
	_SLAB_RECLAIM_ACCOUNT,
#endif
	_SLAB_OBJECT_POISON,
	_SLAB_CMPXCHG_DOUBLE,
	_SLAB_NO_OBJ_EXT,
#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
	_SLAB_OBJ_EXT_IN_OBJ,
#endif
	_SLAB_FLAGS_LAST_BIT
};

#define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))
#define __SLAB_FLAG_UNUSED	((slab_flags_t __force)(0U))

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise they are no-ops.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	__SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		__SLAB_FLAG_BIT(_SLAB_RED_ZONE)
/* DEBUG: Poison objects */
#define SLAB_POISON		__SLAB_FLAG_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC		__SLAB_FLAG_BIT(_SLAB_KMALLOC)
/**
 * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
 *
 * Sufficiently large objects are aligned on a cache line boundary. For object
 * sizes smaller than half of the cache line size, the alignment is half of the
 * cache line size. In general, if the object size is smaller than 1/2^n of the
 * cache line size, the alignment is adjusted to 1/2^n.
 *
 * If explicit alignment is also requested by the respective
 * &struct kmem_cache_args field, the greater of the two alignments is applied.
 */
#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		__SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	__SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		__SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
/**
 * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 * ::
 *
 *  begin:
 *   rcu_read_lock();
 *   obj = lockless_lookup(key);
 *   if (obj) {
 *     if (!try_get_ref(obj)) { // might fail for free objects
 *       rcu_read_unlock();
 *       goto begin;
 *     }
 *
 *     if (obj->key != key) { // not the object we expected
 *       put_ref(obj);
 *       rcu_read_unlock();
 *       goto begin;
 *     }
 *   }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * Take rcu_read_lock() before reading the address, then rcu_read_unlock()
 * after taking the spinlock within the structure expected at that address.
 *
 * Note that the object identity check has to be done *after* acquiring a
 * reference, therefore the user has to ensure proper ordering for loads.
 * Similarly, when initializing objects allocated with SLAB_TYPESAFE_BY_RCU,
 * the newly allocated object has to be fully initialized *before* its
 * refcount gets initialized, and proper ordering for stores is required.
 * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() are
 * designed with the proper fences required for reference counting objects
 * allocated with SLAB_TYPESAFE_BY_RCU.
 *
 * Note that it is not possible to acquire a lock within a structure
 * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
 * as described above.  The reason is that SLAB_TYPESAFE_BY_RCU pages
 * are not zeroed before being given to the slab, which means that any
 * locks must be initialized after each and every kmem_cache_alloc().
 * Alternatively, make the ctor passed to kmem_cache_create() initialize
 * the locks at page-allocation time, as is done in __i915_request_ctor(),
 * sighand_ctor(), and anon_vma_ctor().  Such a ctor permits readers
 * to safely acquire those ctor-initialized locks under rcu_read_lock()
 * protection.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
/* Trace allocations and frees */
#define SLAB_TRACE		__SLAB_FLAG_BIT(_SLAB_TRACE)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
#else
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_UNUSED
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	__SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)

/*
 * Prevent merging with compatible kmem caches. This flag should be used
 * cautiously. Valid use cases:
 *
 * - caches created for self-tests (e.g. kunit)
 * - general caches created and used by a subsystem, only when a
 *   (subsystem-specific) debug option is enabled
 * - performance critical caches; these should be very rare, discussed with
 *   the slab maintainers, and not used together with CONFIG_SLUB_TINY
 */
#define SLAB_NO_MERGE		__SLAB_FLAG_BIT(_SLAB_NO_MERGE)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		__SLAB_FLAG_BIT(_SLAB_FAILSLAB)
#else
# define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
#endif
/**
 * define SLAB_ACCOUNT - Account allocations to memcg.
 *
 * All object allocations from this cache will be memcg accounted, regardless
 * of whether __GFP_ACCOUNT is passed to individual allocations.
 */
#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
#endif

#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN		__SLAB_FLAG_BIT(_SLAB_KASAN)
#else
#define SLAB_KASAN		__SLAB_FLAG_UNUSED
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	__SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)

#ifdef CONFIG_KFENCE
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
#else
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/**
 * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
 *
 * Use this flag for caches that have an associated shrinker. As a result, slab
 * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by
 * mobility, and are accounted in the SReclaimable counter in /proc/meminfo.
 */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_UNUSED
#endif
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab created using create_boot_cache */
#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)

#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
#define SLAB_OBJ_EXT_IN_OBJ	__SLAB_FLAG_BIT(_SLAB_OBJ_EXT_IN_OBJ)
#else
#define SLAB_OBJ_EXT_IN_OBJ	__SLAB_FLAG_UNUSED
#endif

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
bool slab_is_available(void);

/**
 * struct kmem_cache_args - Less common arguments for kmem_cache_create()
 *
 * Any uninitialized fields of the structure are interpreted as unused. The
 * exception is @freeptr_offset, where %0 is a valid value, so
 * @use_freeptr_offset must also be set to %true in order to interpret the
 * field as used. For @useroffset %0 is also valid, but only with non-%0
 * @usersize.
 *
 * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
 * fields being unused.
 */
struct kmem_cache_args {
	/**
	 * @align: The required alignment for the objects.
	 *
	 * %0 means no specific alignment is requested.
	 */
	unsigned int align;
	/**
	 * @useroffset: Usercopy region offset.
	 *
	 * %0 is a valid offset, when @usersize is non-%0
	 */
	unsigned int useroffset;
	/**
	 * @usersize: Usercopy region size.
	 *
	 * %0 means no usercopy region is specified.
	 */
	unsigned int usersize;
	/**
	 * @freeptr_offset: Custom offset for the free pointer
	 * in caches with &SLAB_TYPESAFE_BY_RCU or @ctor
	 *
	 * By default, &SLAB_TYPESAFE_BY_RCU and @ctor caches place the free
	 * pointer outside of the object. This might cause the object to grow
	 * in size. Cache creators that have a reason to avoid this can specify
	 * a custom free pointer offset in their data structure where the free
	 * pointer will be placed.
	 *
	 * For caches with &SLAB_TYPESAFE_BY_RCU, the caller must ensure that
	 * the free pointer does not overlay fields required to guard against
	 * object recycling (see &SLAB_TYPESAFE_BY_RCU for details).
	 *
	 * For caches with @ctor, the caller must ensure that the free pointer
	 * does not overlay fields initialized by the constructor.
	 *
	 * Currently, only caches with &SLAB_TYPESAFE_BY_RCU or @ctor
	 * may specify @freeptr_offset.
	 *
	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
	 * is specified, @use_freeptr_offset must be set to %true.
	 */
	unsigned int freeptr_offset;
	/**
	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
	 */
	bool use_freeptr_offset;
	/**
	 * @ctor: A constructor for the objects.
	 *
	 * The constructor is invoked for each object in a newly allocated slab
	 * page. It is the cache user's responsibility to free objects in the
	 * same state as after calling the constructor, or to deal appropriately
	 * with any differences between a freshly constructed and a reallocated
	 * object.
	 *
	 * %NULL means no constructor.
	 */
	void (*ctor)(void *);
	/**
	 * @sheaf_capacity: Enable sheaves of given capacity for the cache.
	 *
	 * With a non-zero value, allocations from the cache go through caching
	 * arrays called sheaves. Each cpu has a main sheaf that's always
	 * present, and a spare sheaf that may not be present. When both become
	 * empty, there's an attempt to replace an empty sheaf with a full sheaf
	 * from the per-node barn.
	 *
	 * When no full sheaf is available, and gfp flags allow blocking, a
	 * sheaf is allocated and filled from slab(s) using bulk allocation.
	 * Otherwise the allocation falls back to the normal operation
	 * allocating a single object from a slab.
	 *
	 * Analogously, when freeing and both percpu sheaves are full, the barn
	 * may replace a full sheaf with an empty one, unless the barn is over
	 * capacity. In that case a sheaf is bulk freed to slab pages.
	 *
	 * The sheaves do not enforce NUMA placement of objects, so allocations
	 * via kmem_cache_alloc_node() with a node specified other than
	 * NUMA_NO_NODE will bypass them.
	 *
	 * Bulk allocation and free operations also try to use the cpu sheaves
	 * and barn, but fall back to using slab pages directly.
	 *
	 * When slub_debug is enabled for the cache, the sheaf_capacity argument
	 * is ignored.
	 *
	 * %0 means no sheaves will be created.
	 */
	unsigned int sheaf_capacity;
};

struct kmem_cache *__kmem_cache_create_args(const char *name,
					    unsigned int object_size,
					    struct kmem_cache_args *args,
					    slab_flags_t flags);
static inline struct kmem_cache *
__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		    slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache_args kmem_args = {
		.align	= align,
		.ctor	= ctor,
	};

	return __kmem_cache_create_args(name, size, &kmem_args, flags);
}

/**
 * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
 * for copying to userspace.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects, or %NULL.
 *
 * This is a legacy wrapper; new code should use either KMEM_CACHE_USERCOPY()
 * if whitelisting a single field is sufficient, or kmem_cache_create() with
 * the necessary parameters passed via the args parameter (see
 * &struct kmem_cache_args).
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
static inline struct kmem_cache *
kmem_cache_create_usercopy(const char *name, unsigned int size,
			   unsigned int align, slab_flags_t flags,
			   unsigned int useroffset, unsigned int usersize,
			   void (*ctor)(void *))
{
	struct kmem_cache_args kmem_args = {
		.align		= align,
		.ctor		= ctor,
		.useroffset	= useroffset,
		.usersize	= usersize,
	};

	return __kmem_cache_create_args(name, size, &kmem_args, flags);
}

/* If NULL is passed for @args, use this variant with default arguments. */
static inline struct kmem_cache *
__kmem_cache_default_args(const char *name, unsigned int size,
			  struct kmem_cache_args *args,
			  slab_flags_t flags)
{
	struct kmem_cache_args kmem_default_args = {};

	/* Make sure we don't get passed garbage. */
	if (WARN_ON_ONCE(args))
		return ERR_PTR(-EINVAL);

	return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
}

/**
 * kmem_cache_create - Create a kmem cache.
 * @__name: A string which is used in /proc/slabinfo to identify this cache.
 * @__object_size: The size of objects to be created in this cache.
 * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
 *	    means defaults will be used for all the arguments.
 *
 * This is currently implemented as a macro using ``_Generic()`` to call
 * either the new variant of the function, or a legacy one.
 *
 * The new variant has 4 parameters:
 * ``kmem_cache_create(name, object_size, args, flags)``
 *
 * See __kmem_cache_create_args() which implements this.
 *
 * The legacy variant has 5 parameters:
 * ``kmem_cache_create(name, object_size, align, flags, ctor)``
 *
 * The align and ctor parameters map to the respective fields of
 * &struct kmem_cache_args.
 *
 * Context: Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
#define kmem_cache_create(__name, __object_size, __args, ...)           \
	_Generic((__args),                                              \
		struct kmem_cache_args *: __kmem_cache_create_args,	\
		void *: __kmem_cache_default_args,			\
		default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)

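/*
 * Illustrative sketch (not part of the API): creating a cache via the new
 * args-struct variant of kmem_cache_create(). The struct foo type, foo_ctor
 * and the foo_cache name below are hypothetical.
 *
 * ::
 *
 *  struct kmem_cache_args args = {
 *  	.align	= __alignof__(struct foo),
 *  	.ctor	= foo_ctor,	// or leave unset for no constructor
 *  };
 *  struct kmem_cache *foo_cache;
 *
 *  foo_cache = kmem_cache_create("foo", sizeof(struct foo), &args,
 *  				  SLAB_HWCACHE_ALIGN);
 *  if (!foo_cache)
 *  	return -ENOMEM;
 */
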
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)                                   \
	__kmem_cache_create_args(#__struct, sizeof(struct __struct),    \
			&(struct kmem_cache_args) {			\
				.align	= __alignof__(struct __struct), \
			}, (__flags))

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)						\
	__kmem_cache_create_args(#__struct, sizeof(struct __struct),				\
			&(struct kmem_cache_args) {						\
				.align		= __alignof__(struct __struct),			\
				.useroffset	= offsetof(struct __struct, __field),		\
				.usersize	= sizeof_field(struct __struct, __field),	\
			}, (__flags))

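/*
 * Illustrative sketch (not part of the API): typical KMEM_CACHE() and
 * KMEM_CACHE_USERCOPY() usage. The struct request type, its data field and
 * the cache name are hypothetical.
 *
 * ::
 *
 *  struct request {
 *  	u64 id;
 *  	char data[64];	// field copied to/from userspace
 *  };
 *  static struct kmem_cache *req_cache;
 *
 *  // Object alignment is taken from __alignof__(struct request):
 *  req_cache = KMEM_CACHE(request, SLAB_PANIC);
 *
 *  // Same, but whitelisting only 'data' for usercopy:
 *  req_cache = KMEM_CACHE_USERCOPY(request, SLAB_PANIC, data);
 */
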
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size,
					       unsigned long align,
					       gfp_t flags, int nid) __realloc_size(2);
#define krealloc_noprof(_o, _s, _f)	krealloc_node_align_noprof(_o, _s, 1, _f, NUMA_NO_NODE)
#define krealloc_node_align(...)	alloc_hooks(krealloc_node_align_noprof(__VA_ARGS__))
#define krealloc_node(_o, _s, _f, _n)	krealloc_node_align(_o, _s, 1, _f, _n)
#define krealloc(...)			krealloc_node(__VA_ARGS__, NUMA_NO_NODE)

void kfree(const void *objp);
void kfree_nolock(const void *objp);
void kfree_sensitive(const void *objp);

DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))

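/*
 * Illustrative sketch (not part of the API): the DEFINE_FREE() classes above
 * enable scope-based cleanup via __free() from <linux/cleanup.h>. The buffer
 * size and usage below are hypothetical.
 *
 * ::
 *
 *  void *buf __free(kfree) = kmalloc(SZ_4K, GFP_KERNEL);
 *
 *  if (!buf)
 *  	return -ENOMEM;
 *  // ... use buf; kfree(buf) runs automatically when buf goes out of
 *  // scope. Use no_free_ptr(buf) to hand ownership out instead.
 */
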
/**
 * ksize - Report actual allocation size of associated object
 *
 * @objp: Pointer returned from a prior kmalloc()-family allocation.
 *
 * This should not be used for writing beyond the originally requested
 * allocation size. Either use krealloc() or round up the allocation size
 * with kmalloc_size_roundup() prior to allocation. If this is used to
 * access beyond the originally requested allocation size, UBSAN_BOUNDS
 * and/or FORTIFY_SOURCE may trip, since they only know about the
 * originally allocated size via the __alloc_size attribute.
 */
size_t ksize(const void *objp);

#ifdef CONFIG_PRINTK
bool kmem_dump_obj(void *object);
#else
static inline bool kmem_dump_obj(void *object) { return false; }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_HAS_DMA_MINALIGN
#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#endif
#endif

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#elif ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define RANDOM_KMALLOC_CACHES_NR	15 // # of cache copies
#else
#define RANDOM_KMALLOC_CACHES_NR	0
#endif

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
	KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
	KMALLOC_RECLAIM,
#endif
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG
	KMALLOC_CGROUP,
#endif
	NR_KMALLOC_TYPES
};

typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];

extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))

extern unsigned long random_kmalloc_seed;

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
		/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
		return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
						      ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
#else
		return KMALLOC_NORMAL;
#endif

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated.  Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
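
/*
 * Worked example (illustrative): with the default KMALLOC_MIN_SIZE of 8,
 * kmalloc_index(100) == 7, i.e. a 100-byte request is served from the
 * 128-byte (2^7) cache, while kmalloc_index(96) == 1 selects the special
 * 96-byte cache.
 */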

#include <linux/alloc_tag.h>

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.
 * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
			      gfp_t flags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc(...)			alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))

void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
			    gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...)	alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))

/**
 * kmem_cache_charge - memcg charge an already allocated slab memory
 * @objp: address of the slab object to memcg charge
 * @gfpflags: describe the allocation context
 *
 * kmem_cache_charge allows charging a slab object to the current memcg,
 * primarily in cases where charging at allocation time might not be possible
 * because the target memcg is not known (e.g. softirq context).
 *
 * The objp should be a pointer returned by the slab allocator functions like
 * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge
 * behavior can be controlled through the gfpflags parameter, which affects
 * how the necessary internal metadata can be allocated. Including __GFP_NOFAIL
 * denotes that overcharging is requested instead of failure, but is not
 * applied for the internal metadata allocation.
 *
 * There are several cases where it will return true even if the charging was
 * not done:
 *
 * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
 * 2. Already charged slab objects.
 * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
 *    without __GFP_ACCOUNT
 * 4. Allocating internal metadata has failed
 *
 * Return: true if charge was successful otherwise false.
 */
bool kmem_cache_charge(void *objp, gfp_t gfpflags);
void kmem_cache_free(struct kmem_cache *s, void *objp);

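/*
 * Illustrative sketch (not part of the API): allocating in a context where
 * the memcg is unknown and charging later. The obj_cache name and the call
 * sites are hypothetical.
 *
 * ::
 *
 *  // In softirq context, the target memcg is not known yet:
 *  obj = kmem_cache_alloc(obj_cache, GFP_ATOMIC);
 *
 *  // Later, in process context on behalf of the real consumer:
 *  if (obj && !kmem_cache_charge(obj, GFP_KERNEL))
 *  	; // charge failed; handle per subsystem policy
 */
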
kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset, unsigned int usersize,
				  void (*ctor)(void *));

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);

int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
#define kmem_cache_alloc_bulk(...)	alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))

static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

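/*
 * Illustrative sketch (not part of the API): bulk-allocating a batch of
 * objects and freeing them in one call. The obj_cache name and the batch
 * size are hypothetical; kmem_cache_alloc_bulk() returns the number of
 * objects allocated, which may be 0 on failure.
 *
 * ::
 *
 *  void *objs[16];
 *  int n;
 *
 *  n = kmem_cache_alloc_bulk(obj_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *  if (!n)
 *  	return -ENOMEM;
 *  // ... use objs[0..n-1] ...
 *  kmem_cache_free_bulk(obj_cache, n, objs);
 */
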
void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
				   int node) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))

struct slab_sheaf *
kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);

int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
		struct slab_sheaf **sheafp, unsigned int size);

void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
				       struct slab_sheaf *sheaf);

void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
			struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_from_sheaf(...)	\
			alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))

unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf);

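/*
 * Illustrative sketch (not part of the API): prefilling a sheaf so that a
 * later critical section can allocate from it. The obj_cache name and the
 * batch size of 8 are hypothetical, as is the failure check; consult the
 * actual return convention of kmem_cache_prefill_sheaf().
 *
 * ::
 *
 *  struct slab_sheaf *sheaf;
 *
 *  sheaf = kmem_cache_prefill_sheaf(obj_cache, GFP_KERNEL, 8);
 *  if (!sheaf)	// sketch; check the actual error convention
 *  	return -ENOMEM;
 *  // ... later, up to 8 allocations served from the sheaf:
 *  obj = kmem_cache_alloc_from_sheaf(obj_cache, GFP_ATOMIC, sheaf);
 *  // ...
 *  kmem_cache_return_sheaf(obj_cache, GFP_KERNEL, sheaf);
 */
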
/*
 * These macros allow declaring a kmem_buckets * parameter alongside size, which
 * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
 * sites don't have to pass NULL.
 */
#ifdef CONFIG_SLAB_BUCKETS
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size), kmem_buckets *(_b)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size), (_b)
#define PASS_BUCKET_PARAM(_b)		(_b)
#else
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size)
#define PASS_BUCKET_PARAM(_b)		NULL
#endif

/*
 * The following functions are not to be used directly and are intended only
 * for internal use from kmalloc() and kmalloc_node(),
 * with the exception of kunit tests.
 */

void *__kmalloc_noprof(size_t size, gfp_t flags)
				__assume_kmalloc_alignment __alloc_size(1);

void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
				__assume_kmalloc_alignment __alloc_size(1);

void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
				__assume_kmalloc_alignment __alloc_size(3);

void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
				  int node, size_t size)
				__assume_kmalloc_alignment __alloc_size(4);

void *__kmalloc_large_noprof(size_t size, gfp_t flags)
				__assume_page_alignment __alloc_size(1);

void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
				__assume_page_alignment __alloc_size(1);

/**
 * kmalloc - allocate kernel memory
 * @size: how many bytes of memory are required.
 * @flags: describe the allocation context
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. When @size is a power of two, the alignment is also guaranteed
 * to be at least the size. For other sizes, the alignment is guaranteed to
 * be at least the largest power-of-two divisor of @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp_types.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep.  May use emergency pools.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_ZERO
 *	Zero the allocated memory before returning. Also see kzalloc().
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to make the allocation succeed, but eventually
 *	fail.
 */
static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_noprof(size, flags);

		index = kmalloc_index(size);
		return __kmalloc_cache_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, size);
	}
	return __kmalloc_noprof(size, flags);
}
#define kmalloc(...)				alloc_hooks(kmalloc_noprof(__VA_ARGS__))

void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
#define kmalloc_nolock(...)			alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))

/**
 * __alloc_objs - Allocate objects of a given type with a given allocator
 * @KMALLOC: which size-based kmalloc wrapper to allocate with.
 * @GFP: GFP flags for the allocation.
 * @TYPE: type to allocate space for.
 * @COUNT: how many @TYPE objects to allocate.
 *
 * Returns: Newly allocated pointer to (first) @TYPE of @COUNT-many
 * allocated @TYPE objects, or NULL on failure.
 */
#define __alloc_objs(KMALLOC, GFP, TYPE, COUNT)				\
({									\
	const size_t __obj_size = size_mul(sizeof(TYPE), COUNT);	\
	(TYPE *)KMALLOC(__obj_size, GFP);				\
})

/**
 * __alloc_flex - Allocate an object that has a trailing flexible array
 * @KMALLOC: kmalloc wrapper function to use for allocation.
 * @GFP: GFP flags for the allocation.
 * @TYPE: type of structure to allocate space for.
 * @FAM: The name of the flexible array member of @TYPE structure.
 * @COUNT: how many @FAM elements to allocate space for.
 *
 * Returns: Newly allocated pointer to @TYPE with @COUNT-many trailing
 * @FAM elements, or NULL on failure or if @COUNT cannot be represented
 * by the member of @TYPE that counts the @FAM elements (annotated via
 * __counted_by()).
 */
#define __alloc_flex(KMALLOC, GFP, TYPE, FAM, COUNT)			\
({									\
	const size_t __count = (COUNT);					\
	const size_t __obj_size = struct_size_t(TYPE, FAM, __count);	\
	TYPE *__obj_ptr;						\
	if (WARN_ON_ONCE(overflows_flex_counter_type(TYPE, FAM, __count))) \
		__obj_ptr = NULL;					\
	else								\
		__obj_ptr = KMALLOC(__obj_size, GFP);			\
	if (__obj_ptr)							\
		__set_flex_counter(__obj_ptr->FAM, __count);		\
	__obj_ptr;							\
})

/**
 * kmalloc_obj - Allocate a single instance of the given type
 * @VAR_OR_TYPE: Variable or type to allocate.
 * @GFP: GFP flags for the allocation.
 *
 * Returns: newly allocated pointer to a @VAR_OR_TYPE on success, or NULL
 * on failure.
 */
#define kmalloc_obj(VAR_OR_TYPE, GFP)			\
	__alloc_objs(kmalloc, GFP, typeof(VAR_OR_TYPE), 1)

/**
 * kmalloc_objs - Allocate an array of the given type
 * @VAR_OR_TYPE: Variable or type to allocate an array of.
 * @COUNT: How many elements in the array.
 * @GFP: GFP flags for the allocation.
 *
 * Returns: newly allocated pointer to array of @VAR_OR_TYPE on success,
 * or NULL on failure.
 */
#define kmalloc_objs(VAR_OR_TYPE, COUNT, GFP)		\
	__alloc_objs(kmalloc, GFP, typeof(VAR_OR_TYPE), COUNT)

/**
 * kmalloc_flex - Allocate a single instance of the given flexible structure
 * @VAR_OR_TYPE: Variable or type to allocate (with its flex array).
 * @FAM: The name of the flexible array member of the structure.
 * @COUNT: How many flexible array member elements are desired.
 * @GFP: GFP flags for the allocation.
 *
 * Returns: newly allocated pointer to @VAR_OR_TYPE on success, NULL on
 * failure. If @FAM has been annotated with __counted_by(), the allocation
 * will immediately fail if @COUNT is larger than what the type of the
 * struct's counter variable can represent.
 */
#define kmalloc_flex(VAR_OR_TYPE, FAM, COUNT, GFP)	\
	__alloc_flex(kmalloc, GFP, typeof(VAR_OR_TYPE), FAM, COUNT)

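/*
 * Illustrative sketch (not part of the API): allocating a struct with a
 * __counted_by() flexible array via kmalloc_flex(). The struct msg type is
 * hypothetical.
 *
 * ::
 *
 *  struct msg {
 *  	u16 len;
 *  	u8 data[] __counted_by(len);
 *  };
 *
 *  struct msg *m = kmalloc_flex(struct msg, data, 100, GFP_KERNEL);
 *  // The helper sets the __counted_by() counter ('len' here) to 100;
 *  // the allocation fails if the count cannot be represented by the
 *  // counter's type.
 *  if (!m)
 *  	return -ENOMEM;
 */
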
/* All kzalloc aliases for kmalloc_(obj|objs|flex). */
#define kzalloc_obj(P, GFP)				\
	__alloc_objs(kzalloc, GFP, typeof(P), 1)
#define kzalloc_objs(P, COUNT, GFP)			\
	__alloc_objs(kzalloc, GFP, typeof(P), COUNT)
#define kzalloc_flex(P, FAM, COUNT, GFP)		\
	__alloc_flex(kzalloc, GFP, typeof(P), FAM, COUNT)

/* All kvmalloc aliases for kmalloc_(obj|objs|flex). */
#define kvmalloc_obj(P, GFP)				\
	__alloc_objs(kvmalloc, GFP, typeof(P), 1)
#define kvmalloc_objs(P, COUNT, GFP)			\
	__alloc_objs(kvmalloc, GFP, typeof(P), COUNT)
#define kvmalloc_flex(P, FAM, COUNT, GFP)		\
	__alloc_flex(kvmalloc, GFP, typeof(P), FAM, COUNT)

/* All kvzalloc aliases for kmalloc_(obj|objs|flex). */
#define kvzalloc_obj(P, GFP)				\
	__alloc_objs(kvzalloc, GFP, typeof(P), 1)
#define kvzalloc_objs(P, COUNT, GFP)			\
	__alloc_objs(kvzalloc, GFP, typeof(P), COUNT)
#define kvzalloc_flex(P, FAM, COUNT, GFP)		\
	__alloc_flex(kvzalloc, GFP, typeof(P), FAM, COUNT)

#define kmem_buckets_alloc(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))

#define kmem_buckets_alloc_track_caller(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_))

static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_node_noprof(size, flags, node);

		index = kmalloc_index(size);
		return __kmalloc_cache_node_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, node, size);
	}
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
}
#define kmalloc_node(...)			alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return kmalloc_noprof(bytes, flags);
}
#define kmalloc_array(...)			alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * See krealloc_noprof() for further details.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 */
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
								       size_t new_n,
								       size_t new_size,
								       gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc_noprof(p, bytes, flags);
}
#define krealloc_array(...)			alloc_hooks(krealloc_array_noprof(__VA_ARGS__))

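/*
 * Illustrative sketch (not part of the API): growing an array with
 * krealloc_array(). The names and counts are hypothetical. On failure the
 * original allocation is left intact, so keep the old pointer until the
 * result is checked.
 *
 * ::
 *
 *  u32 *new_items;
 *
 *  new_items = krealloc_array(items, new_count, sizeof(*items), GFP_KERNEL);
 *  if (!new_items)
 *  	return -ENOMEM;	// 'items' is still valid here
 *  items = new_items;
 */
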
/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
#define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)

void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
	__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
#define kmalloc_node_track_caller(...)		\
	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#define kmalloc_track_caller(...)		kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)

#define kmalloc_track_caller_noprof(...)	\
		kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
							  int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node_noprof(bytes, flags, node);
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
}
#define kmalloc_array_node(...)			alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))

#define kcalloc_node(_n, _size, _flags, _node)	\
	kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)

/*
 * Shortcuts
 */
#define kmem_cache_zalloc(_k, _flags)		kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
{
	return kmalloc_noprof(size, flags | __GFP_ZERO);
}
#define kzalloc(...)				alloc_hooks(kzalloc_noprof(__VA_ARGS__))
#define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)

void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
			     gfp_t flags, int node) __alloc_size(1);
#define kvmalloc_node_align_noprof(_size, _align, _flags, _node)	\
	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, NULL), _align, _flags, _node)
#define kvmalloc_node_align(...)		\
	alloc_hooks(kvmalloc_node_align_noprof(__VA_ARGS__))
#define kvmalloc_node(_s, _f, _n)		kvmalloc_node_align(_s, 1, _f, _n)
#define kvmalloc(...)				kvmalloc_node(__VA_ARGS__, NUMA_NO_NODE)
#define kvzalloc(_size, _flags)			kvmalloc(_size, (_flags)|__GFP_ZERO)

#define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)

#define kmem_buckets_valloc(_b, _size, _flags)	\
	alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), 1, _flags, NUMA_NO_NODE))

static inline __alloc_size(1, 2) void *
kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc_node_align_noprof(bytes, 1, flags, node);
}

#define kvmalloc_array_noprof(...)		kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
#define kvcalloc_node_noprof(_n,_s,_f,_node)	kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node)
#define kvcalloc_noprof(...)			kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)

#define kvmalloc_array(...)			alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
#define kvcalloc_node(...)			alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...)				alloc_hooks(kvcalloc_noprof(__VA_ARGS__))

void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
				  gfp_t flags, int nid) __realloc_size(2);
#define kvrealloc_node_align(...)		\
	alloc_hooks(kvrealloc_node_align_noprof(__VA_ARGS__))
#define kvrealloc_node(_p, _s, _f, _n)		kvrealloc_node_align(_p, _s, 1, _f, _n)
#define kvrealloc(...)				kvrealloc_node(__VA_ARGS__, NUMA_NO_NODE)

extern void kvfree(const void *addr);
DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))

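/*
 * Illustrative sketch (not part of the API): kvmalloc() may fall back to
 * vmalloc for larger or fragmented allocations, so the result must always
 * be freed with kvfree(), never plain kfree(). The table name and entry
 * count are hypothetical.
 *
 * ::
 *
 *  struct entry *table;
 *
 *  table = kvmalloc_array(nr_entries, sizeof(*table), GFP_KERNEL);
 *  if (!table)
 *  	return -ENOMEM;
 *  // ...
 *  kvfree(table);
 */
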
extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);

#ifndef CONFIG_KVFREE_RCU_BATCHED
static inline void kvfree_rcu_barrier(void)
{
	rcu_barrier();
}

static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
{
	rcu_barrier();
}

static inline void kfree_rcu_scheduler_running(void) { }
#else
void kvfree_rcu_barrier(void);

void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);

void kfree_rcu_scheduler_running(void);
#endif

/**
 * kmalloc_size_roundup - Report allocation bucket size for the given size
 *
 * @size: Number of bytes to round up from.
 *
 * This returns the number of bytes that would be available in a kmalloc()
 * allocation of @size bytes. For example, a 126 byte request would be
 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
 * for the general-purpose kmalloc()-based allocations, and is not for the
 * pre-sized kmem_cache_alloc()-based allocations.)
 *
 * Use this to kmalloc() the full bucket size ahead of time instead of using
 * ksize() to query the size after an allocation.
 */
size_t kmalloc_size_roundup(size_t size);

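/*
 * Illustrative sketch (not part of the API): rounding up before allocating
 * so the full bucket can be used later without tripping bounds checks. The
 * variable names are hypothetical.
 *
 * ::
 *
 *  size_t alloc_size = kmalloc_size_roundup(len);	// e.g. 126 -> 128
 *  u8 *buf = kmalloc(alloc_size, GFP_KERNEL);
 *
 *  // All alloc_size bytes may now be used, unlike growing into the
 *  // slack reported by ksize() after a kmalloc(len) allocation.
 */
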
void __init kmem_cache_init_late(void);
void __init kvfree_rcu_init(void);

#endif	/* _LINUX_SLAB_H */