1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
4  *
5  * (C) SGI 2006, Christoph Lameter
6  * 	Cleaned up and restructured to ease the addition of alternative
7  * 	implementations of SLAB allocators.
8  * (C) Linux Foundation 2008-2013
9  *      Unified interface for all slab allocators
10  */
11 
12 #ifndef _LINUX_SLAB_H
13 #define	_LINUX_SLAB_H
14 
15 #include <linux/cache.h>
16 #include <linux/gfp.h>
17 #include <linux/overflow.h>
18 #include <linux/types.h>
19 #include <linux/rcupdate.h>
20 #include <linux/workqueue.h>
21 #include <linux/percpu-refcount.h>
22 #include <linux/cleanup.h>
23 #include <linux/hash.h>
24 
25 enum _slab_flag_bits {
26 	_SLAB_CONSISTENCY_CHECKS,
27 	_SLAB_RED_ZONE,
28 	_SLAB_POISON,
29 	_SLAB_KMALLOC,
30 	_SLAB_HWCACHE_ALIGN,
31 	_SLAB_CACHE_DMA,
32 	_SLAB_CACHE_DMA32,
33 	_SLAB_STORE_USER,
34 	_SLAB_PANIC,
35 	_SLAB_TYPESAFE_BY_RCU,
36 	_SLAB_TRACE,
37 #ifdef CONFIG_DEBUG_OBJECTS
38 	_SLAB_DEBUG_OBJECTS,
39 #endif
40 	_SLAB_NOLEAKTRACE,
41 	_SLAB_NO_MERGE,
42 #ifdef CONFIG_FAILSLAB
43 	_SLAB_FAILSLAB,
44 #endif
45 #ifdef CONFIG_MEMCG
46 	_SLAB_ACCOUNT,
47 #endif
48 #ifdef CONFIG_KASAN_GENERIC
49 	_SLAB_KASAN,
50 #endif
51 	_SLAB_NO_USER_FLAGS,
52 #ifdef CONFIG_KFENCE
53 	_SLAB_SKIP_KFENCE,
54 #endif
55 #ifndef CONFIG_SLUB_TINY
56 	_SLAB_RECLAIM_ACCOUNT,
57 #endif
58 	_SLAB_OBJECT_POISON,
59 	_SLAB_CMPXCHG_DOUBLE,
60 #ifdef CONFIG_SLAB_OBJ_EXT
61 	_SLAB_NO_OBJ_EXT,
62 #endif
63 	_SLAB_FLAGS_LAST_BIT
64 };
65 
66 #define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))
67 #define __SLAB_FLAG_UNUSED	((slab_flags_t __force)(0U))
68 
69 /*
70  * Flags to pass to kmem_cache_create().
71  * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise they are no-ops.
72  */
73 /* DEBUG: Perform (expensive) checks on alloc/free */
74 #define SLAB_CONSISTENCY_CHECKS	__SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
75 /* DEBUG: Red zone objs in a cache */
76 #define SLAB_RED_ZONE		__SLAB_FLAG_BIT(_SLAB_RED_ZONE)
77 /* DEBUG: Poison objects */
78 #define SLAB_POISON		__SLAB_FLAG_BIT(_SLAB_POISON)
79 /* Indicate a kmalloc slab */
80 #define SLAB_KMALLOC		__SLAB_FLAG_BIT(_SLAB_KMALLOC)
81 /**
82  * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
83  *
84  * Sufficiently large objects are aligned on a cache line boundary. For objects
85  * smaller than half of the cache line size, the alignment is half of the cache
86  * line size. In general, if the object size is smaller than 1/2^n of the cache
87  * line size, the alignment is adjusted to 1/2^n of it.
88  *
89  * If explicit alignment is also requested by the respective
90  * &struct kmem_cache_args field, the greater of the two alignments is applied.
91  */
92 #define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
93 /* Use GFP_DMA memory */
94 #define SLAB_CACHE_DMA		__SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
95 /* Use GFP_DMA32 memory */
96 #define SLAB_CACHE_DMA32	__SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
97 /* DEBUG: Store the last owner for bug hunting */
98 #define SLAB_STORE_USER		__SLAB_FLAG_BIT(_SLAB_STORE_USER)
99 /* Panic if kmem_cache_create() fails */
100 #define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
101 /**
102  * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
103  *
104  * This delays freeing the SLAB page by a grace period; it does _NOT_
105  * delay object freeing. This means that if you do kmem_cache_free()
106  * that memory location is free to be reused at any time. Thus it may
107  * be possible to see another object there in the same RCU grace period.
108  *
109  * This feature only ensures the memory location backing the object
110  * stays valid; the trick to using this is relying on an independent
111  * object validation pass. Something like:
112  *
113  * ::
114  *
115  *  begin:
116  *   rcu_read_lock();
117  *   obj = lockless_lookup(key);
118  *   if (obj) {
119  *     if (!try_get_ref(obj)) { // might fail for free objects
120  *       rcu_read_unlock();
121  *       goto begin;
 *     }
122  *
123  *     if (obj->key != key) { // not the object we expected
124  *       put_ref(obj);
125  *       rcu_read_unlock();
126  *       goto begin;
127  *     }
128  *   }
129  *  rcu_read_unlock();
130  *
131  * This is useful if we need to approach a kernel structure obliquely,
132  * from its address obtained without the usual locking. We can lock
133  * the structure to stabilize it and check it's still at the given address,
134  * only if we can be sure that the memory has not been meanwhile reused
135  * for some other kind of object (which our subsystem's lock might corrupt).
136  *
137  * Take rcu_read_lock() before reading the address, then rcu_read_unlock() after
138  * taking the spinlock within the structure expected at that address.
139  *
140  * Note that the object identity check has to be done *after* acquiring a
141  * reference, therefore the user has to ensure proper ordering for loads.
142  * Similarly, when initializing objects allocated with SLAB_TYPESAFE_BY_RCU,
143  * the newly allocated object has to be fully initialized *before* its
144  * refcount gets initialized and proper ordering for stores is required.
145  * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() are
146  * designed with the proper fences required for reference counting objects
147  * allocated with SLAB_TYPESAFE_BY_RCU.
148  *
149  * Note that it is not possible to acquire a lock within a structure
150  * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
151  * as described above.  The reason is that SLAB_TYPESAFE_BY_RCU pages
152  * are not zeroed before being given to the slab, which means that any
153  * locks must be initialized after each and every kmem_cache_alloc().
154  * Alternatively, make the ctor passed to kmem_cache_create() initialize
155  * the locks at page-allocation time, as is done in __i915_request_ctor(),
156  * sighand_ctor(), and anon_vma_ctor().  Such a ctor permits readers
157  * to safely acquire those ctor-initialized locks under rcu_read_lock()
158  * protection.
159  *
160  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
161  */
162 #define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
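/*
 * A minimal sketch of the refcount ordering described above. The struct, the
 * lookup helpers and the list are made-up names; only the refcount_t helpers
 * are real:
 *
 *	// allocation side: fully initialize before the refcount is set
 *	obj = kmem_cache_alloc(obj_cache, GFP_KERNEL);
 *	obj->key = key;
 *	refcount_set_release(&obj->ref, 1);	// orders the stores above
 *	list_add_rcu(&obj->node, &obj_list);	// publish for lockless lookup
 *
 *	// lookup side: acquire a reference before trusting other fields
 *	rcu_read_lock();
 *	obj = lockless_lookup(key);
 *	if (obj && !refcount_inc_not_zero_acquire(&obj->ref))
 *		obj = NULL;			// object was freed or recycled
 *	rcu_read_unlock();
 *	if (obj && obj->key != key) {		// identity check after acquire
 *		put_obj(obj);
 *		obj = NULL;
 *	}
 */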
163 /* Trace allocations and frees */
164 #define SLAB_TRACE		__SLAB_FLAG_BIT(_SLAB_TRACE)
165 
166 /* Flag to prevent checks on free */
167 #ifdef CONFIG_DEBUG_OBJECTS
168 # define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
169 #else
170 # define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_UNUSED
171 #endif
172 
173 /* Avoid kmemleak tracing */
174 #define SLAB_NOLEAKTRACE	__SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)
175 
176 /*
177  * Prevent merging with compatible kmem caches. This flag should be used
178  * cautiously. Valid use cases:
179  *
180  * - caches created for self-tests (e.g. kunit)
181  * - general caches created and used by a subsystem, only when a
182  *   (subsystem-specific) debug option is enabled
183  * - performance critical caches, should be very rare and consulted with slab
184  *   maintainers, and not used together with CONFIG_SLUB_TINY
185  */
186 #define SLAB_NO_MERGE		__SLAB_FLAG_BIT(_SLAB_NO_MERGE)
187 
188 /* Fault injection mark */
189 #ifdef CONFIG_FAILSLAB
190 # define SLAB_FAILSLAB		__SLAB_FLAG_BIT(_SLAB_FAILSLAB)
191 #else
192 # define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
193 #endif
194 /**
195  * define SLAB_ACCOUNT - Account allocations to memcg.
196  *
197  * All object allocations from this cache will be memcg accounted, regardless of
198  * whether __GFP_ACCOUNT is passed to individual allocations.
199  */
200 #ifdef CONFIG_MEMCG
201 # define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
202 #else
203 # define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
204 #endif
205 
206 #ifdef CONFIG_KASAN_GENERIC
207 #define SLAB_KASAN		__SLAB_FLAG_BIT(_SLAB_KASAN)
208 #else
209 #define SLAB_KASAN		__SLAB_FLAG_UNUSED
210 #endif
211 
212 /*
213  * Ignore user specified debugging flags.
214  * Intended for caches created for self-tests so they have only flags
215  * specified in the code and other flags are ignored.
216  */
217 #define SLAB_NO_USER_FLAGS	__SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)
218 
219 #ifdef CONFIG_KFENCE
220 #define SLAB_SKIP_KFENCE	__SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
221 #else
222 #define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
223 #endif
224 
225 /* The following flags affect the page allocator grouping pages by mobility */
226 /**
227  * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
228  *
229  * Use this flag for caches that have an associated shrinker. As a result, slab
230  * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by
231  * mobility, and are accounted in the SReclaimable counter in /proc/meminfo.
232  */
233 #ifndef CONFIG_SLUB_TINY
234 #define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
235 #else
236 #define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_UNUSED
237 #endif
238 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
239 
240 /* Don't create slab object extension metadata (obj_ext) for this cache */
241 #ifdef CONFIG_SLAB_OBJ_EXT
242 #define SLAB_NO_OBJ_EXT		__SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
243 #else
244 #define SLAB_NO_OBJ_EXT		__SLAB_FLAG_UNUSED
245 #endif
246 
247 /*
248  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
249  *
250  * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
251  *
252  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
253  * Both make kfree a no-op.
254  */
255 #define ZERO_SIZE_PTR ((void *)16)
256 
257 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
258 				(unsigned long)ZERO_SIZE_PTR)
259 
260 #include <linux/kasan.h>
261 
262 struct list_lru;
263 struct mem_cgroup;
264 /*
265  * struct kmem_cache related prototypes
266  */
267 bool slab_is_available(void);
268 
269 /**
270  * struct kmem_cache_args - Less common arguments for kmem_cache_create()
271  *
272  * Any uninitialized fields of the structure are interpreted as unused. The
273  * exception is @freeptr_offset where %0 is a valid value, so
274  * @use_freeptr_offset must also be set to %true in order to interpret the field
275  * as used. For @useroffset %0 is also valid, but only with non-%0
276  * @usersize.
277  *
278  * When %NULL args is passed to kmem_cache_create(), it is equivalent to
279  * leaving all fields unused.
280  */
281 struct kmem_cache_args {
282 	/**
283 	 * @align: The required alignment for the objects.
284 	 *
285 	 * %0 means no specific alignment is requested.
286 	 */
287 	unsigned int align;
288 	/**
289 	 * @useroffset: Usercopy region offset.
290 	 *
291 	 * %0 is a valid offset, when @usersize is non-%0
292 	 */
293 	unsigned int useroffset;
294 	/**
295 	 * @usersize: Usercopy region size.
296 	 *
297 	 * %0 means no usercopy region is specified.
298 	 */
299 	unsigned int usersize;
300 	/**
301 	 * @freeptr_offset: Custom offset for the free pointer
302 	 * in &SLAB_TYPESAFE_BY_RCU caches
303 	 *
304 	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
305 	 * outside of the object. This might cause the object to grow in size.
306 	 * Cache creators that have a reason to avoid this can specify a custom
307 	 * free pointer offset in their struct where the free pointer will be
308 	 * placed.
309 	 *
310 	 * Note that placing the free pointer inside the object requires the
311 	 * caller to ensure that no fields are invalidated that are required to
312 	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
313 	 * details).
314 	 *
315 	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
316 	 * is specified, @use_freeptr_offset must be set to %true.
317 	 *
318 	 * Note that @ctor currently isn't supported with custom free pointers
319 	 * as a @ctor requires an external free pointer.
320 	 */
321 	unsigned int freeptr_offset;
322 	/**
323 	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
324 	 */
325 	bool use_freeptr_offset;
326 	/**
327 	 * @ctor: A constructor for the objects.
328 	 *
329 	 * The constructor is invoked for each object in a newly allocated slab
330 	 * page. It is the cache user's responsibility to free objects in the
331 	 * same state as after calling the constructor, or deal appropriately
332 	 * with any differences between a freshly constructed and a reallocated
333 	 * object.
334 	 *
335 	 * %NULL means no constructor.
336 	 */
337 	void (*ctor)(void *);
338 	/**
339 	 * @sheaf_capacity: Enable sheaves of given capacity for the cache.
340 	 *
341 	 * With a non-zero value, allocations from the cache go through caching
342 	 * arrays called sheaves. Each cpu has a main sheaf that's always
343 	 * present, and a spare sheaf that may be not present. When both become
344 	 * present, and a spare sheaf that may not be present. When both become
345 	 * from the per-node barn.
346 	 *
347 	 * When no full sheaf is available, and gfp flags allow blocking, a
348 	 * sheaf is allocated and filled from slab(s) using bulk allocation.
349 	 * Otherwise the allocation falls back to the normal operation
350 	 * allocating a single object from a slab.
351 	 *
352 	 * Analogously, when freeing and both percpu sheaves are full, the barn
353 	 * may replace one of them with an empty sheaf, unless the barn is over
354 	 * capacity. In that case a sheaf is bulk freed to slab pages.
355 	 *
356 	 * The sheaves do not enforce NUMA placement of objects, so allocations
357 	 * via kmem_cache_alloc_node() with a node specified other than
358 	 * NUMA_NO_NODE will bypass them.
359 	 *
360 	 * Bulk allocation and free operations also try to use the cpu sheaves
361 	 * and barn, but fall back to using slab pages directly.
362 	 *
363 	 * When slub_debug is enabled for the cache, the sheaf_capacity argument
364 	 * is ignored.
365 	 *
366 	 * %0 means no sheaves will be created.
367 	 */
368 	unsigned int sheaf_capacity;
369 };
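/*
 * For example, a cache with cache line alignment, a constructor and percpu
 * sheaves could be set up roughly like this ("struct foo" and foo_ctor() are
 * placeholders, not real kernel symbols):
 *
 *	struct kmem_cache_args args = {
 *		.align		= __alignof__(struct foo),
 *		.ctor		= foo_ctor,
 *		.sheaf_capacity	= 32,
 *	};
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), &args,
 *				      SLAB_HWCACHE_ALIGN);
 *	if (!foo_cache)
 *		return -ENOMEM;
 */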
370 
371 struct kmem_cache *__kmem_cache_create_args(const char *name,
372 					    unsigned int object_size,
373 					    struct kmem_cache_args *args,
374 					    slab_flags_t flags);
375 static inline struct kmem_cache *
376 __kmem_cache_create(const char *name, unsigned int size, unsigned int align,
377 		    slab_flags_t flags, void (*ctor)(void *))
378 {
379 	struct kmem_cache_args kmem_args = {
380 		.align	= align,
381 		.ctor	= ctor,
382 	};
383 
384 	return __kmem_cache_create_args(name, size, &kmem_args, flags);
385 }
386 
387 /**
388  * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
389  * for copying to userspace.
390  * @name: A string which is used in /proc/slabinfo to identify this cache.
391  * @size: The size of objects to be created in this cache.
392  * @align: The required alignment for the objects.
393  * @flags: SLAB flags
394  * @useroffset: Usercopy region offset
395  * @usersize: Usercopy region size
396  * @ctor: A constructor for the objects, or %NULL.
397  *
398  * This is a legacy wrapper; new code should use either KMEM_CACHE_USERCOPY()
399  * if whitelisting a single field is sufficient, or kmem_cache_create() with
400  * the necessary parameters passed via the args parameter (see
401  * &struct kmem_cache_args).
402  *
403  * Return: a pointer to the cache on success, NULL on failure.
404  */
405 static inline struct kmem_cache *
406 kmem_cache_create_usercopy(const char *name, unsigned int size,
407 			   unsigned int align, slab_flags_t flags,
408 			   unsigned int useroffset, unsigned int usersize,
409 			   void (*ctor)(void *))
410 {
411 	struct kmem_cache_args kmem_args = {
412 		.align		= align,
413 		.ctor		= ctor,
414 		.useroffset	= useroffset,
415 		.usersize	= usersize,
416 	};
417 
418 	return __kmem_cache_create_args(name, size, &kmem_args, flags);
419 }
420 
421 /* If NULL is passed for @args, use this variant with default arguments. */
422 static inline struct kmem_cache *
423 __kmem_cache_default_args(const char *name, unsigned int size,
424 			  struct kmem_cache_args *args,
425 			  slab_flags_t flags)
426 {
427 	struct kmem_cache_args kmem_default_args = {};
428 
429 	/* Make sure we don't get passed garbage. */
430 	if (WARN_ON_ONCE(args))
431 		return ERR_PTR(-EINVAL);
432 
433 	return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
434 }
435 
436 /**
437  * kmem_cache_create - Create a kmem cache.
438  * @__name: A string which is used in /proc/slabinfo to identify this cache.
439  * @__object_size: The size of objects to be created in this cache.
440  * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
441  *	    means defaults will be used for all the arguments.
442  *
443  * This is currently implemented as a macro using ``_Generic()`` to call
444  * either the new variant of the function, or a legacy one.
445  *
446  * The new variant has 4 parameters:
447  * ``kmem_cache_create(name, object_size, args, flags)``
448  *
449  * See __kmem_cache_create_args() which implements this.
450  *
451  * The legacy variant has 5 parameters:
452  * ``kmem_cache_create(name, object_size, align, flags, ctor)``
453  *
454  * The align and ctor parameters map to the respective fields of
455  * &struct kmem_cache_args.
456  *
457  * Context: Cannot be called within an interrupt, but can be interrupted.
458  *
459  * Return: a pointer to the cache on success, NULL on failure.
460  */
461 #define kmem_cache_create(__name, __object_size, __args, ...)           \
462 	_Generic((__args),                                              \
463 		struct kmem_cache_args *: __kmem_cache_create_args,	\
464 		void *: __kmem_cache_default_args,			\
465 		default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
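/*
 * Both calling conventions accepted by the macro above, side by side (sketch
 * only, "foo" is a placeholder):
 *
 *	// legacy 5-argument form
 *	s = kmem_cache_create("foo", sizeof(struct foo),
 *			      __alignof__(struct foo), SLAB_ACCOUNT, NULL);
 *
 *	// new 4-argument form, equivalent to the above
 *	s = kmem_cache_create("foo", sizeof(struct foo),
 *			      &(struct kmem_cache_args){
 *					.align = __alignof__(struct foo),
 *			      }, SLAB_ACCOUNT);
 */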
466 
467 void kmem_cache_destroy(struct kmem_cache *s);
468 int kmem_cache_shrink(struct kmem_cache *s);
469 
470 /*
471  * Please use this macro to create slab caches. Simply specify the
472  * name of the structure and maybe some flags that are listed above.
473  *
474  * The alignment of the struct determines object alignment. If you
475  * e.g. add ____cacheline_aligned_in_smp to the struct declaration,
476  * then the objects will be properly aligned in SMP configurations.
477  */
478 #define KMEM_CACHE(__struct, __flags)                                   \
479 	__kmem_cache_create_args(#__struct, sizeof(struct __struct),    \
480 			&(struct kmem_cache_args) {			\
481 				.align	= __alignof__(struct __struct), \
482 			}, (__flags))
483 
484 /*
485  * To whitelist a single field for copying to/from userspace, use this
486  * macro instead of KMEM_CACHE() above.
487  */
488 #define KMEM_CACHE_USERCOPY(__struct, __flags, __field)						\
489 	__kmem_cache_create_args(#__struct, sizeof(struct __struct),				\
490 			&(struct kmem_cache_args) {						\
491 				.align		= __alignof__(struct __struct),			\
492 				.useroffset	= offsetof(struct __struct, __field),		\
493 				.usersize	= sizeof_field(struct __struct, __field),	\
494 			}, (__flags))
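/*
 * For instance, to whitelist only the "data" field of a hypothetical
 * struct foo for copy_to_user()/copy_from_user():
 *
 *	foo_cache = KMEM_CACHE_USERCOPY(foo, SLAB_HWCACHE_ALIGN, data);
 */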
495 
496 /*
497  * Common kmalloc functions provided by all allocators
498  */
499 void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size,
500 					       unsigned long align,
501 					       gfp_t flags, int nid) __realloc_size(2);
502 #define krealloc_noprof(_o, _s, _f)	krealloc_node_align_noprof(_o, _s, 1, _f, NUMA_NO_NODE)
503 #define krealloc_node_align(...)	alloc_hooks(krealloc_node_align_noprof(__VA_ARGS__))
504 #define krealloc_node(_o, _s, _f, _n)	krealloc_node_align(_o, _s, 1, _f, _n)
505 #define krealloc(...)			krealloc_node(__VA_ARGS__, NUMA_NO_NODE)
506 
507 void kfree(const void *objp);
508 void kfree_nolock(const void *objp);
509 void kfree_sensitive(const void *objp);
510 size_t __ksize(const void *objp);
511 
512 DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
513 DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
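/*
 * The DEFINE_FREE() entries above enable scope-based cleanup from
 * <linux/cleanup.h>. A sketch:
 *
 *	void *buf __free(kfree) = kmalloc(len, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// buf is kfree()d automatically on every return path below
 */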
514 
515 /**
516  * ksize - Report actual allocation size of associated object
517  *
518  * @objp: Pointer returned from a prior kmalloc()-family allocation.
519  *
520  * This should not be used for writing beyond the originally requested
521  * allocation size. Either use krealloc() or round up the allocation size
522  * with kmalloc_size_roundup() prior to allocation. If this is used to
523  * access beyond the originally requested allocation size, UBSAN_BOUNDS
524  * and/or FORTIFY_SOURCE may trip, since they only know about the
525  * originally allocated size via the __alloc_size attribute.
526  */
527 size_t ksize(const void *objp);
528 
529 #ifdef CONFIG_PRINTK
530 bool kmem_dump_obj(void *object);
531 #else
532 static inline bool kmem_dump_obj(void *object) { return false; }
533 #endif
534 
535 /*
536  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
537  * alignment larger than the alignment of a 64-bit integer.
538  * Setting ARCH_DMA_MINALIGN in arch headers allows that.
539  */
540 #ifdef ARCH_HAS_DMA_MINALIGN
541 #if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
542 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
543 #endif
544 #endif
545 
546 #ifndef ARCH_KMALLOC_MINALIGN
547 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
548 #elif ARCH_KMALLOC_MINALIGN > 8
549 #define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
550 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
551 #endif
552 
553 /*
554  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
555  * Intended for arches that get misalignment faults even for 64 bit integer
556  * aligned buffers.
557  */
558 #ifndef ARCH_SLAB_MINALIGN
559 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
560 #endif
561 
562 /*
563  * Arches can define this function if they want to decide the minimum slab
564  * alignment at runtime. The value returned by the function must be a power
565  * of two and >= ARCH_SLAB_MINALIGN.
566  */
567 #ifndef arch_slab_minalign
568 static inline unsigned int arch_slab_minalign(void)
569 {
570 	return ARCH_SLAB_MINALIGN;
571 }
572 #endif
573 
574 /*
575  * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
576  * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
577  * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
578  */
579 #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
580 #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
581 #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
582 
583 /*
584  * Kmalloc array related definitions
585  */
586 
587 /*
588  * SLUB directly allocates requests fitting into an order-1 page
589  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
590  */
591 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
592 #define KMALLOC_SHIFT_MAX	(MAX_PAGE_ORDER + PAGE_SHIFT)
593 #ifndef KMALLOC_SHIFT_LOW
594 #define KMALLOC_SHIFT_LOW	3
595 #endif
596 
597 /* Maximum allocatable size */
598 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
599 /* Maximum size for which we actually use a slab cache */
600 #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
601 /* Maximum order allocatable via the slab allocator */
602 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
603 
604 /*
605  * Kmalloc subsystem.
606  */
607 #ifndef KMALLOC_MIN_SIZE
608 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
609 #endif
610 
611 /*
612  * This restriction comes from the byte sized index implementation.
613  * Page size is normally 2^12 bytes and, in this case, if we want to use a
614  * byte sized index which can represent 2^8 entries, the size of the object
615  * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
616  * If the minimum kmalloc size is less than 16, we use it as the minimum object
617  * size and give up on using a byte sized index.
618  */
619 #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
620                                (KMALLOC_MIN_SIZE) : 16)
621 
622 #ifdef CONFIG_RANDOM_KMALLOC_CACHES
623 #define RANDOM_KMALLOC_CACHES_NR	15 // # of cache copies
624 #else
625 #define RANDOM_KMALLOC_CACHES_NR	0
626 #endif
627 
628 /*
629  * Whenever changing this, take care that kmalloc_type() and
630  * create_kmalloc_caches() still work as intended.
631  *
632  * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
633  * is for accounted but unreclaimable and non-dma objects. All the other
634  * kmem caches can have both accounted and unaccounted objects.
635  */
636 enum kmalloc_cache_type {
637 	KMALLOC_NORMAL = 0,
638 #ifndef CONFIG_ZONE_DMA
639 	KMALLOC_DMA = KMALLOC_NORMAL,
640 #endif
641 #ifndef CONFIG_MEMCG
642 	KMALLOC_CGROUP = KMALLOC_NORMAL,
643 #endif
644 	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
645 	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
646 #ifdef CONFIG_SLUB_TINY
647 	KMALLOC_RECLAIM = KMALLOC_NORMAL,
648 #else
649 	KMALLOC_RECLAIM,
650 #endif
651 #ifdef CONFIG_ZONE_DMA
652 	KMALLOC_DMA,
653 #endif
654 #ifdef CONFIG_MEMCG
655 	KMALLOC_CGROUP,
656 #endif
657 	NR_KMALLOC_TYPES
658 };
659 
660 typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];
661 
662 extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];
663 
664 /*
665  * Define gfp bits that should not be set for KMALLOC_NORMAL.
666  */
667 #define KMALLOC_NOT_NORMAL_BITS					\
668 	(__GFP_RECLAIMABLE |					\
669 	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
670 	(IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))
671 
672 extern unsigned long random_kmalloc_seed;
673 
674 static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
675 {
676 	/*
677 	 * The most common case is KMALLOC_NORMAL, so test for it
678 	 * with a single branch for all the relevant flags.
679 	 */
680 	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
681 #ifdef CONFIG_RANDOM_KMALLOC_CACHES
682 		/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
683 		return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
684 						      ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
685 #else
686 		return KMALLOC_NORMAL;
687 #endif
688 
689 	/*
690 	 * At least one of the flags has to be set. Their priorities in
691 	 * decreasing order are:
692 	 *  1) __GFP_DMA
693 	 *  2) __GFP_RECLAIMABLE
694 	 *  3) __GFP_ACCOUNT
695 	 */
696 	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
697 		return KMALLOC_DMA;
698 	if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
699 		return KMALLOC_RECLAIM;
700 	else
701 		return KMALLOC_CGROUP;
702 }
703 
704 /*
705  * Figure out which kmalloc slab an allocation of a certain size
706  * belongs to.
707  * 0 = zero alloc
708  * 1 =  65 .. 96 bytes
709  * 2 = 129 .. 192 bytes
710  * n = 2^(n-1)+1 .. 2^n
711  *
712  * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
713  * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
714  * Callers where !size_is_constant should only be test modules, where runtime
715  * overheads of __kmalloc_index() can be tolerated.  Also see kmalloc_slab().
716  */
717 static __always_inline unsigned int __kmalloc_index(size_t size,
718 						    bool size_is_constant)
719 {
720 	if (!size)
721 		return 0;
722 
723 	if (size <= KMALLOC_MIN_SIZE)
724 		return KMALLOC_SHIFT_LOW;
725 
726 	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
727 		return 1;
728 	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
729 		return 2;
730 	if (size <=          8) return 3;
731 	if (size <=         16) return 4;
732 	if (size <=         32) return 5;
733 	if (size <=         64) return 6;
734 	if (size <=        128) return 7;
735 	if (size <=        256) return 8;
736 	if (size <=        512) return 9;
737 	if (size <=       1024) return 10;
738 	if (size <=   2 * 1024) return 11;
739 	if (size <=   4 * 1024) return 12;
740 	if (size <=   8 * 1024) return 13;
741 	if (size <=  16 * 1024) return 14;
742 	if (size <=  32 * 1024) return 15;
743 	if (size <=  64 * 1024) return 16;
744 	if (size <= 128 * 1024) return 17;
745 	if (size <= 256 * 1024) return 18;
746 	if (size <= 512 * 1024) return 19;
747 	if (size <= 1024 * 1024) return 20;
748 	if (size <=  2 * 1024 * 1024) return 21;
749 
750 	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
751 		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
752 	else
753 		BUG();
754 
755 	/* Will never be reached. Needed because the compiler may complain */
756 	return -1;
757 }
758 static_assert(PAGE_SHIFT <= 20);
759 #define kmalloc_index(s) __kmalloc_index(s, true)
760 
761 #include <linux/alloc_tag.h>
762 
763 /**
764  * kmem_cache_alloc - Allocate an object
765  * @cachep: The cache to allocate from.
766  * @flags: See kmalloc().
767  *
768  * Allocate an object from this cache.
769  * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
770  *
771  * Return: pointer to the new object or %NULL in case of error
772  */
773 void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
774 			      gfp_t flags) __assume_slab_alignment __malloc;
775 #define kmem_cache_alloc(...)			alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
776 
777 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
778 			    gfp_t gfpflags) __assume_slab_alignment __malloc;
779 #define kmem_cache_alloc_lru(...)	alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
780 
781 /**
782  * kmem_cache_charge - memcg charge an already allocated slab memory
783  * @objp: address of the slab object to memcg charge
784  * @gfpflags: describe the allocation context
785  *
786  * kmem_cache_charge allows charging a slab object to the current memcg,
787  * primarily in cases where charging at allocation time might not be possible
788  * because the target memcg is not known (e.g. in softirq context).
789  *
790  * The objp should be a pointer returned by the slab allocator functions like
791  * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge
792  * behavior can be controlled through gfpflags parameter, which affects how the
793  * necessary internal metadata can be allocated. Including __GFP_NOFAIL denotes
794  * that overcharging is requested instead of failure, but is not applied for the
795  * internal metadata allocation.
796  *
797  * There are several cases where it will return true even if the charging
798  * was not done. More specifically:
800  *
801  * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
802  * 2. Already charged slab objects.
803  * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
804  *    without __GFP_ACCOUNT
805  * 4. Allocating internal metadata has failed
806  *
807  * Return: true if charge was successful otherwise false.
808  */
809 bool kmem_cache_charge(void *objp, gfp_t gfpflags);
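/*
 * A sketch of deferred charging for objects from a dedicated cache (the names
 * are made up): allocate where the owning memcg is unknown, charge once it is
 * known:
 *
 *	// e.g. in softirq context
 *	req = kmem_cache_alloc(req_cache, GFP_ATOMIC);
 *	...
 *	// later, in task context on behalf of the owning memcg
 *	if (!kmem_cache_charge(req, GFP_KERNEL))
 *		pr_debug("request left uncharged\n");
 */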
810 void kmem_cache_free(struct kmem_cache *s, void *objp);
811 
812 kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
813 				  unsigned int useroffset, unsigned int usersize,
814 				  void (*ctor)(void *));
815 
816 /*
817  * Bulk allocation and freeing operations. These are accelerated in an
818  * allocator specific way to avoid taking locks repeatedly or building
819  * metadata structures unnecessarily.
820  *
821  * Note that interrupts must be enabled when calling these functions.
822  */
823 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
824 
825 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
826 #define kmem_cache_alloc_bulk(...)	alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))
827 
828 static __always_inline void kfree_bulk(size_t size, void **p)
829 {
830 	kmem_cache_free_bulk(NULL, size, p);
831 }
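/*
 * Typical bulk usage (sketch): allocate and free a batch of objects from one
 * cache with a single call each:
 *
 *	void *objs[16];
 *	int allocated;
 *
 *	allocated = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *	if (!allocated)
 *		return -ENOMEM;	// nothing was allocated
 *	...
 *	kmem_cache_free_bulk(cache, allocated, objs);
 */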
832 
833 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
834 				   int node) __assume_slab_alignment __malloc;
835 #define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
836 
837 struct slab_sheaf *
838 kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
839 
840 int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
841 		struct slab_sheaf **sheafp, unsigned int size);
842 
843 void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
844 				       struct slab_sheaf *sheaf);
845 
846 void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
847 			struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
848 #define kmem_cache_alloc_from_sheaf(...)	\
849 			alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
850 
851 unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf);
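/*
 * A rough sketch of the prefilled sheaf API declared above: reserve a number
 * of objects up front so that later allocations (e.g. in a context that must
 * not block) can be served from the sheaf (error handling elided):
 *
 *	struct slab_sheaf *sheaf;
 *	void *obj;
 *
 *	sheaf = kmem_cache_prefill_sheaf(cache, GFP_KERNEL, 8);
 *	...
 *	obj = kmem_cache_alloc_from_sheaf(cache, GFP_NOWAIT, sheaf);
 *	...
 *	kmem_cache_return_sheaf(cache, GFP_KERNEL, sheaf);
 */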
852 
853 /*
854  * These macros allow declaring a kmem_buckets * parameter alongside size, which
855  * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
856  * sites don't have to pass NULL.
857  */
858 #ifdef CONFIG_SLAB_BUCKETS
859 #define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size), kmem_buckets *(_b)
860 #define PASS_BUCKET_PARAMS(_size, _b)	(_size), (_b)
861 #define PASS_BUCKET_PARAM(_b)		(_b)
862 #else
863 #define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size)
864 #define PASS_BUCKET_PARAMS(_size, _b)	(_size)
865 #define PASS_BUCKET_PARAM(_b)		NULL
866 #endif
867 
868 /*
869  * The following functions are not to be used directly and are intended only
870  * for internal use from kmalloc() and kmalloc_node()
871  * with the exception of kunit tests
872  */
873 
874 void *__kmalloc_noprof(size_t size, gfp_t flags)
875 				__assume_kmalloc_alignment __alloc_size(1);
876 
877 void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
878 				__assume_kmalloc_alignment __alloc_size(1);
879 
880 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
881 				__assume_kmalloc_alignment __alloc_size(3);
882 
883 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
884 				  int node, size_t size)
885 				__assume_kmalloc_alignment __alloc_size(4);
886 
887 void *__kmalloc_large_noprof(size_t size, gfp_t flags)
888 				__assume_page_alignment __alloc_size(1);
889 
890 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
891 				__assume_page_alignment __alloc_size(1);
892 
893 /**
894  * kmalloc - allocate kernel memory
895  * @size: how many bytes of memory are required.
896  * @flags: describe the allocation context
897  *
898  * kmalloc is the normal method of allocating memory
899  * for objects smaller than page size in the kernel.
900  *
901  * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
902  * bytes. For a power-of-two @size, the alignment is also guaranteed
903  * to be at least the size. For other sizes, the alignment is guaranteed to
904  * be at least the largest power-of-two divisor of @size.
905  *
906  * The @flags argument may be one of the GFP flags defined at
907  * include/linux/gfp_types.h and described at
908  * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
909  *
910  * The recommended usage of the @flags is described at
911  * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
912  *
913  * Below is a brief outline of the most useful GFP flags
914  *
915  * %GFP_KERNEL
916  *	Allocate normal kernel ram. May sleep.
917  *
918  * %GFP_NOWAIT
919  *	Allocation will not sleep.
920  *
921  * %GFP_ATOMIC
922  *	Allocation will not sleep.  May use emergency pools.
923  *
924  * Also it is possible to set different flags by OR'ing
925  * in one or more of the following additional @flags:
926  *
927  * %__GFP_ZERO
928  *	Zero the allocated memory before returning. Also see kzalloc().
929  *
930  * %__GFP_HIGH
931  *	This allocation has high priority and may use emergency pools.
932  *
933  * %__GFP_NOFAIL
934  *	Indicate that this allocation is in no way allowed to fail
935  *	(think twice before using).
936  *
937  * %__GFP_NORETRY
938  *	If memory is not immediately available,
939  *	then give up at once.
940  *
941  * %__GFP_NOWARN
942  *	If allocation fails, don't issue any warnings.
943  *
944  * %__GFP_RETRY_MAYFAIL
945  *	Try really hard to satisfy the allocation, but fail
946  *	eventually if it cannot be satisfied.
947  */
948 static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
949 {
950 	if (__builtin_constant_p(size) && size) {
951 		unsigned int index;
952 
953 		if (size > KMALLOC_MAX_CACHE_SIZE)
954 			return __kmalloc_large_noprof(size, flags);
955 
956 		index = kmalloc_index(size);
957 		return __kmalloc_cache_noprof(
958 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
959 				flags, size);
960 	}
961 	return __kmalloc_noprof(size, flags);
962 }
963 #define kmalloc(...)				alloc_hooks(kmalloc_noprof(__VA_ARGS__))
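/*
 * For example (sketch), a small zeroed and memcg-accounted allocation that
 * may sleep:
 *
 *	ptr = kmalloc(sizeof(*ptr), GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
 *	if (!ptr)
 *		return -ENOMEM;
 */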
964 
965 void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
966 #define kmalloc_nolock(...)			alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))
967 
968 #define kmem_buckets_alloc(_b, _size, _flags)	\
969 	alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
970 
971 #define kmem_buckets_alloc_track_caller(_b, _size, _flags)	\
972 	alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_))
973 
974 static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
975 {
976 	if (__builtin_constant_p(size) && size) {
977 		unsigned int index;
978 
979 		if (size > KMALLOC_MAX_CACHE_SIZE)
980 			return __kmalloc_large_node_noprof(size, flags, node);
981 
982 		index = kmalloc_index(size);
983 		return __kmalloc_cache_node_noprof(
984 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
985 				flags, node, size);
986 	}
987 	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
988 }
989 #define kmalloc_node(...)			alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
990 
991 /**
992  * kmalloc_array - allocate memory for an array.
993  * @n: number of elements.
994  * @size: element size.
995  * @flags: the type of memory to allocate (see kmalloc).
996  */
997 static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
998 {
999 	size_t bytes;
1000 
1001 	if (unlikely(check_mul_overflow(n, size, &bytes)))
1002 		return NULL;
1003 	return kmalloc_noprof(bytes, flags);
1004 }
1005 #define kmalloc_array(...)			alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
1006 
1007 /**
1008  * krealloc_array - reallocate memory for an array.
1009  * @p: pointer to the memory chunk to reallocate
1010  * @new_n: new number of elements to alloc
1011  * @new_size: new size of a single member of the array
1012  * @flags: the type of memory to allocate (see kmalloc)
1013  *
1014  * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
1015  * initial memory allocation, every subsequent call to this API for the same
1016  * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
1017  * __GFP_ZERO is not fully honored by this API.
1018  *
1019  * See krealloc_noprof() for further details.
1020  *
1021  * In any case, the contents of the object pointed to are preserved up to the
1022  * lesser of the new and old sizes.
1023  */
1024 static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
1025 								       size_t new_n,
1026 								       size_t new_size,
1027 								       gfp_t flags)
1028 {
1029 	size_t bytes;
1030 
1031 	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
1032 		return NULL;
1033 
1034 	return krealloc_noprof(p, bytes, flags);
1035 }
1036 #define krealloc_array(...)			alloc_hooks(krealloc_array_noprof(__VA_ARGS__))
1037 
1038 /**
1039  * kcalloc - allocate memory for an array. The memory is set to zero.
1040  * @n: number of elements.
1041  * @size: element size.
1042  * @flags: the type of memory to allocate (see kmalloc).
1043  */
1044 #define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)
1045 
1046 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
1047 					 unsigned long caller) __alloc_size(1);
1048 #define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
1049 	__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
1050 #define kmalloc_node_track_caller(...)		\
1051 	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
1052 
1053 /*
1054  * kmalloc_track_caller is a special version of kmalloc that records the
1055  * calling function of the routine calling it for slab leak tracking instead
1056  * of just the calling function (confusing, eh?).
1057  * It's useful when the call to kmalloc comes from a widely-used standard
1058  * allocator where we care about the real place the memory allocation
1059  * request comes from.
1060  */
1061 #define kmalloc_track_caller(...)		kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
1062 
1063 #define kmalloc_track_caller_noprof(...)	\
1064 		kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
1065 
1066 static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
1067 							  int node)
1068 {
1069 	size_t bytes;
1070 
1071 	if (unlikely(check_mul_overflow(n, size, &bytes)))
1072 		return NULL;
1073 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
1074 		return kmalloc_node_noprof(bytes, flags, node);
1075 	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
1076 }
1077 #define kmalloc_array_node(...)			alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
1078 
1079 #define kcalloc_node(_n, _size, _flags, _node)	\
1080 	kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
1081 
1082 /*
1083  * Shortcuts
1084  */
1085 #define kmem_cache_zalloc(_k, _flags)		kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)
1086 
1087 /**
1088  * kzalloc - allocate memory. The memory is set to zero.
1089  * @size: how many bytes of memory are required.
1090  * @flags: the type of memory to allocate (see kmalloc).
1091  */
1092 static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
1093 {
1094 	return kmalloc_noprof(size, flags | __GFP_ZERO);
1095 }
1096 #define kzalloc(...)				alloc_hooks(kzalloc_noprof(__VA_ARGS__))
1097 #define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
1098 
1099 void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
1100 			     gfp_t flags, int node) __alloc_size(1);
1101 #define kvmalloc_node_align_noprof(_size, _align, _flags, _node)	\
1102 	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, NULL), _align, _flags, _node)
1103 #define kvmalloc_node_align(...)		\
1104 	alloc_hooks(kvmalloc_node_align_noprof(__VA_ARGS__))
1105 #define kvmalloc_node(_s, _f, _n)		kvmalloc_node_align(_s, 1, _f, _n)
1106 #define kvmalloc(...)				kvmalloc_node(__VA_ARGS__, NUMA_NO_NODE)
1107 #define kvzalloc(_size, _flags)			kvmalloc(_size, (_flags)|__GFP_ZERO)
1108 
1109 #define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
1110 
1111 #define kmem_buckets_valloc(_b, _size, _flags)	\
1112 	alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), 1, _flags, NUMA_NO_NODE))
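/*
 * kvmalloc() and friends first try kmalloc() and transparently fall back to
 * vmalloc() for requests that are too large or too fragmented for the slab
 * allocator; kvfree() handles either outcome. A sketch:
 *
 *	table = kvzalloc(array_size(nr_entries, sizeof(*table)), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */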
1113 
1114 static inline __alloc_size(1, 2) void *
1115 kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
1116 {
1117 	size_t bytes;
1118 
1119 	if (unlikely(check_mul_overflow(n, size, &bytes)))
1120 		return NULL;
1121 
1122 	return kvmalloc_node_align_noprof(bytes, 1, flags, node);
1123 }
1124 
1125 #define kvmalloc_array_noprof(...)		kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
1126 #define kvcalloc_node_noprof(_n,_s,_f,_node)	kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node)
1127 #define kvcalloc_noprof(...)			kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
1128 
1129 #define kvmalloc_array(...)			alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
1130 #define kvcalloc_node(...)			alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
1131 #define kvcalloc(...)				alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
1132 
1133 void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
1134 				  gfp_t flags, int nid) __realloc_size(2);
1135 #define kvrealloc_node_align(...)		\
1136 	alloc_hooks(kvrealloc_node_align_noprof(__VA_ARGS__))
1137 #define kvrealloc_node(_p, _s, _f, _n)		kvrealloc_node_align(_p, _s, 1, _f, _n)
1138 #define kvrealloc(...)				kvrealloc_node(__VA_ARGS__, NUMA_NO_NODE)
1139 
1140 extern void kvfree(const void *addr);
1141 DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
1142 
1143 extern void kvfree_sensitive(const void *addr, size_t len);
1144 
1145 unsigned int kmem_cache_size(struct kmem_cache *s);
1146 
1147 #ifndef CONFIG_KVFREE_RCU_BATCHED
1148 static inline void kvfree_rcu_barrier(void)
1149 {
1150 	rcu_barrier();
1151 }
1152 
1153 static inline void kfree_rcu_scheduler_running(void) { }
1154 #else
1155 void kvfree_rcu_barrier(void);
1156 
1157 void kfree_rcu_scheduler_running(void);
1158 #endif
1159 
1160 /**
1161  * kmalloc_size_roundup - Report allocation bucket size for the given size
1162  *
1163  * @size: Number of bytes to round up from.
1164  *
1165  * This returns the number of bytes that would be available in a kmalloc()
1166  * allocation of @size bytes. For example, a 126 byte request would be
1167  * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
1168  * for the general-purpose kmalloc()-based allocations, and is not for the
1169  * pre-sized kmem_cache_alloc()-based allocations.)
1170  *
1171  * Use this to kmalloc() the full bucket size ahead of time instead of using
1172  * ksize() to query the size after an allocation.
1173  */
1174 size_t kmalloc_size_roundup(size_t size);
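/*
 * For example (sketch), round the request up front so the whole bucket can be
 * used without querying ksize() afterwards:
 *
 *	alloc_len = kmalloc_size_roundup(needed);
 *	buf = kmalloc(alloc_len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// all alloc_len bytes may now be used safely
 */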
1175 
1176 void __init kmem_cache_init_late(void);
1177 void __init kvfree_rcu_init(void);
1178 
1179 #endif	/* _LINUX_SLAB_H */