/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>

/*
 * Internal slab definitions
 */

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif
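/*
 * The double-word cmpxchg on the (freelist, counters) pair requires the pair
 * to be naturally aligned, which is only guaranteed when struct page itself
 * is suitably aligned (CONFIG_HAVE_ALIGNED_STRUCT_PAGE); otherwise the
 * lockless freelist_aba path is disabled above.
 */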

/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
struct freelist_counters {
	union {
		struct {
			void *freelist;
			union {
				unsigned long counters;
				struct {
					unsigned inuse:16;
					unsigned objects:15;
					/*
					 * If slab debugging is enabled then the
					 * frozen bit can be reused to indicate
					 * that the slab was corrupted
					 */
					unsigned frozen:1;
				};
			};
		};
#ifdef system_has_freelist_aba
		freelist_full_t freelist_counters;
#endif
	};
};

/* Reuses the bits in struct page */
struct slab {
	memdesc_flags_t flags;

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
				struct { /* For deferred deactivate_slab() */
					struct llist_node llnode;
					void *flush_freelist;
				};
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			struct freelist_counters;
		};
		struct rcu_head rcu_head;
	};

	unsigned int __page_type;
	atomic_t __page_refcount;
#ifdef CONFIG_SLAB_OBJ_EXT
	unsigned long obj_exts;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(struct freelist_counters)));
#endif

/**
 * slab_folio - The folio allocated for a slab
 * @s: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed through struct slab. It is occasionally necessary to convert back
 * to a folio in order to communicate with the rest of the mm.  Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from struct page to its slab.
 * @page: A page which may or may not belong to a slab.
 *
 * Return: The slab which contains this page, or NULL if the page does not
 * belong to a slab.  Pages returned from large kmalloc are not slabs, so
 * NULL is returned for them as well.
 */
static inline struct slab *page_slab(const struct page *page)
{
	unsigned long head;

	head = READ_ONCE(page->compound_head);
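	/*
	 * Bit 0 of compound_head is set on tail pages; with that bit masked
	 * off it points back at the head page.  The top byte of page_type
	 * then tells us whether the (head) page is a slab at all.
	 */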
	if (head & 1)
		page = (struct page *)(head - 1);
	if (data_race(page->page_type >> 24) != PGTY_slab)
		page = NULL;

	return (struct slab *)page;
}

/**
 * slab_page - The first struct page allocated for a slab
 * @s: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return memdesc_nid(slab->flags);
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return NODE_DATA(slab_nid(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	return page_slab(virt_to_page(addr));
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
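/*
 * See oo_make()/oo_order()/oo_objects() in slub.c for the exact encoding;
 * the object count is kept in the low 16 bits and the page order in the
 * bits above them.
 */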

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	struct lock_class_key lock_key;
	struct slub_percpu_sheaves __percpu *cpu_sheaves;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* Object size including metadata */
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	unsigned int sheaf_capacity;
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *object);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS 1
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

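/*
 * Round an arbitrary pointer into a slab down to the start of the object
 * that contains it, clamped to the last object and adjusted past any left
 * red zone.
 */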
static inline void *nearest_obj(struct kmem_cache *cache,
				const struct slab *slab, void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

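/*
 * KFENCE-managed objects live in the dedicated KFENCE pool rather than in
 * the slab's object array, so they are simply reported as index 0.
 */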
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);

extern u8 kmalloc_size_index[24];

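/*
 * Each kmalloc_size_index[] entry covers an 8-byte range of request sizes:
 * element 0 serves sizes 1..8, element 1 serves 9..16, and so on up to
 * element 23 for 185..192.  size_index_elem() maps a size to its element.
 */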
static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation.
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE; the caller must check that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{
	unsigned int index;

	if (!b)
		b = &kmalloc_caches[kmalloc_type(flags, caller)];
	if (size <= 192)
		index = kmalloc_size_index[size_index_elem(size)];
	else
		index = fls(size - 1);

	return (*b)[index];
}
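/*
 * For sizes above 192 the index is simply the power of two that covers the
 * request: e.g. a 200-byte request gives fls(199) = 8, i.e. the 256-byte
 * cache.  Smaller sizes go through kmalloc_size_index[] so that the 96 and
 * 192 byte caches can be used when they exist.
 */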

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int do_kmem_cache_create(struct kmem_cache *s, const char *name,
			 unsigned int size, struct kmem_cache_args *args,
			 slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

static inline bool is_kmalloc_normal(struct kmem_cache *s)
{
	if (!is_kmalloc_cache(s))
		return false;
	return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
}

bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
void flush_all_rcu_sheaves(void);

#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS | \
			 SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			 SLAB_TEMPORARY | SLAB_ACCOUNT | \
			 SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)

#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slab_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}

#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
bool slab_in_kunit_test(void);
#else
static inline bool slab_in_kunit_test(void) { return false; }
#endif

#ifdef CONFIG_SLAB_OBJ_EXT

/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object extension vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
	unsigned long obj_exts = READ_ONCE(slab->obj_exts);

#ifdef CONFIG_MEMCG
	/*
	 * obj_exts should be either NULL, a valid pointer with
	 * MEMCG_DATA_OBJEXTS bit set or be equal to OBJEXTS_ALLOC_FAIL.
	 */
	VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS) &&
		       obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
#endif
	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
}

int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
                        gfp_t gfp, bool new_slab);

#else /* CONFIG_SLAB_OBJ_EXT */

static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
	return NULL;
}

#endif /* CONFIG_SLAB_OBJ_EXT */

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_MEMCG
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t flags, size_t size, void **p);
void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
			    void **p, int objects, struct slabobj_ext *obj_exts);
#endif

void kvfree_rcu_cb(struct rcu_head *head);

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
#endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there or track
	 * user information, then we can only use the space before that
	 * information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
}

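/*
 * For large kmalloc allocations the page order is kept in the low byte of
 * the second page's flags word; the helpers below recover the order and the
 * total allocation size from it.
 */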
static inline unsigned int large_kmalloc_order(const struct page *page)
{
	return page[1].flags.f & 0xff;
}

static inline size_t large_kmalloc_size(const struct page *page)
{
	return PAGE_SIZE << large_kmalloc_order(page);
}

#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

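/*
 * init_on_alloc zeroing is skipped for caches with constructors; for
 * SLAB_TYPESAFE_BY_RCU or poisoned caches it is only honoured when the
 * caller explicitly passed __GFP_ZERO.  init_on_free is likewise disabled
 * for caches with constructors, RCU-typesafe caches and poisoned caches.
 */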
static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

void defer_free_barrier(void);

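/*
 * The original kmalloc() request size ("orig_size") is only tracked for
 * kmalloc caches when SLAB_STORE_USER debugging is enabled, which is what
 * slub_debug_orig_size() checks.
 */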
static inline bool slub_debug_orig_size(struct kmem_cache *s)
{
	return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
			(s->flags & SLAB_KMALLOC));
}

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */